// captains_log/rotation.rs

1use file_rotate::compression::Compression;
2use file_rotate::suffix::{
3    AppendCount, AppendTimestamp, DateFrom, FileLimit, Representation, SuffixScheme,
4};
5use file_rotate::SuffixInfo;
6use flate2::write::GzEncoder;
7use parking_lot::Mutex;
8use std::cell::UnsafeCell;
9use std::collections::BTreeSet;
10use std::fs::{self, File, OpenOptions};
11use std::io;
12use std::mem::transmute;
13use std::path::{Path, PathBuf};
14use std::sync::Arc;
15use std::thread;
16use std::time::{Duration, SystemTime};
17
/// Time granularity for age-based rotation.
#[derive(Debug, Hash, Clone, Copy, PartialEq, Eq)]
pub enum Age {
    /// Rotate once per day.
    Day,
    /// Rotate once per hour.
    Hour,
}
23
/// Define the time to rotate files.
#[derive(Hash, Clone, Copy, PartialEq)]
pub struct ByAge {
    /// Rotate the file by day / hour.
    pub age_type: Age,

    /// Similar to system's log-rotate:
    /// for `Age::Day`, the latest archive uses yesterday's timestamp;
    /// for `Age::Hour`, it uses the last hour's timestamp.
    pub use_last_time: bool,
}
35
36#[derive(Hash, Clone, Copy, PartialEq)]
37pub enum Upkeep {
38    /// Log file  older than the duration will be deleted.
39    Age(chrono::TimeDelta),
40    /// Only keeps the number of old logs.
41    Count(usize),
42    /// Does not delete any old logs.
43    All,
44}
45
/// Log rotation configuration.
///
/// `by_age` and `by_size` can be configured at the same time, meaning the log will be rotated
/// when any of the conditions is met.
/// It's not valid for `by_age` and `by_size` to both be None.
#[derive(Hash)]
pub struct Rotation {
    // Rotate when the configured Day/Hour period has elapsed.
    pub by_age: Option<ByAge>,
    // Rotate when the current log file grows beyond this many bytes.
    pub by_size: Option<u64>,

    /// If None, archive in `file.<number>` form, and `Upkeep::Age` will be ignored.
    ///
    /// If Some, archive in `file.<datetime>` form.
    pub time_fmt: Option<&'static str>,

    /// How to clean up the old files.
    pub upkeep: Upkeep,

    /// Whether to move the log into an `archive_dir`. If not configured, it's the same dir as
    /// the current log.
    pub archive_dir: Option<PathBuf>,

    /// When Some(count), indicates how many archived logs stay uncompressed. When 0, all the
    /// archived logs are compressed.
    /// When None, do not compress archived logs.
    pub compress_exclude: Option<usize>,
}
71
72impl Rotation {
73    ///
74    /// Archive log when size is reached.
75    ///
76    /// # Arguments:
77    ///
78    /// - `max_files`: When None, do not delete old files
79    pub fn by_size(size_limit: u64, max_files: Option<usize>) -> Self {
80        let upkeep =
81            if let Some(_max_files) = max_files { Upkeep::Count(_max_files) } else { Upkeep::All };
82        Self {
83            by_age: None,
84            by_size: Some(size_limit),
85            time_fmt: None,
86            upkeep,
87            archive_dir: None,
88            compress_exclude: None,
89        }
90    }
91
92    /// Rotate log by time (Day/Hour)
93    ///
94    /// # Arguments:
95    ///
96    /// - `use_last_time`: Similar to system's log-rotate,
97    ///
98    ///     - For Age::Day, the latest archive use yesterday's timestamp;
99    ///
100    ///     - For Age::Hour, use last hour's timestamp.
101    ///
102    /// - `time_fmt`: timestamp format of the archived files
103    ///
104    /// - `max_time`: Delete archived logs older than the time
105    pub fn by_age(
106        age: Age, use_last_time: bool, time_fmt: &'static str, max_time: Option<chrono::TimeDelta>,
107    ) -> Self {
108        let upkeep =
109            if let Some(_max_time) = max_time { Upkeep::Age(_max_time) } else { Upkeep::All };
110        Self {
111            by_age: Some(ByAge { age_type: age, use_last_time }),
112            by_size: None,
113            time_fmt: Some(time_fmt),
114            upkeep,
115            compress_exclude: None,
116            archive_dir: None,
117        }
118    }
119
120    /// Rotate log when time (Day/Hour) is reached or when size is reached
121    ///
122    /// # Arguments:
123    ///
124    /// - `use_last_time`: Similar to system's log-rotate,
125    ///
126    ///     - For Age::Day, the latest archive use yesterday's timestamp;
127    ///
128    ///     - For Age::Hour, use last hour's timestamp.
129    ///
130    /// - `time_fmt`: timestamp format of the archived files
131    ///
132    /// - `max_time`: Delete archived logs older than the time
133    pub fn by_age_and_size(
134        age: Age, size_limit: u64, use_last_time: bool, time_fmt: &'static str,
135        max_time: Option<chrono::TimeDelta>,
136    ) -> Self {
137        let upkeep =
138            if let Some(_max_time) = max_time { Upkeep::Age(_max_time) } else { Upkeep::All };
139        Self {
140            by_age: Some(ByAge { age_type: age, use_last_time }),
141            by_size: Some(size_limit),
142            time_fmt: Some(time_fmt),
143            upkeep,
144            compress_exclude: None,
145            archive_dir: None,
146        }
147    }
148
149    /// Compress archived logs, with a number of recent files left uncompressed.
150    ///
151    /// When `un_compress_files` == 0, means all files will be compressed.
152    pub fn compress_exclude(mut self, un_compress_files: usize) -> Self {
153        self.compress_exclude.replace(un_compress_files);
154        self
155    }
156
157    /// Move the old logs into an `archive_dir`.
158    pub fn archive_dir<P: Into<PathBuf>>(mut self, archive_dir: P) -> Self {
159        self.archive_dir.replace(archive_dir.into());
160        self
161    }
162
163    pub(crate) fn build(&self, file_path: &Path) -> LogRotate {
164        assert!(
165            self.by_age.is_some() || self.by_size.is_some(),
166            "by_age and by_size can not be both None"
167        );
168        let archive_dir = if let Some(_dir) = &self.archive_dir {
169            _dir.clone()
170        } else {
171            // TODO FIXME
172            file_path.parent().unwrap().to_path_buf()
173        };
174        let mut size = None;
175        let mut age = None;
176        let mut date_from = DateFrom::Now;
177        if let Some(by_age) = &self.by_age {
178            if by_age.use_last_time {
179                match by_age.age_type {
180                    Age::Hour => {
181                        date_from = DateFrom::DateHourAgo;
182                    }
183                    Age::Day => {
184                        date_from = DateFrom::DateYesterday;
185                    }
186                }
187            }
188            age.replace(LimiterAge::new(by_age.age_type));
189        }
190        if let Some(_size) = &self.by_size {
191            size.replace(LimiterSize::new(*_size));
192        }
193        let c = if let Some(compress) = &self.compress_exclude {
194            Compression::OnRotate(*compress)
195        } else {
196            Compression::None
197        };
198        let backend;
199        if let Some(time_fmt) = self.time_fmt {
200            let file_limit = match self.upkeep {
201                Upkeep::Age(d) => FileLimit::Age(d),
202                Upkeep::Count(c) => FileLimit::MaxFiles(c),
203                Upkeep::All => FileLimit::Unlimited,
204            };
205            let schema = AppendTimestamp { format: time_fmt, file_limit, date_from };
206            backend = Backend::Time(UnsafeCell::new(_Backend::new(
207                archive_dir.clone(),
208                file_path,
209                self.upkeep,
210                c,
211                schema,
212            )));
213        } else {
214            let file_limit = match self.upkeep {
215                Upkeep::Age(_) => 0,
216                Upkeep::Count(c) => c,
217                Upkeep::All => 0,
218            };
219            let schema = AppendCount::new(file_limit);
220            backend = Backend::Num(UnsafeCell::new(_Backend::new(
221                archive_dir.clone(),
222                file_path,
223                self.upkeep,
224                c,
225                schema,
226            )));
227        }
228        return LogRotate {
229            size_limit: size,
230            age_limit: age,
231            backend: Arc::new(backend),
232            th: Mutex::new(None),
233        };
234    }
235}
236
/// Runtime rotation state built from a [`Rotation`] config (see `Rotation::build`).
pub(crate) struct LogRotate {
    // Present when rotation-by-size is configured.
    size_limit: Option<LimiterSize>,
    // Present when rotation-by-age is configured.
    age_limit: Option<LimiterAge>,
    // Shared with the background cleanup thread spawned on each rotation.
    backend: Arc<Backend>,
    // Handle of the last cleanup thread; joined via `wait()` before the next rotation.
    th: Mutex<Option<thread::JoinHandle<()>>>,
}
243
244impl LogRotate {
245    pub fn rotate<S: FileSinkTrait>(&self, sink: &S) -> bool {
246        let mut need_rotate = false;
247        if let Some(age) = self.age_limit.as_ref() {
248            if age.check(sink) {
249                need_rotate = true;
250            }
251        }
252        if let Some(size) = self.size_limit.as_ref() {
253            if size.check(sink) {
254                need_rotate = true;
255            }
256        }
257        if need_rotate == false {
258            return false;
259        }
260        self.wait();
261
262        self.backend.rename_files();
263        let backend = self.backend.clone();
264        let th = thread::spawn(move || {
265            let _ = backend.handle_old_files();
266        });
267        self.th.lock().replace(th);
268        true
269    }
270
271    /// Wait for the last handle_old_files to finish.
272    pub fn wait(&self) {
273        if let Some(th) = self.th.lock().take() {
274            let _ = th.join();
275        }
276    }
277}
278
279pub(crate) struct LimiterSize {
280    limit: u64,
281}
282
283impl LimiterSize {
284    pub fn new(size: u64) -> Self {
285        Self { limit: size }
286    }
287
288    #[inline]
289    pub fn check<S: FileSinkTrait>(&self, sink: &S) -> bool {
290        return sink.get_size() > self.limit;
291    }
292}
293
294pub(crate) struct LimiterAge {
295    limit: Duration,
296}
297
298impl LimiterAge {
299    pub fn new(limit: Age) -> Self {
300        Self {
301            limit: match limit {
302                Age::Hour => Duration::from_secs(60 * 60),
303                Age::Day => Duration::from_secs(24 * 60 * 60),
304            },
305        }
306    }
307
308    pub fn check<S: FileSinkTrait>(&self, sink: &S) -> bool {
309        let now = SystemTime::now();
310        let start_ts = sink.get_create_time();
311        match now.duration_since(start_ts) {
312            Ok(d) => return d > self.limit,
313            Err(_) => return true, // system time rotate back
314        }
315    }
316}
317
/// Minimal view of the active log file needed to decide whether to rotate.
pub(crate) trait FileSinkTrait {
    /// Creation time of the current log file.
    fn get_create_time(&self) -> SystemTime;

    /// Current size of the current log file, in bytes.
    fn get_size(&self) -> u64;
}
323
/// Suffix-scheme-erased wrapper around `_Backend`, so `LogRotate` can hold either
/// numeric (`file.<n>`) or timestamp (`file.<datetime>`) archives behind one type.
enum Backend {
    Num(UnsafeCell<_Backend<AppendCount>>),
    Time(UnsafeCell<_Backend<AppendTimestamp>>),
}

// SAFETY(review): these impls let `Arc<Backend>` cross into the cleanup thread.
// Soundness appears to rely on `LogRotate` joining the previous thread (`wait()`)
// before mutating again, so the `UnsafeCell` contents are never accessed from two
// threads at once — TODO confirm every call path upholds this discipline.
unsafe impl Send for Backend {}
unsafe impl Sync for Backend {}
331
332impl Backend {
333    fn rename_files(&self) {
334        match self {
335            Self::Num(_inner) => {
336                let inner: &mut _Backend<AppendCount> = unsafe { transmute(_inner.get()) };
337                inner.rename_files();
338            }
339            Self::Time(_inner) => {
340                let inner: &mut _Backend<AppendTimestamp> = unsafe { transmute(_inner.get()) };
341                inner.rename_files();
342            }
343        }
344    }
345
346    fn handle_old_files(&self) -> io::Result<()> {
347        match self {
348            Self::Num(_inner) => {
349                let inner: &mut _Backend<AppendCount> = unsafe { transmute(_inner.get()) };
350                inner.handle_old_files()
351            }
352            Self::Time(_inner) => {
353                let inner: &mut _Backend<AppendTimestamp> = unsafe { transmute(_inner.get()) };
354                inner.handle_old_files()
355            }
356        }
357    }
358}
359
/// Adaptation to file-rotate crate (Copyright (c) 2020 BourgondAries, MIT license)
struct _Backend<S: SuffixScheme> {
    // Directory that receives archived logs.
    archive_dir: PathBuf,
    base_path: PathBuf, // log_path with its parent replaced by archive_dir
    log_path: PathBuf,  // current (live) log
    // Whether/how many archives to gzip after rotation.
    compress: Compression,
    // Naming scheme for archive suffixes (numeric or timestamp).
    suffix_scheme: S,
    /// Known archive suffixes; the bool in each entry is whether or not there's a
    /// .gz suffix to the filename.
    suffixes: BTreeSet<SuffixInfo<S::Repr>>,
    // Cleanup policy for old archives.
    upkeep: Upkeep,
}
371
372fn compress(path: &Path) -> io::Result<()> {
373    let dest_path = PathBuf::from(format!("{}.gz", path.display()));
374
375    let mut src_file = File::open(path)?;
376    let dest_file = OpenOptions::new().write(true).create(true).append(false).open(&dest_path)?;
377
378    assert!(path.exists());
379    assert!(dest_path.exists());
380    let mut encoder = GzEncoder::new(dest_file, flate2::Compression::default());
381    io::copy(&mut src_file, &mut encoder)?;
382
383    fs::remove_file(path)?;
384
385    Ok(())
386}
387
impl<S: SuffixScheme> _Backend<S> {
    /// Create a backend for the live log `file`, archiving into `archive_dir`.
    /// Scans `archive_dir` so pre-existing archives are accounted for.
    fn new(
        archive_dir: PathBuf, file: &Path, upkeep: Upkeep, compress: Compression, schema: S,
    ) -> Self {
        // Archives are named after the log file but live in archive_dir.
        let base_path = archive_dir.as_path().join(Path::new(file.file_name().unwrap()));
        let mut s = Self {
            archive_dir,
            log_path: file.to_path_buf(),
            base_path,
            upkeep,
            compress,
            suffix_scheme: schema,
            suffixes: BTreeSet::new(),
        };
        s.ensure_dir();
        s.scan_suffixes();
        s
    }

    /// Create the archive directory if it does not exist yet. Panics on failure.
    #[inline]
    fn ensure_dir(&self) {
        if !self.archive_dir.exists() {
            let _ = fs::create_dir_all(&self.archive_dir).expect("create dir");
        }
    }

    /// Refresh the set of known archive suffixes from the filesystem.
    #[inline]
    fn scan_suffixes(&mut self) {
        self.suffixes = self.suffix_scheme.scan_suffixes(&self.base_path);
    }

    /// Move the live log into the archive under its next suffix, cascading renames
    /// of any archives whose names collide. Panics if a rename fails.
    #[inline]
    fn rename_files(&mut self) {
        self.ensure_dir();
        let new_suffix_info = self._move_file_with_suffix(None).expect("move files");
        self.suffixes.insert(new_suffix_info);
    }

    /// Apply the upkeep policy (delete too-old archives), then gzip archives beyond
    /// the configured number of uncompressed ones.
    #[inline]
    fn handle_old_files(&mut self) -> io::Result<()> {
        // Find the youngest suffix that is too old, and then remove all suffixes that are older or
        // equally old:
        // Start from oldest suffix, stop when we find a suffix that is not too old
        let mut result = Ok(());
        if let Upkeep::All = &self.upkeep {
            // Upkeep::All keeps every archive; nothing to delete.
        } else {
            let mut youngest_old = None;
            // `.rev()` walks from the oldest suffix (per the note above).
            for (i, suffix) in self.suffixes.iter().enumerate().rev() {
                if self.suffix_scheme.too_old(&suffix.suffix, i) {
                    // Keep deleting on error so one failure doesn't stop the rest,
                    // but remember the first failure in `result`.
                    result = result.and(fs::remove_file(suffix.to_path(&self.base_path)));
                    youngest_old = Some((*suffix).clone());
                } else {
                    break;
                }
            }
            if let Some(youngest_old) = youngest_old {
                // Removes all the too old
                let _ = self.suffixes.split_off(&youngest_old);
            }
        }

        // Compression
        if let Compression::OnRotate(max_file_n) = self.compress {
            // Number of archives beyond the allowed uncompressed count.
            let n = (self.suffixes.len() as i32 - max_file_n as i32).max(0) as usize;
            // The oldest N files should be compressed
            let suffixes_to_compress = self
                .suffixes
                .iter()
                .rev()
                .take(n)
                .filter(|info| !info.compressed)
                .cloned()
                .collect::<Vec<_>>();
            for info in suffixes_to_compress {
                // Do the compression
                let path = info.suffix.to_path(&self.base_path);
                compress(&path)?;

                // Record the `.gz` flag so future renames target the right path.
                self.suffixes.replace(SuffixInfo { compressed: true, ..info });
            }
        }
        result
    }

    /// Recursive function that keeps moving files if there's any file name collision.
    /// If `suffix` is `None`, it moves from basepath to next suffix given by the SuffixScheme
    /// Assumption: Any collision in file name is due to an old log file.
    ///
    /// Returns the suffix of the new file (the last suffix after possible cascade of renames).
    fn _move_file_with_suffix(
        &mut self, old_suffix_info: Option<SuffixInfo<S::Repr>>,
    ) -> io::Result<SuffixInfo<S::Repr>> {
        // NOTE: this newest_suffix is there only because AppendTimestamp specifically needs
        // it. Otherwise it might not be necessary to provide this to `rotate_file`. We could also
        // have passed the internal BTreeMap itself, but it would require to make SuffixInfo `pub`.
        let newest_suffix = self.suffixes.iter().next().map(|info| &info.suffix);

        let new_suffix = self.suffix_scheme.rotate_file(
            &self.base_path,
            newest_suffix,
            &old_suffix_info.clone().map(|i| i.suffix),
        )?;

        // The destination file/path eventual .gz suffix must match the source path
        let new_suffix_info = SuffixInfo {
            suffix: new_suffix,
            compressed: old_suffix_info.as_ref().map(|x| x.compressed).unwrap_or(false),
        };
        let new_path = new_suffix_info.to_path(&self.base_path);

        // Whatever exists that would block a move to the new suffix
        let existing_suffix_info = self.suffixes.get(&new_suffix_info).cloned();

        // Move destination file out of the way if it exists
        let newly_created_suffix = if let Some(existing_suffix_info) = existing_suffix_info {
            // We might move files in a way that the destination path doesn't equal the path that
            // was replaced. Due to possible `.gz`, a "conflicting" file doesn't mean that paths
            // are equal.
            self.suffixes.replace(new_suffix_info);
            // Recurse to move conflicting file.
            self._move_file_with_suffix(Some(existing_suffix_info))?
        } else {
            new_suffix_info
        };

        let old_path = match old_suffix_info {
            Some(suffix) => suffix.to_path(&self.base_path),
            None => self.log_path.clone(), // When archive_dir and parent of log_path is different
        };
        // Do the move
        assert!(old_path.exists());
        assert!(!new_path.exists());
        fs::rename(old_path, new_path)?;

        Ok(newly_created_suffix)
    }
}