captains_log/
rotation.rs

1//! ## Rotation configuration
2//!
3//! Config can be divided into these dimensions:
4//!
5//! * How to trigger log-rotate:
6//!
//!     - [Rotation::by_age()]: archive file names use the configurable time_fmt
8//!
9//!     - [Rotation::by_age_and_size()]
10//!
//!     - [Rotation::by_size()]: archives use numbered suffixes (`file.<n>`), since no time_fmt is set
12//!
13//! * Where to archive: Optional directory to move the file into.
14//!
//! * How to clean up old files:
16//!
17//!     - [Upkeep]
18//!
19//!     - [Rotation::compress_exclude]
20//!
21use file_rotate::compression::Compression;
22use file_rotate::suffix::{
23    AppendCount, AppendTimestamp, DateFrom, FileLimit, Representation, SuffixScheme,
24};
25use file_rotate::SuffixInfo;
26use flate2::write::GzEncoder;
27use parking_lot::Mutex;
28use std::cell::UnsafeCell;
29use std::collections::BTreeSet;
30use std::fs::{self, File, OpenOptions};
31use std::io;
32use std::mem::transmute;
33use std::path::{Path, PathBuf};
34use std::sync::Arc;
35use std::thread;
36use std::time::{Duration, SystemTime};
37
/// Granularity of age-based rotation.
///
/// `Debug` and `Eq` are derived so the type can be logged and used as a
/// well-behaved hash/map key (deriving `Hash` + `PartialEq` without `Eq`
/// was an inconsistency).
#[derive(Debug, Hash, Clone, Copy, PartialEq, Eq)]
pub enum Age {
    /// Rotate once per day.
    Day,
    /// Rotate once per hour.
    Hour,
}
43
/// Define the time to rotate files.
#[derive(Hash, Clone, Copy, PartialEq)]
pub struct ByAge {
    /// Rotate the file by day / hour.
    pub age_type: Age,

    /// Similar to the system's log-rotate:
    /// for Age::Day, the latest archive uses yesterday's timestamp;
    /// for Age::Hour, it uses the last hour's timestamp.
    pub use_last_time: bool,
}
55
56/// Define how to cleanup old files
57#[derive(Hash, Clone, Copy, PartialEq)]
58pub enum Upkeep {
59    /// Log file  older than the duration will be deleted.
60    Age(chrono::TimeDelta),
61    /// Only keeps the number of old logs.
62    Count(usize),
63    /// Does not delete any old logs.
64    All,
65}
66
/// Log rotation configuration.
///
/// `by_age` and `by_size` can be configured at the same time, meaning the log is rotated when either condition is met.
/// The configuration is invalid when `by_age` and `by_size` are both None (asserted in `build`).
#[derive(Hash)]
pub struct Rotation {
    /// Rotate when the configured period (day/hour) has elapsed.
    pub by_age: Option<ByAge>,
    /// Rotate when the file exceeds this many bytes.
    pub by_size: Option<u64>,

    /// If None, archive in `file.<number>` form, and Upkeep::Age will be ignored.
    ///
    /// If Some, archive in `file.<datetime>` form.
    pub time_fmt: Option<&'static str>,

    /// How to clean up old files.
    pub upkeep: Upkeep,

    /// Optional directory to move old logs into. If not configured, archives stay in the same dir as the
    /// current log.
    pub archive_dir: Option<PathBuf>,

    /// When Some(count), how many of the most recent archived logs stay uncompressed. When 0, all the archived logs are compressed.
    /// When None, archived logs are not compressed.
    pub compress_exclude: Option<usize>,
}
92
93impl Rotation {
94    ///
95    /// Archive log when size is reached.
96    ///
97    /// # Arguments:
98    ///
99    /// - `max_files`: When None, do not delete old files
100    pub fn by_size(size_limit: u64, max_files: Option<usize>) -> Self {
101        let upkeep =
102            if let Some(_max_files) = max_files { Upkeep::Count(_max_files) } else { Upkeep::All };
103        Self {
104            by_age: None,
105            by_size: Some(size_limit),
106            time_fmt: None,
107            upkeep,
108            archive_dir: None,
109            compress_exclude: None,
110        }
111    }
112
113    /// Rotate log by time (Day/Hour)
114    ///
115    /// # Arguments:
116    ///
117    /// - `use_last_time`: Similar to system's log-rotate,
118    ///
119    ///     - For Age::Day, the latest archive use yesterday's timestamp;
120    ///
121    ///     - For Age::Hour, use last hour's timestamp.
122    ///
123    /// - `time_fmt`: timestamp format of the archived files
124    ///
125    /// - `max_time`: Delete archived logs older than the time
126    pub fn by_age(
127        age: Age, use_last_time: bool, time_fmt: &'static str, max_time: Option<chrono::TimeDelta>,
128    ) -> Self {
129        let upkeep =
130            if let Some(_max_time) = max_time { Upkeep::Age(_max_time) } else { Upkeep::All };
131        Self {
132            by_age: Some(ByAge { age_type: age, use_last_time }),
133            by_size: None,
134            time_fmt: Some(time_fmt),
135            upkeep,
136            compress_exclude: None,
137            archive_dir: None,
138        }
139    }
140
141    /// Rotate log when time (Day/Hour) is reached or when size is reached
142    ///
143    /// # Arguments:
144    ///
145    /// - `use_last_time`: Similar to system's log-rotate,
146    ///
147    ///     - For Age::Day, the latest archive use yesterday's timestamp;
148    ///
149    ///     - For Age::Hour, use last hour's timestamp.
150    ///
151    /// - `time_fmt`: timestamp format of the archived files
152    ///
153    /// - `max_time`: Delete archived logs older than the time
154    pub fn by_age_and_size(
155        age: Age, size_limit: u64, use_last_time: bool, time_fmt: &'static str,
156        max_time: Option<chrono::TimeDelta>,
157    ) -> Self {
158        let upkeep =
159            if let Some(_max_time) = max_time { Upkeep::Age(_max_time) } else { Upkeep::All };
160        Self {
161            by_age: Some(ByAge { age_type: age, use_last_time }),
162            by_size: Some(size_limit),
163            time_fmt: Some(time_fmt),
164            upkeep,
165            compress_exclude: None,
166            archive_dir: None,
167        }
168    }
169
170    /// Compress archived logs, with a number of recent files left uncompressed.
171    ///
172    /// When `un_compress_files` == 0, means all files will be compressed.
173    pub fn compress_exclude(mut self, un_compress_files: usize) -> Self {
174        self.compress_exclude.replace(un_compress_files);
175        self
176    }
177
178    /// Move the old logs into an `archive_dir`.
179    pub fn archive_dir<P: Into<PathBuf>>(mut self, archive_dir: P) -> Self {
180        self.archive_dir.replace(archive_dir.into());
181        self
182    }
183
184    pub(crate) fn build(&self, file_path: &Path) -> LogRotate {
185        assert!(
186            self.by_age.is_some() || self.by_size.is_some(),
187            "by_age and by_size can not be both None"
188        );
189        let archive_dir = if let Some(_dir) = &self.archive_dir {
190            _dir.clone()
191        } else {
192            // TODO FIXME
193            file_path.parent().unwrap().to_path_buf()
194        };
195        let mut size = None;
196        let mut age = None;
197        let mut date_from = DateFrom::Now;
198        if let Some(by_age) = &self.by_age {
199            if by_age.use_last_time {
200                match by_age.age_type {
201                    Age::Hour => {
202                        date_from = DateFrom::DateHourAgo;
203                    }
204                    Age::Day => {
205                        date_from = DateFrom::DateYesterday;
206                    }
207                }
208            }
209            age.replace(LimiterAge::new(by_age.age_type));
210        }
211        if let Some(_size) = &self.by_size {
212            size.replace(LimiterSize::new(*_size));
213        }
214        let c = if let Some(compress) = &self.compress_exclude {
215            Compression::OnRotate(*compress)
216        } else {
217            Compression::None
218        };
219        let backend;
220        if let Some(time_fmt) = self.time_fmt {
221            let file_limit = match self.upkeep {
222                Upkeep::Age(d) => FileLimit::Age(d),
223                Upkeep::Count(c) => FileLimit::MaxFiles(c),
224                Upkeep::All => FileLimit::Unlimited,
225            };
226            let schema = AppendTimestamp { format: time_fmt, file_limit, date_from };
227            backend = Backend::Time(UnsafeCell::new(_Backend::new(
228                archive_dir.clone(),
229                file_path,
230                self.upkeep,
231                c,
232                schema,
233            )));
234        } else {
235            let file_limit = match self.upkeep {
236                Upkeep::Age(_) => 0,
237                Upkeep::Count(c) => c,
238                Upkeep::All => 0,
239            };
240            let schema = AppendCount::new(file_limit);
241            backend = Backend::Num(UnsafeCell::new(_Backend::new(
242                archive_dir.clone(),
243                file_path,
244                self.upkeep,
245                c,
246                schema,
247            )));
248        }
249        return LogRotate {
250            size_limit: size,
251            age_limit: age,
252            backend: Arc::new(backend),
253            th: Mutex::new(None),
254        };
255    }
256}
257
/// Runtime rotation state built from a [Rotation] config by [Rotation::build].
pub(crate) struct LogRotate {
    // Size-based trigger, when configured.
    size_limit: Option<LimiterSize>,
    // Age-based trigger, when configured.
    age_limit: Option<LimiterAge>,
    // Shared with the background cleanup thread spawned on each rotation.
    backend: Arc<Backend>,
    // Join handle of the last cleanup thread; waited on before the next rotation.
    th: Mutex<Option<thread::JoinHandle<()>>>,
}
264
265impl LogRotate {
266    pub fn rotate<S: FileSinkTrait>(&self, sink: &S) -> bool {
267        let mut need_rotate = false;
268        if let Some(age) = self.age_limit.as_ref() {
269            if age.check(sink) {
270                need_rotate = true;
271            }
272        }
273        if let Some(size) = self.size_limit.as_ref() {
274            if size.check(sink) {
275                need_rotate = true;
276            }
277        }
278        if need_rotate == false {
279            return false;
280        }
281        self.wait();
282
283        self.backend.rename_files();
284        let backend = self.backend.clone();
285        let th = thread::spawn(move || {
286            let _ = backend.handle_old_files();
287        });
288        self.th.lock().replace(th);
289        true
290    }
291
292    /// Wait for the last handle_old_files to finish.
293    pub fn wait(&self) {
294        if let Some(th) = self.th.lock().take() {
295            let _ = th.join();
296        }
297    }
298}
299
/// Size-based rotation trigger.
pub(crate) struct LimiterSize {
    // Maximum file size in bytes; rotation is requested once exceeded.
    limit: u64,
}
303
304impl LimiterSize {
305    pub fn new(size: u64) -> Self {
306        Self { limit: size }
307    }
308
309    #[inline]
310    pub fn check<S: FileSinkTrait>(&self, sink: &S) -> bool {
311        return sink.get_size() > self.limit;
312    }
313}
314
/// Age-based rotation trigger.
pub(crate) struct LimiterAge {
    // Maximum file age (one hour or one day) before rotation is requested.
    limit: Duration,
}
318
319impl LimiterAge {
320    pub fn new(limit: Age) -> Self {
321        Self {
322            limit: match limit {
323                Age::Hour => Duration::from_secs(60 * 60),
324                Age::Day => Duration::from_secs(24 * 60 * 60),
325            },
326        }
327    }
328
329    pub fn check<S: FileSinkTrait>(&self, sink: &S) -> bool {
330        let now = SystemTime::now();
331        let start_ts = sink.get_create_time();
332        match now.duration_since(start_ts) {
333            Ok(d) => return d > self.limit,
334            Err(_) => return true, // system time rotate back
335        }
336    }
337}
338
/// Minimal view of a log sink needed to evaluate rotation triggers.
pub(crate) trait FileSinkTrait {
    /// Creation time of the current log file.
    fn get_create_time(&self) -> SystemTime;

    /// Current size of the current log file, in bytes.
    fn get_size(&self) -> u64;
}
344
/// One archive backend per suffix naming scheme:
/// numbered (`file.<n>`) or timestamped (`file.<datetime>`).
///
/// NOTE(review): the `UnsafeCell` permits mutation through a shared `Arc`;
/// soundness relies on rotations being externally serialized — see the
/// `unsafe impl Send/Sync` below. Confirm no concurrent access paths exist.
enum Backend {
    Num(UnsafeCell<_Backend<AppendCount>>),
    Time(UnsafeCell<_Backend<AppendTimestamp>>),
}
349
// SAFETY(review): `UnsafeCell` makes `Backend` !Sync by default. These impls
// assert that the inner `_Backend` is never accessed concurrently; LogRotate
// appears to enforce this by `wait()`ing for the previous cleanup thread
// before the next rotation — TODO confirm there are no other access paths.
unsafe impl Send for Backend {}
unsafe impl Sync for Backend {}
352
353impl Backend {
354    fn rename_files(&self) {
355        match self {
356            Self::Num(_inner) => {
357                let inner: &mut _Backend<AppendCount> = unsafe { transmute(_inner.get()) };
358                inner.rename_files();
359            }
360            Self::Time(_inner) => {
361                let inner: &mut _Backend<AppendTimestamp> = unsafe { transmute(_inner.get()) };
362                inner.rename_files();
363            }
364        }
365    }
366
367    fn handle_old_files(&self) -> io::Result<()> {
368        match self {
369            Self::Num(_inner) => {
370                let inner: &mut _Backend<AppendCount> = unsafe { transmute(_inner.get()) };
371                inner.handle_old_files()
372            }
373            Self::Time(_inner) => {
374                let inner: &mut _Backend<AppendTimestamp> = unsafe { transmute(_inner.get()) };
375                inner.handle_old_files()
376            }
377        }
378    }
379}
380
/// Adaptation to file-rotate crate (Copyright (c) 2020 BourgondAries, MIT license)
struct _Backend<S: SuffixScheme> {
    // Directory archived logs are moved into.
    archive_dir: PathBuf,
    base_path: PathBuf, // log_path replaced parent with archive_dir
    log_path: PathBuf,  // current log
    // Whether (and how many) archived logs to gzip after rotation.
    compress: Compression,
    suffix_scheme: S,
    /// Known archive suffixes; each entry's `compressed` flag records whether
    /// the file name already carries a `.gz` suffix.
    suffixes: BTreeSet<SuffixInfo<S::Repr>>,
    // Retention policy applied in handle_old_files.
    upkeep: Upkeep,
}
392
393fn compress(path: &Path) -> io::Result<()> {
394    let dest_path = PathBuf::from(format!("{}.gz", path.display()));
395
396    let mut src_file = File::open(path)?;
397    let dest_file = OpenOptions::new().write(true).create(true).append(false).open(&dest_path)?;
398
399    assert!(path.exists());
400    assert!(dest_path.exists());
401    let mut encoder = GzEncoder::new(dest_file, flate2::Compression::default());
402    io::copy(&mut src_file, &mut encoder)?;
403
404    fs::remove_file(path)?;
405
406    Ok(())
407}
408
impl<S: SuffixScheme> _Backend<S> {
    /// Build a backend, ensure the archive directory exists, and scan it for
    /// archive files already present on disk.
    fn new(
        archive_dir: PathBuf, file: &Path, upkeep: Upkeep, compress: Compression, schema: S,
    ) -> Self {
        // base_path = archive_dir joined with the live log's file name;
        // all archive suffixes are appended to this path.
        let base_path = archive_dir.as_path().join(Path::new(file.file_name().unwrap()));
        let mut s = Self {
            archive_dir,
            log_path: file.to_path_buf(),
            base_path,
            upkeep,
            compress,
            suffix_scheme: schema,
            suffixes: BTreeSet::new(),
        };
        s.ensure_dir();
        s.scan_suffixes();
        s
    }

    /// Create the archive directory if it does not exist yet.
    /// Panics (via expect) when creation fails.
    #[inline]
    fn ensure_dir(&self) {
        if !self.archive_dir.exists() {
            let _ = fs::create_dir_all(&self.archive_dir).expect("create dir");
        }
    }

    /// Refresh the suffix set from the files currently on disk.
    #[inline]
    fn scan_suffixes(&mut self) {
        self.suffixes = self.suffix_scheme.scan_suffixes(&self.base_path);
    }

    /// Move the live log to its first archive suffix (cascading renames for
    /// any colliding older archives) and record the new suffix.
    #[inline]
    fn rename_files(&mut self) {
        self.ensure_dir();
        let new_suffix_info = self._move_file_with_suffix(None).expect("move files");
        self.suffixes.insert(new_suffix_info);
    }

    /// Apply the retention policy (delete too-old archives) and then the
    /// compression policy to the remaining archives.
    #[inline]
    fn handle_old_files(&mut self) -> io::Result<()> {
        // Find the youngest suffix that is too old, and then remove all suffixes that are older or
        // equally old:
        // Start from oldest suffix, stop when we find a suffix that is not too old
        let mut result = Ok(());
        if let Upkeep::All = &self.upkeep {
            // Keep everything: skip the deletion pass entirely.
        } else {
            let mut youngest_old = None;
            // NOTE(review): iteration order assumes the set is ordered
            // newest-first (matching `newest_suffix` below), so .rev()
            // walks oldest-first — confirm against SuffixInfo's Ord impl.
            for (i, suffix) in self.suffixes.iter().enumerate().rev() {
                if self.suffix_scheme.too_old(&suffix.suffix, i) {
                    // Accumulate the first deletion error but keep deleting.
                    result = result.and(fs::remove_file(suffix.to_path(&self.base_path)));
                    youngest_old = Some((*suffix).clone());
                } else {
                    break;
                }
            }
            if let Some(youngest_old) = youngest_old {
                // Removes all the too old
                let _ = self.suffixes.split_off(&youngest_old);
            }
        }

        // Compression
        if let Compression::OnRotate(max_file_n) = self.compress {
            // Leave the newest max_file_n archives uncompressed.
            let n = (self.suffixes.len() as i32 - max_file_n as i32).max(0) as usize;
            // The oldest N files should be compressed
            let suffixes_to_compress = self
                .suffixes
                .iter()
                .rev()
                .take(n)
                .filter(|info| !info.compressed)
                .cloned()
                .collect::<Vec<_>>();
            for info in suffixes_to_compress {
                // Do the compression
                let path = info.suffix.to_path(&self.base_path);
                compress(&path)?;

                // Re-insert with the compressed flag set so the path gains .gz.
                self.suffixes.replace(SuffixInfo { compressed: true, ..info });
            }
        }
        result
    }

    /// Recursive function that keeps moving files if there's any file name collision.
    /// If `suffix` is `None`, it moves from basepath to next suffix given by the SuffixScheme
    /// Assumption: Any collision in file name is due to an old log file.
    ///
    /// Returns the suffix of the new file (the last suffix after possible cascade of renames).
    fn _move_file_with_suffix(
        &mut self, old_suffix_info: Option<SuffixInfo<S::Repr>>,
    ) -> io::Result<SuffixInfo<S::Repr>> {
        // NOTE: this newest_suffix is there only because AppendTimestamp specifically needs
        // it. Otherwise it might not be necessary to provide this to `rotate_file`. We could also
        // have passed the internal BTreeMap itself, but it would require to make SuffixInfo `pub`.
        let newest_suffix = self.suffixes.iter().next().map(|info| &info.suffix);

        let new_suffix = self.suffix_scheme.rotate_file(
            &self.base_path,
            newest_suffix,
            &old_suffix_info.clone().map(|i| i.suffix),
        )?;

        // The destination file/path eventual .gz suffix must match the source path
        let new_suffix_info = SuffixInfo {
            suffix: new_suffix,
            compressed: old_suffix_info.as_ref().map(|x| x.compressed).unwrap_or(false),
        };
        let new_path = new_suffix_info.to_path(&self.base_path);

        // Whatever exists that would block a move to the new suffix
        let existing_suffix_info = self.suffixes.get(&new_suffix_info).cloned();

        // Move destination file out of the way if it exists
        let newly_created_suffix = if let Some(existing_suffix_info) = existing_suffix_info {
            // We might move files in a way that the destination path doesn't equal the path that
            // was replaced. Due to possible `.gz`, a "conflicting" file doesn't mean that paths
            // are equal.
            self.suffixes.replace(new_suffix_info);
            // Recurse to move conflicting file.
            self._move_file_with_suffix(Some(existing_suffix_info))?
        } else {
            new_suffix_info
        };

        let old_path = match old_suffix_info {
            Some(suffix) => suffix.to_path(&self.base_path),
            None => self.log_path.clone(), // When archive_dir and parent of log_path is different
        };
        // Do the move
        assert!(old_path.exists());
        assert!(!new_path.exists());
        fs::rename(old_path, new_path)?;

        Ok(newly_created_suffix)
    }
}
545}