// captains_log/rotation.rs

use file_rotate::SuffixInfo;
use file_rotate::compression::Compression;
use file_rotate::suffix::{
    AppendCount, AppendTimestamp, DateFrom, FileLimit, Representation, SuffixScheme,
};
use flate2::write::GzEncoder;
use parking_lot::Mutex;
use std::cell::UnsafeCell;
use std::collections::BTreeSet;
use std::fs::{self, File, OpenOptions};
use std::io;
use std::mem::transmute;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime};

#[derive(Hash, Clone, Copy, PartialEq)]
pub enum Age {
    Day,
    Hour,
}

#[derive(Hash, Clone, Copy, PartialEq)]
pub struct ByAge {
    /// Rotate the file by day / hour.
    pub age_type: Age,

    /// Similar to the system's logrotate:
    /// for `Age::Day`, the latest archive uses yesterday's timestamp;
    /// for `Age::Hour`, it uses the last hour's timestamp.
    pub use_last_time: bool,
}

#[derive(Hash, Clone, Copy, PartialEq)]
pub enum Upkeep {
    /// Log files older than this duration will be deleted.
    Age(chrono::TimeDelta),
    /// Keep only this number of old logs.
    Count(usize),
    /// Do not delete any old logs.
    All,
}

/// Log rotation configuration.
///
/// `by_age` and `by_size` can be configured at the same time; the log is rotated when either
/// condition is met. It is invalid for `by_age` and `by_size` to both be `None`.
#[derive(Hash)]
pub struct Rotation {
    pub by_age: Option<ByAge>,
    pub by_size: Option<u64>,

    /// If `None`, archives are named in `file.<number>` form, and `Upkeep::Age` is ignored.
    ///
    /// If `Some`, archives are named in `file.<datetime>` form.
    pub time_fmt: Option<&'static str>,

    /// How to clean up old files.
    pub upkeep: Upkeep,

    /// Whether to move old logs into an `archive_dir`. If not configured, archives stay in the
    /// same directory as the current log.
    pub archive_dir: Option<PathBuf>,

    /// When `Some(count)`, the number of recent archived logs left uncompressed (0 means all
    /// archives are compressed). When `None`, archived logs are not compressed.
    pub compress_exclude: Option<usize>,
}

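// A minimal sketch of a full configuration (field values here are illustrative, not defaults):
// rotate daily or at 64 MiB, keep 30 days of archives, and compress all but the 3 newest ones.
// The builder methods below cover the common cases.
//
//     let rotation = Rotation {
//         by_age: Some(ByAge { age_type: Age::Day, use_last_time: true }),
//         by_size: Some(64 * 1024 * 1024),
//         time_fmt: Some("%Y%m%d"),
//         upkeep: Upkeep::Age(chrono::TimeDelta::days(30)),
//         archive_dir: None,
//         compress_exclude: Some(3),
//     };
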
impl Rotation {
    /// Archive the log when the size limit is reached.
    ///
    /// # Arguments:
    ///
    /// - `max_files`: When `None`, do not delete old files.
    pub fn by_size(size_limit: u64, max_files: Option<usize>) -> Self {
        let upkeep =
            if let Some(_max_files) = max_files { Upkeep::Count(_max_files) } else { Upkeep::All };
        Self {
            by_age: None,
            by_size: Some(size_limit),
            time_fmt: None,
            upkeep,
            archive_dir: None,
            compress_exclude: None,
        }
    }

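    // Usage sketch (illustrative values): rotate once the live log exceeds 50 MiB,
    // keeping only the 10 newest numbered archives (`file.1` .. `file.10`).
    //
    //     let rotation = Rotation::by_size(50 * 1024 * 1024, Some(10));
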
    /// Rotate the log by time (Day/Hour).
    ///
    /// # Arguments:
    ///
    /// - `use_last_time`: Similar to the system's logrotate,
    ///
    ///     - for `Age::Day`, the latest archive uses yesterday's timestamp;
    ///
    ///     - for `Age::Hour`, it uses the last hour's timestamp.
    ///
    /// - `time_fmt`: timestamp format of the archived files
    ///
    /// - `max_time`: Delete archived logs older than this duration
    pub fn by_age(
        age: Age, use_last_time: bool, time_fmt: &'static str, max_time: Option<chrono::TimeDelta>,
    ) -> Self {
        let upkeep =
            if let Some(_max_time) = max_time { Upkeep::Age(_max_time) } else { Upkeep::All };
        Self {
            by_age: Some(ByAge { age_type: age, use_last_time }),
            by_size: None,
            time_fmt: Some(time_fmt),
            upkeep,
            compress_exclude: None,
            archive_dir: None,
        }
    }

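    // Usage sketch (illustrative values): rotate daily, stamp each archive with
    // yesterday's date in `%Y%m%d` form, and delete archives older than two weeks.
    //
    //     let rotation =
    //         Rotation::by_age(Age::Day, true, "%Y%m%d", Some(chrono::TimeDelta::weeks(2)));
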
    /// Rotate the log when the time (Day/Hour) or the size limit is reached.
    ///
    /// # Arguments:
    ///
    /// - `use_last_time`: Similar to the system's logrotate,
    ///
    ///     - for `Age::Day`, the latest archive uses yesterday's timestamp;
    ///
    ///     - for `Age::Hour`, it uses the last hour's timestamp.
    ///
    /// - `time_fmt`: timestamp format of the archived files
    ///
    /// - `max_time`: Delete archived logs older than this duration
    pub fn by_age_and_size(
        age: Age, size_limit: u64, use_last_time: bool, time_fmt: &'static str,
        max_time: Option<chrono::TimeDelta>,
    ) -> Self {
        let upkeep =
            if let Some(_max_time) = max_time { Upkeep::Age(_max_time) } else { Upkeep::All };
        Self {
            by_age: Some(ByAge { age_type: age, use_last_time }),
            by_size: Some(size_limit),
            time_fmt: Some(time_fmt),
            upkeep,
            compress_exclude: None,
            archive_dir: None,
        }
    }

    /// Compress archived logs, leaving a number of recent files uncompressed.
    ///
    /// When `un_compress_files` is 0, all archived files are compressed.
    pub fn compress_exclude(mut self, un_compress_files: usize) -> Self {
        self.compress_exclude.replace(un_compress_files);
        self
    }

    /// Move old logs into an `archive_dir`.
    pub fn archive_dir<P: Into<PathBuf>>(mut self, archive_dir: P) -> Self {
        self.archive_dir.replace(archive_dir.into());
        self
    }

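    // The builders chain; a sketch (illustrative values) combining both triggers:
    // rotate hourly or at 10 MiB, move archives into "./archive", and compress
    // everything except the 2 newest archives.
    //
    //     let rotation = Rotation::by_age_and_size(
    //             Age::Hour, 10 * 1024 * 1024, false, "%Y%m%dT%H", None)
    //         .archive_dir("./archive")
    //         .compress_exclude(2);
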
    pub(crate) fn build(&self, file_path: &Path) -> LogRotate {
        assert!(
            self.by_age.is_some() || self.by_size.is_some(),
            "by_age and by_size cannot both be None"
        );
        let archive_dir = if let Some(_dir) = &self.archive_dir {
            _dir.clone()
        } else {
            // TODO FIXME
            file_path.parent().unwrap().to_path_buf()
        };
        let mut size = None;
        let mut age = None;
        let mut date_from = DateFrom::Now;
        if let Some(by_age) = &self.by_age {
            if by_age.use_last_time {
                match by_age.age_type {
                    Age::Hour => {
                        date_from = DateFrom::DateHourAgo;
                    }
                    Age::Day => {
                        date_from = DateFrom::DateYesterday;
                    }
                }
            }
            age.replace(LimiterAge::new(by_age.age_type));
        }
        if let Some(_size) = &self.by_size {
            size.replace(LimiterSize::new(*_size));
        }
        let c = if let Some(compress) = &self.compress_exclude {
            Compression::OnRotate(*compress)
        } else {
            Compression::None
        };
        let backend;
        if let Some(time_fmt) = self.time_fmt {
            let file_limit = match self.upkeep {
                Upkeep::Age(d) => FileLimit::Age(d),
                Upkeep::Count(c) => FileLimit::MaxFiles(c),
                Upkeep::All => FileLimit::Unlimited,
            };
            let schema = AppendTimestamp { format: time_fmt, file_limit, date_from };
            backend = Backend::Time(UnsafeCell::new(_Backend::new(
                archive_dir.clone(),
                file_path,
                self.upkeep,
                c,
                schema,
            )));
        } else {
            let file_limit = match self.upkeep {
                Upkeep::Age(_) => 0,
                Upkeep::Count(c) => c,
                Upkeep::All => 0,
            };
            let schema = AppendCount::new(file_limit);
            backend = Backend::Num(UnsafeCell::new(_Backend::new(
                archive_dir.clone(),
                file_path,
                self.upkeep,
                c,
                schema,
            )));
        }
        LogRotate {
            size_limit: size,
            age_limit: age,
            backend: Arc::new(backend),
            th: Mutex::new(None),
        }
    }
}

pub(crate) struct LogRotate {
    size_limit: Option<LimiterSize>,
    age_limit: Option<LimiterAge>,
    backend: Arc<Backend>,
    th: Mutex<Option<thread::JoinHandle<()>>>,
}

impl LogRotate {
    pub fn rotate<S: FileSinkTrait>(&self, sink: &S) -> bool {
        let mut need_rotate = false;
        if let Some(age) = self.age_limit.as_ref() {
            if age.check(sink) {
                need_rotate = true;
            }
        }
        if let Some(size) = self.size_limit.as_ref() {
            if size.check(sink) {
                need_rotate = true;
            }
        }
        if !need_rotate {
            return false;
        }
        // Make sure the previous cleanup thread has finished before touching the files again.
        self.wait();

        self.backend.rename_files();
        // Deletion / compression of old archives runs on a background thread.
        let backend = self.backend.clone();
        let th = thread::spawn(move || {
            let _ = backend.handle_old_files();
        });
        self.th.lock().replace(th);
        true
    }

    /// Wait for the last handle_old_files to finish.
    pub fn wait(&self) {
        if let Some(th) = self.th.lock().take() {
            let _ = th.join();
        }
    }
}

pub(crate) struct LimiterSize {
    limit: u64,
}

impl LimiterSize {
    pub fn new(size: u64) -> Self {
        Self { limit: size }
    }

    #[inline]
    pub fn check<S: FileSinkTrait>(&self, sink: &S) -> bool {
        sink.get_size() > self.limit
    }
}

pub(crate) struct LimiterAge {
    limit: Duration,
}

impl LimiterAge {
    pub fn new(limit: Age) -> Self {
        Self {
            limit: match limit {
                Age::Hour => Duration::from_secs(60 * 60),
                Age::Day => Duration::from_secs(24 * 60 * 60),
            },
        }
    }

    pub fn check<S: FileSinkTrait>(&self, sink: &S) -> bool {
        let now = SystemTime::now();
        let start_ts = sink.get_create_time();
        match now.duration_since(start_ts) {
            Ok(d) => d > self.limit,
            Err(_) => true, // the system clock moved backwards; rotate anyway
        }
    }
}

pub(crate) trait FileSinkTrait {
    fn get_create_time(&self) -> SystemTime;

    fn get_size(&self) -> u64;
}

enum Backend {
    Num(UnsafeCell<_Backend<AppendCount>>),
    Time(UnsafeCell<_Backend<AppendTimestamp>>),
}

// SAFETY: access to the `UnsafeCell` contents is serialized by `LogRotate`, which joins the
// previous cleanup thread (`wait()`) before the next rotation touches the backend.
unsafe impl Send for Backend {}
unsafe impl Sync for Backend {}

impl Backend {
    fn rename_files(&self) {
        match self {
            Self::Num(_inner) => {
                let inner: &mut _Backend<AppendCount> = unsafe { transmute(_inner.get()) };
                inner.rename_files();
            }
            Self::Time(_inner) => {
                let inner: &mut _Backend<AppendTimestamp> = unsafe { transmute(_inner.get()) };
                inner.rename_files();
            }
        }
    }

    fn handle_old_files(&self) -> io::Result<()> {
        match self {
            Self::Num(_inner) => {
                let inner: &mut _Backend<AppendCount> = unsafe { transmute(_inner.get()) };
                inner.handle_old_files()
            }
            Self::Time(_inner) => {
                let inner: &mut _Backend<AppendTimestamp> = unsafe { transmute(_inner.get()) };
                inner.handle_old_files()
            }
        }
    }
}

/// Adaptation to file-rotate crate (Copyright (c) 2020 BourgondAries, MIT license)
struct _Backend<S: SuffixScheme> {
    archive_dir: PathBuf,
    base_path: PathBuf, // `log_path` with its parent directory replaced by `archive_dir`
    log_path: PathBuf,  // current log
    compress: Compression,
    suffix_scheme: S,
    /// Known archive suffixes; the `compressed` flag records whether the file has a `.gz` suffix.
    suffixes: BTreeSet<SuffixInfo<S::Repr>>,
    upkeep: Upkeep,
}

/// Gzip-compress `path` into `<path>.gz`, then remove the original file.
fn compress(path: &Path) -> io::Result<()> {
    let dest_path = PathBuf::from(format!("{}.gz", path.display()));

    let mut src_file = File::open(path)?;
    let dest_file = OpenOptions::new().write(true).create(true).append(false).open(&dest_path)?;

    assert!(path.exists());
    assert!(dest_path.exists());
    let mut encoder = GzEncoder::new(dest_file, flate2::Compression::default());
    io::copy(&mut src_file, &mut encoder)?;
    // Flush the gzip trailer and surface any write error before the original is deleted.
    encoder.finish()?;

    fs::remove_file(path)?;

    Ok(())
}

impl<S: SuffixScheme> _Backend<S> {
    fn new(
        archive_dir: PathBuf, file: &Path, upkeep: Upkeep, compress: Compression, schema: S,
    ) -> Self {
        let base_path = archive_dir.as_path().join(Path::new(file.file_name().unwrap()));
        let mut s = Self {
            archive_dir,
            log_path: file.to_path_buf(),
            base_path,
            upkeep,
            compress,
            suffix_scheme: schema,
            suffixes: BTreeSet::new(),
        };
        s.ensure_dir();
        s.scan_suffixes();
        s
    }

    #[inline]
    fn ensure_dir(&self) {
        if !self.archive_dir.exists() {
            fs::create_dir_all(&self.archive_dir).expect("create dir");
        }
    }

    #[inline]
    fn scan_suffixes(&mut self) {
        self.suffixes = self.suffix_scheme.scan_suffixes(&self.base_path);
    }

    #[inline]
    fn rename_files(&mut self) {
        self.ensure_dir();
        let new_suffix_info = self._move_file_with_suffix(None).expect("move files");
        self.suffixes.insert(new_suffix_info);
    }

    #[inline]
    fn handle_old_files(&mut self) -> io::Result<()> {
        // Find the youngest suffix that is too old, then remove all suffixes that are older or
        // equally old. Start from the oldest suffix and stop at the first one that is not too old.
        let mut result = Ok(());
        if !matches!(self.upkeep, Upkeep::All) {
            let mut youngest_old = None;
            for (i, suffix) in self.suffixes.iter().enumerate().rev() {
                if self.suffix_scheme.too_old(&suffix.suffix, i) {
                    result = result.and(fs::remove_file(suffix.to_path(&self.base_path)));
                    youngest_old = Some((*suffix).clone());
                } else {
                    break;
                }
            }
            if let Some(youngest_old) = youngest_old {
                // Remove all the too-old suffixes from the in-memory set as well.
                let _ = self.suffixes.split_off(&youngest_old);
            }
        }

        // Compression
        if let Compression::OnRotate(max_file_n) = self.compress {
            let n = (self.suffixes.len() as i32 - max_file_n as i32).max(0) as usize;
            // The oldest `n` files should be compressed.
            let suffixes_to_compress = self
                .suffixes
                .iter()
                .rev()
                .take(n)
                .filter(|info| !info.compressed)
                .cloned()
                .collect::<Vec<_>>();
            for info in suffixes_to_compress {
                // Do the compression
                let path = info.suffix.to_path(&self.base_path);
                compress(&path)?;

                self.suffixes.replace(SuffixInfo { compressed: true, ..info });
            }
        }
        result
    }

    /// Recursive function that keeps moving files as long as there is a file-name collision.
    /// If `old_suffix_info` is `None`, it moves from the base path to the next suffix given by
    /// the `SuffixScheme`.
    /// Assumption: any collision in file name is due to an old log file.
    ///
    /// Returns the suffix of the new file (the last suffix after a possible cascade of renames).
    fn _move_file_with_suffix(
        &mut self, old_suffix_info: Option<SuffixInfo<S::Repr>>,
    ) -> io::Result<SuffixInfo<S::Repr>> {
        // NOTE: this newest_suffix is there only because AppendTimestamp specifically needs
        // it. Otherwise it might not be necessary to provide this to `rotate_file`. We could also
        // have passed the internal BTreeSet itself, but it would require to make SuffixInfo `pub`.
        let newest_suffix = self.suffixes.iter().next().map(|info| &info.suffix);

        let new_suffix = self.suffix_scheme.rotate_file(
            &self.base_path,
            newest_suffix,
            &old_suffix_info.clone().map(|i| i.suffix),
        )?;

        // The destination path's eventual `.gz` suffix must match the source path's.
        let new_suffix_info = SuffixInfo {
            suffix: new_suffix,
            compressed: old_suffix_info.as_ref().map(|x| x.compressed).unwrap_or(false),
        };
        let new_path = new_suffix_info.to_path(&self.base_path);

        // Whatever exists that would block a move to the new suffix
        let existing_suffix_info = self.suffixes.get(&new_suffix_info).cloned();

        // Move the destination file out of the way if it exists
        let newly_created_suffix = if let Some(existing_suffix_info) = existing_suffix_info {
            // We might move files in a way that the destination path doesn't equal the path that
            // was replaced. Due to possible `.gz`, a "conflicting" file doesn't mean that paths
            // are equal.
            self.suffixes.replace(new_suffix_info);
            // Recurse to move the conflicting file.
            self._move_file_with_suffix(Some(existing_suffix_info))?
        } else {
            new_suffix_info
        };

        let old_path = match old_suffix_info {
            Some(suffix) => suffix.to_path(&self.base_path),
            None => self.log_path.clone(), // `None` means moving the live log itself (its parent
                                           // may differ from `archive_dir`)
        };
        // Do the move
        assert!(old_path.exists());
        assert!(!new_path.exists());
        fs::rename(old_path, new_path)?;

        Ok(newly_created_suffix)
    }
}