//! captains_log/rotation.rs — log rotation configuration and archive backend.

1use file_rotate::SuffixInfo;
2use file_rotate::compression::Compression;
3use file_rotate::suffix::{
4    AppendCount, AppendTimestamp, DateFrom, FileLimit, Representation, SuffixScheme,
5};
6use flate2::write::GzEncoder;
7use parking_lot::Mutex;
8use std::cell::UnsafeCell;
9use std::collections::BTreeSet;
10use std::fs::{self, File, OpenOptions};
11use std::io;
12use std::mem::transmute;
13use std::path::{Path, PathBuf};
14use std::sync::Arc;
15use std::thread;
16use std::time::{Duration, SystemTime};
17
/// Granularity used for age-based log rotation.
#[derive(Debug, Hash, Clone, Copy, PartialEq, Eq)]
pub enum Age {
    /// Rotate once per day.
    Day,
    /// Rotate once per hour.
    Hour,
}
23
/// Settings for age-based rotation.
#[derive(Hash, Clone, Copy, PartialEq)]
pub struct ByAge {
    /// Rotate the file by day / hour.
    pub age_type: Age,

    /// Similar to the system's logrotate:
    /// for Age::Day, the latest archive uses yesterday's timestamp;
    /// for Age::Hour, it uses the last hour's timestamp.
    pub use_last_time: bool,
}
34
35#[derive(Hash, Clone, Copy, PartialEq)]
36pub enum Upkeep {
37    /// Log file  older than the duration will be deleted.
38    Age(chrono::TimeDelta),
39    /// Only keeps the number of old logs.
40    Count(usize),
41    /// Does not delete any old logs.
42    All,
43}
44
/// Log rotation configuration.
///
/// `by_age` and `by_size` can be configured at the same time, meaning the log will be rotated
/// when any of the conditions is met.
/// It is invalid for `by_age` and `by_size` to both be None (`build` asserts this).
#[derive(Hash)]
pub struct Rotation {
    /// Rotate when the current log file is older than a day/hour.
    pub by_age: Option<ByAge>,
    /// Rotate when the current log file exceeds this size in bytes.
    pub by_size: Option<u64>,

    /// If None, archive in `file.<number>` form, and Upkeep::Age will be ignored.
    ///
    /// If Some, archive in `file.<datetime>` form, using this strftime-style format.
    pub time_fmt: Option<&'static str>,

    /// How to clean up the old files.
    pub upkeep: Upkeep,

    /// Whether to move the log into an archive_dir. If not configured, it's the same dir as
    /// the current log.
    pub archive_dir: Option<PathBuf>,

    /// When Some(count), indicates how many archived logs stay uncompressed. When 0, all the
    /// archived logs are compressed.
    /// When None, do not compress archived logs.
    pub compress_exclude: Option<usize>,
}
70
71impl Rotation {
72    /// max_files: When None, do not delete old files
73    pub fn by_size(size_limit: u64, max_files: Option<usize>) -> Self {
74        let upkeep =
75            if let Some(_max_files) = max_files { Upkeep::Count(_max_files) } else { Upkeep::All };
76        Self {
77            by_age: None,
78            by_size: Some(size_limit),
79            time_fmt: None,
80            upkeep,
81            archive_dir: None,
82            compress_exclude: None,
83        }
84    }
85
86    pub fn by_age(
87        age: Age, use_last_time: bool, time_fmt: &'static str, max_time: Option<chrono::TimeDelta>,
88    ) -> Self {
89        let upkeep =
90            if let Some(_max_time) = max_time { Upkeep::Age(_max_time) } else { Upkeep::All };
91        Self {
92            by_age: Some(ByAge { age_type: age, use_last_time }),
93            by_size: None,
94            time_fmt: Some(time_fmt),
95            upkeep,
96            compress_exclude: None,
97            archive_dir: None,
98        }
99    }
100
101    /// Compress archived logs, with a number of recent files left uncompressed
102    pub fn compress_exclude(mut self, un_compress_files: usize) -> Self {
103        self.compress_exclude.replace(un_compress_files);
104        self
105    }
106
107    /// Move the old logs into an `archive_dir`.
108    pub fn archive_dir<P: Into<PathBuf>>(mut self, archive_dir: P) -> Self {
109        self.archive_dir.replace(archive_dir.into());
110        self
111    }
112
113    pub(crate) fn build(&self, file_path: &Path) -> LogRotate {
114        assert!(
115            self.by_age.is_some() || self.by_size.is_some(),
116            "by_age and by_size can not be both None"
117        );
118        let archive_dir = if let Some(_dir) = &self.archive_dir {
119            _dir.clone()
120        } else {
121            // TODO FIXME
122            file_path.parent().unwrap().to_path_buf()
123        };
124        let mut size = None;
125        let mut age = None;
126        let mut date_from = DateFrom::Now;
127        if let Some(by_age) = &self.by_age {
128            if by_age.use_last_time {
129                match by_age.age_type {
130                    Age::Hour => {
131                        date_from = DateFrom::DateHourAgo;
132                    }
133                    Age::Day => {
134                        date_from = DateFrom::DateYesterday;
135                    }
136                }
137            }
138            age.replace(LimiterAge::new(by_age.age_type));
139        }
140        if let Some(_size) = &self.by_size {
141            size.replace(LimiterSize::new(*_size));
142        }
143        let c = if let Some(compress) = &self.compress_exclude {
144            Compression::OnRotate(*compress)
145        } else {
146            Compression::None
147        };
148        let backend;
149        if let Some(time_fmt) = self.time_fmt {
150            let file_limit = match self.upkeep {
151                Upkeep::Age(d) => FileLimit::Age(d),
152                Upkeep::Count(c) => FileLimit::MaxFiles(c),
153                Upkeep::All => FileLimit::Unlimited,
154            };
155            let schema = AppendTimestamp { format: time_fmt, file_limit, date_from };
156            backend = Backend::Time(UnsafeCell::new(_Backend::new(
157                archive_dir.clone(),
158                file_path,
159                self.upkeep,
160                c,
161                schema,
162            )));
163        } else {
164            let file_limit = match self.upkeep {
165                Upkeep::Age(_) => 0,
166                Upkeep::Count(c) => c,
167                Upkeep::All => 0,
168            };
169            let schema = AppendCount::new(file_limit);
170            backend = Backend::Num(UnsafeCell::new(_Backend::new(
171                archive_dir.clone(),
172                file_path,
173                self.upkeep,
174                c,
175                schema,
176            )));
177        }
178        return LogRotate {
179            size_limit: size,
180            age_limit: age,
181            backend: Arc::new(backend),
182            th: Mutex::new(None),
183        };
184    }
185}
186
/// Runtime rotation state built from a `Rotation` config: the limit checkers
/// plus the rename/cleanup backend.
pub(crate) struct LogRotate {
    // Triggers rotation on file size; None when size-based rotation is off.
    size_limit: Option<LimiterSize>,
    // Triggers rotation on file age; None when age-based rotation is off.
    age_limit: Option<LimiterAge>,
    // Shared with the background cleanup thread spawned by `rotate`.
    backend: Arc<Backend>,
    // Join handle of the in-flight cleanup thread, if any; `wait` drains it.
    th: Mutex<Option<thread::JoinHandle<()>>>,
}
193
194impl LogRotate {
195    pub fn rotate<S: FileSinkTrait>(&self, sink: &S) -> bool {
196        let mut need_rotate = false;
197        if let Some(age) = self.age_limit.as_ref() {
198            if age.check(sink) {
199                need_rotate = true;
200            }
201        }
202        if let Some(size) = self.size_limit.as_ref() {
203            if size.check(sink) {
204                need_rotate = true;
205            }
206        }
207        if need_rotate == false {
208            return false;
209        }
210        self.wait();
211
212        self.backend.rename_files();
213        let backend = self.backend.clone();
214        let th = thread::spawn(move || {
215            let _ = backend.handle_old_files();
216        });
217        self.th.lock().replace(th);
218        true
219    }
220
221    /// Wait for the last handle_old_files to finish.
222    pub fn wait(&self) {
223        if let Some(th) = self.th.lock().take() {
224            let _ = th.join();
225        }
226    }
227}
228
229pub(crate) struct LimiterSize {
230    limit: u64,
231}
232
233impl LimiterSize {
234    pub fn new(size: u64) -> Self {
235        Self { limit: size }
236    }
237
238    #[inline]
239    pub fn check<S: FileSinkTrait>(&self, sink: &S) -> bool {
240        return sink.get_size() > self.limit;
241    }
242}
243
244pub(crate) struct LimiterAge {
245    limit: Duration,
246}
247
248impl LimiterAge {
249    pub fn new(limit: Age) -> Self {
250        Self {
251            limit: match limit {
252                Age::Hour => Duration::from_secs(60 * 60),
253                Age::Day => Duration::from_secs(24 * 60 * 60),
254            },
255        }
256    }
257
258    pub fn check<S: FileSinkTrait>(&self, sink: &S) -> bool {
259        let now = SystemTime::now();
260        let start_ts = sink.get_create_time();
261        match now.duration_since(start_ts) {
262            Ok(d) => return d > self.limit,
263            Err(_) => return true, // system time rotate back
264        }
265    }
266}
267
/// Minimal view of a log sink that the rotation limiters need.
pub(crate) trait FileSinkTrait {
    /// Creation time of the current log file.
    fn get_create_time(&self) -> SystemTime;

    /// Current size of the current log file, in bytes.
    fn get_size(&self) -> u64;
}
273
/// Dispatch wrapper over the two archive-suffix schemes.
enum Backend {
    /// Archives named `file.<number>` (AppendCount scheme).
    Num(UnsafeCell<_Backend<AppendCount>>),
    /// Archives named `file.<datetime>` (AppendTimestamp scheme).
    Time(UnsafeCell<_Backend<AppendTimestamp>>),
}

// SAFETY(review): these impls assert that access to the inner `UnsafeCell` is
// externally serialized. `LogRotate::rotate` joins the previous cleanup thread
// (`wait`) before touching the backend again, so at most one thread mutates the
// cell at a time — confirm no other call path bypasses that ordering.
unsafe impl Send for Backend {}
unsafe impl Sync for Backend {}
281
282impl Backend {
283    fn rename_files(&self) {
284        match self {
285            Self::Num(_inner) => {
286                let inner: &mut _Backend<AppendCount> = unsafe { transmute(_inner.get()) };
287                inner.rename_files();
288            }
289            Self::Time(_inner) => {
290                let inner: &mut _Backend<AppendTimestamp> = unsafe { transmute(_inner.get()) };
291                inner.rename_files();
292            }
293        }
294    }
295
296    fn handle_old_files(&self) -> io::Result<()> {
297        match self {
298            Self::Num(_inner) => {
299                let inner: &mut _Backend<AppendCount> = unsafe { transmute(_inner.get()) };
300                inner.handle_old_files()
301            }
302            Self::Time(_inner) => {
303                let inner: &mut _Backend<AppendTimestamp> = unsafe { transmute(_inner.get()) };
304                inner.handle_old_files()
305            }
306        }
307    }
308}
309
/// Adaptation to file-rotate crate (Copyright (c) 2020 BourgondAries, MIT license)
struct _Backend<S: SuffixScheme> {
    // Directory that receives archived files; created on demand.
    archive_dir: PathBuf,
    base_path: PathBuf, // log_path replaced parent with archive_dir
    log_path: PathBuf,  // current log
    // Whether/how many archives to gzip after rotation.
    compress: Compression,
    // Naming scheme for archive suffixes (numbered or timestamped).
    suffix_scheme: S,
    /// The bool is whether or not there's a .gz suffix to the filename
    suffixes: BTreeSet<SuffixInfo<S::Repr>>,
    // Deletion policy applied in `handle_old_files`.
    upkeep: Upkeep,
}
321
322fn compress(path: &Path) -> io::Result<()> {
323    let dest_path = PathBuf::from(format!("{}.gz", path.display()));
324
325    let mut src_file = File::open(path)?;
326    let dest_file = OpenOptions::new().write(true).create(true).append(false).open(&dest_path)?;
327
328    assert!(path.exists());
329    assert!(dest_path.exists());
330    let mut encoder = GzEncoder::new(dest_file, flate2::Compression::default());
331    io::copy(&mut src_file, &mut encoder)?;
332
333    fs::remove_file(path)?;
334
335    Ok(())
336}
337
impl<S: SuffixScheme> _Backend<S> {
    /// Builds the backend for `file`, ensures `archive_dir` exists, and indexes
    /// any pre-existing archives so rotation continues a previous run's sequence.
    ///
    /// NOTE(review): `file.file_name().unwrap()` panics for paths ending in `..`
    /// or a root — callers appear to always pass a real log file path; confirm.
    fn new(
        archive_dir: PathBuf, file: &Path, upkeep: Upkeep, compress: Compression, schema: S,
    ) -> Self {
        // base_path = archive_dir + the log's file name; all suffixes hang off it.
        let base_path = archive_dir.as_path().join(Path::new(file.file_name().unwrap()));
        let mut s = Self {
            archive_dir,
            log_path: file.to_path_buf(),
            base_path,
            upkeep,
            compress,
            suffix_scheme: schema,
            suffixes: BTreeSet::new(),
        };
        s.ensure_dir();
        s.scan_suffixes();
        s
    }

    /// Creates the archive directory if missing; panics on failure (cannot
    /// rotate without it).
    #[inline]
    fn ensure_dir(&self) {
        if !self.archive_dir.exists() {
            let _ = fs::create_dir_all(&self.archive_dir).expect("create dir");
        }
    }

    /// Re-reads the set of existing archive suffixes from disk.
    #[inline]
    fn scan_suffixes(&mut self) {
        self.suffixes = self.suffix_scheme.scan_suffixes(&self.base_path);
    }

    /// Moves the live log into the archive naming scheme and records the new
    /// suffix. Panics if the rename cascade fails (no way to keep logging into
    /// a file that must be rotated).
    #[inline]
    fn rename_files(&mut self) {
        self.ensure_dir();
        let new_suffix_info = self._move_file_with_suffix(None).expect("move files");
        self.suffixes.insert(new_suffix_info);
    }

    /// Applies the upkeep policy (delete too-old archives) and then compresses
    /// the oldest archives beyond the configured uncompressed quota.
    #[inline]
    fn handle_old_files(&mut self) -> io::Result<()> {
        // Find the youngest suffix that is too old, and then remove all suffixes that are older or
        // equally old:
        // Start from oldest suffix, stop when we find a suffix that is not too old
        let mut result = Ok(());
        if let Upkeep::All = &self.upkeep {
            // Keep everything: skip deletion entirely.
        } else {
            let mut youngest_old = None;
            // `iter()` is newest-first (see `_move_file_with_suffix`), so the
            // reversed enumerate walks oldest-first while keeping newest-first
            // indices for `too_old`.
            for (i, suffix) in self.suffixes.iter().enumerate().rev() {
                if self.suffix_scheme.too_old(&suffix.suffix, i) {
                    // Accumulate the first deletion error but keep deleting.
                    result = result.and(fs::remove_file(suffix.to_path(&self.base_path)));
                    youngest_old = Some((*suffix).clone());
                } else {
                    break;
                }
            }
            if let Some(youngest_old) = youngest_old {
                // Removes all the too old
                let _ = self.suffixes.split_off(&youngest_old);
            }
        }

        // Compression
        if let Compression::OnRotate(max_file_n) = self.compress {
            // Number of archives beyond the uncompressed quota (clamped at 0).
            let n = (self.suffixes.len() as i32 - max_file_n as i32).max(0) as usize;
            // The oldest N files should be compressed
            let suffixes_to_compress = self
                .suffixes
                .iter()
                .rev()
                .take(n)
                .filter(|info| !info.compressed)
                .cloned()
                .collect::<Vec<_>>();
            for info in suffixes_to_compress {
                // Do the compression
                let path = info.suffix.to_path(&self.base_path);
                compress(&path)?;

                // Re-insert the entry with its compressed flag set.
                self.suffixes.replace(SuffixInfo { compressed: true, ..info });
            }
        }
        result
    }

    /// Recursive function that keeps moving files if there's any file name collision.
    /// If `suffix` is `None`, it moves from basepath to next suffix given by the SuffixScheme
    /// Assumption: Any collision in file name is due to an old log file.
    ///
    /// Returns the suffix of the new file (the last suffix after possible cascade of renames).
    fn _move_file_with_suffix(
        &mut self, old_suffix_info: Option<SuffixInfo<S::Repr>>,
    ) -> io::Result<SuffixInfo<S::Repr>> {
        // NOTE: this newest_suffix is there only because AppendTimestamp specifically needs
        // it. Otherwise it might not be necessary to provide this to `rotate_file`. We could also
        // have passed the internal BTreeMap itself, but it would require to make SuffixInfo `pub`.
        let newest_suffix = self.suffixes.iter().next().map(|info| &info.suffix);

        let new_suffix = self.suffix_scheme.rotate_file(
            &self.base_path,
            newest_suffix,
            &old_suffix_info.clone().map(|i| i.suffix),
        )?;

        // The destination file/path eventual .gz suffix must match the source path
        let new_suffix_info = SuffixInfo {
            suffix: new_suffix,
            compressed: old_suffix_info.as_ref().map(|x| x.compressed).unwrap_or(false),
        };
        let new_path = new_suffix_info.to_path(&self.base_path);

        // Whatever exists that would block a move to the new suffix
        let existing_suffix_info = self.suffixes.get(&new_suffix_info).cloned();

        // Move destination file out of the way if it exists
        let newly_created_suffix = if let Some(existing_suffix_info) = existing_suffix_info {
            // We might move files in a way that the destination path doesn't equal the path that
            // was replaced. Due to possible `.gz`, a "conflicting" file doesn't mean that paths
            // are equal.
            self.suffixes.replace(new_suffix_info);
            // Recurse to move conflicting file.
            self._move_file_with_suffix(Some(existing_suffix_info))?
        } else {
            new_suffix_info
        };

        let old_path = match old_suffix_info {
            Some(suffix) => suffix.to_path(&self.base_path),
            None => self.log_path.clone(), // When archive_dir and parent of log_path is different
        };
        // Do the move
        // NOTE(review): these asserts can race with external file changes and
        // will panic the rotation path if they fire — consider returning an
        // io::Error instead; confirm the invariant before relying on it.
        assert!(old_path.exists());
        assert!(!new_path.exists());
        fs::rename(old_path, new_path)?;

        Ok(newly_created_suffix)
    }
}