file_rotate/lib.rs

//! Write output to a file and rotate the files when limits have been exceeded.
//!
//! Defines a simple [std::io::Write] object that you can plug into your writers as middleware.
//!
//! # Content limit #
//!
//! [ContentLimit] specifies at what point a log file has to be rotated.
//!
//! ## Rotating by Lines ##
//!
//! We can rotate log files using a number of lines as the limit, with [ContentLimit::Lines].
//!
//! ```
//! use file_rotate::{FileRotate, ContentLimit, suffix::AppendCount, compression::Compression};
//! use std::{fs, io::Write};
//!
//! // Create a new log writer. The first argument is anything resembling a path. The
//! // basename is used for naming the log files.
//! //
//! // Here we choose to limit each log file to 3 lines, and to keep at most 2 rotated log files.
//! // This makes the total number of log files 3, since the original file is present as well.
//!
//! # let directory = tempfile::TempDir::new().unwrap();
//! # let directory = directory.path();
//! let log_path = directory.join("my-log-file");
//!
//! let mut log = FileRotate::new(
//!     log_path.clone(),
//!     AppendCount::new(2),
//!     ContentLimit::Lines(3),
//!     Compression::None,
//!     None,
//! );
//!
//! // Write a bunch of lines
//! writeln!(log, "Line 1: Hello World!");
//! for idx in 2..11 {
//!     writeln!(log, "Line {}", idx);
//! }
//!
//! assert_eq!("Line 10\n", fs::read_to_string(&log_path).unwrap());
//!
//! assert_eq!("Line 4\nLine 5\nLine 6\n", fs::read_to_string(&directory.join("my-log-file.2")).unwrap());
//! assert_eq!("Line 7\nLine 8\nLine 9\n", fs::read_to_string(&directory.join("my-log-file.1")).unwrap());
//! ```
//!
//! ## Rotating by Bytes ##
//!
//! Another method of rotation is by bytes instead of lines, with [ContentLimit::Bytes].
//!
//! ```
//! use file_rotate::{FileRotate, ContentLimit, suffix::AppendCount, compression::Compression};
//! use std::{fs, io::Write};
//!
//! # let directory = tempfile::TempDir::new().unwrap();
//! # let directory = directory.path();
//! let log_path = directory.join("my-log-file");
//!
//! let mut log = FileRotate::new(
//!     log_path.clone(),
//!     AppendCount::new(2),
//!     ContentLimit::Bytes(5),
//!     Compression::None,
//!     None,
//! );
//!
//! writeln!(log, "Test file");
//!
//! assert_eq!("Test ", fs::read_to_string(&log.log_paths()[0]).unwrap());
//! assert_eq!("file\n", fs::read_to_string(&log_path).unwrap());
//! ```
//!
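//! ## Rotating by Time ##
//!
//! [ContentLimit::Time] rotates the file on wall-clock boundaries given by [TimeFrequency]
//! (hourly, daily, weekly, monthly or yearly). A minimal sketch (marked `ignore` because its
//! effect depends on the current time; the path is illustrative):
//!
//! ```ignore
//! use file_rotate::{FileRotate, ContentLimit, TimeFrequency, suffix::{AppendTimestamp, FileLimit},
//! compression::Compression};
//! use std::io::Write;
//!
//! let mut log = FileRotate::new(
//!     "target/my-log-directory-time/my-log-file",
//!     AppendTimestamp::default(FileLimit::MaxFiles(7)),
//!     ContentLimit::Time(TimeFrequency::Daily),
//!     Compression::None,
//!     None,
//! );
//!
//! // The file is rotated the first time we write on a new day.
//! writeln!(log, "Hello");
//! ```
//!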
//! # Rotation Method #
//!
//! Two rotation methods are provided, but any behaviour can be implemented with the [SuffixScheme]
//! trait.
//!
//! ## Basic count ##
//!
//! With [AppendCount], when the limit is reached in the main log file, the file is moved with
//! suffix `.1`, and subsequently numbered files are moved in a cascade.
//!
//! Here's an example with a 1-byte limit:
//!
//! ```
//! use file_rotate::{FileRotate, ContentLimit, suffix::AppendCount, compression::Compression};
//! use std::{fs, io::Write};
//!
//! # let directory = tempfile::TempDir::new().unwrap();
//! # let directory = directory.path();
//! let log_path = directory.join("my-log-file");
//!
//! let mut log = FileRotate::new(
//!     log_path.clone(),
//!     AppendCount::new(3),
//!     ContentLimit::Bytes(1),
//!     Compression::None,
//!     None,
//! );
//!
//! write!(log, "A");
//! assert_eq!("A", fs::read_to_string(&log_path).unwrap());
//!
//! write!(log, "B");
//! assert_eq!("A", fs::read_to_string(directory.join("my-log-file.1")).unwrap());
//! assert_eq!("B", fs::read_to_string(&log_path).unwrap());
//!
//! write!(log, "C");
//! assert_eq!("A", fs::read_to_string(directory.join("my-log-file.2")).unwrap());
//! assert_eq!("B", fs::read_to_string(directory.join("my-log-file.1")).unwrap());
//! assert_eq!("C", fs::read_to_string(&log_path).unwrap());
//!
//! write!(log, "D");
//! assert_eq!("A", fs::read_to_string(directory.join("my-log-file.3")).unwrap());
//! assert_eq!("B", fs::read_to_string(directory.join("my-log-file.2")).unwrap());
//! assert_eq!("C", fs::read_to_string(directory.join("my-log-file.1")).unwrap());
//! assert_eq!("D", fs::read_to_string(&log_path).unwrap());
//!
//! write!(log, "E");
//! assert_eq!("B", fs::read_to_string(directory.join("my-log-file.3")).unwrap());
//! assert_eq!("C", fs::read_to_string(directory.join("my-log-file.2")).unwrap());
//! assert_eq!("D", fs::read_to_string(directory.join("my-log-file.1")).unwrap());
//! assert_eq!("E", fs::read_to_string(&log_path).unwrap());
//! ```
//! ## Timestamp suffix ##
//!
//! With [AppendTimestamp], when the limit is reached in the main log file, the file is moved with
//! a suffix equal to the current timestamp (using the specified or a default format). If the
//! destination file name already exists, `.1` (and up) is appended.
//!
//! Note that this works somewhat differently from `AppendCount` because of lexical ordering
//! concerns: here, higher numbers mean more recent logs, whereas `AppendCount` works the opposite
//! way. The reason is to keep the lexical ordering of log names consistent: a higher lexical
//! value means a more recent log.
//! This of course assumes that the timestamp format starts with the year (or the most significant
//! component).
//!
//! With this suffix scheme, you can also decide whether to delete old files based on the age of
//! their timestamp ([FileLimit::Age]), or just on a maximum number of files ([FileLimit::MaxFiles]).
//!
//! ```
//! use file_rotate::{FileRotate, ContentLimit, suffix::{AppendTimestamp, FileLimit},
//! compression::Compression};
//! use std::{fs, io::Write};
//!
//! # let directory = tempfile::TempDir::new().unwrap();
//! # let directory = directory.path();
//! let log_path = directory.join("my-log-file");
//!
//! let mut log = FileRotate::new(
//!     log_path.clone(),
//!     AppendTimestamp::default(FileLimit::MaxFiles(2)),
//!     ContentLimit::Bytes(1),
//!     Compression::None,
//!     None,
//! );
//!
//! write!(log, "A");
//! assert_eq!("A", fs::read_to_string(&log_path).unwrap());
//!
//! write!(log, "B");
//! assert_eq!("A", fs::read_to_string(&log.log_paths()[0]).unwrap());
//! assert_eq!("B", fs::read_to_string(&log_path).unwrap());
//!
//! write!(log, "C");
//! assert_eq!("A", fs::read_to_string(&log.log_paths()[0]).unwrap());
//! assert_eq!("B", fs::read_to_string(&log.log_paths()[1]).unwrap());
//! assert_eq!("C", fs::read_to_string(&log_path).unwrap());
//!
//! write!(log, "D");
//! assert_eq!("B", fs::read_to_string(&log.log_paths()[0]).unwrap());
//! assert_eq!("C", fs::read_to_string(&log.log_paths()[1]).unwrap());
//! assert_eq!("D", fs::read_to_string(&log_path).unwrap());
//! ```
//!
//! If you use timestamps as the suffix, you can also configure files to be removed as they reach
//! a certain age. For example:
//! ```rust
//! use file_rotate::suffix::{AppendTimestamp, FileLimit};
//! AppendTimestamp::default(FileLimit::Age(chrono::Duration::weeks(1)));
//! ```
//!
//! # Compression #
//!
//! Select a [Compression] mode to make the file rotator compress old files using flate2.
//! Compressed files get an additional suffix `.gz` after the main suffix.
//!
//! ## Compression example ##
//! If we run this:
//!
//! ```ignore
//! use file_rotate::{compression::*, suffix::*, *};
//! use std::io::Write;
//!
//! let mut log = FileRotate::new(
//!     "./log",
//!     AppendTimestamp::default(FileLimit::MaxFiles(4)),
//!     ContentLimit::Bytes(1),
//!     Compression::OnRotate(2),
//!     None,
//! );
//!
//! for i in 0..6 {
//!     write!(log, "{}", i).unwrap();
//!     std::thread::sleep(std::time::Duration::from_secs(1));
//! }
//! ```
//! The following files will be created:
//! ```ignore
//! log  log.20220112T112415.gz  log.20220112T112416.gz  log.20220112T112417  log.20220112T112418
//! ```
//! And we can assemble all the available log data with:
//! ```ignore
//! $ gunzip -c log.20220112T112415.gz  ; gunzip -c log.20220112T112416.gz ; cat log.20220112T112417 log.20220112T112418 log
//! 12345
//! ```
//!
//! ## Get structured list of log files ##
//!
//! We can programmatically get the list of log files.
//! The following code scans the current directory and recognizes log files based on their file names:
//!
//! ```
//! # use file_rotate::{suffix::*, *};
//! # use std::path::Path;
//! println!(
//!     "{:#?}",
//!     AppendTimestamp::default(FileLimit::MaxFiles(4)).scan_suffixes(Path::new("./log"))
//! );
//! ```
//!
//! [SuffixScheme::scan_suffixes] also takes into account the possibility of an extra `.gz` suffix,
//! and interprets it correctly as compression. The output:
//!
//! ```ignore
//! {
//!     SuffixInfo {
//!         suffix: TimestampSuffix {
//!             timestamp: "20220112T112418",
//!             number: None,
//!         },
//!         compressed: false,
//!     },
//!     SuffixInfo {
//!         suffix: TimestampSuffix {
//!             timestamp: "20220112T112417",
//!             number: None,
//!         },
//!         compressed: false,
//!     },
//!     SuffixInfo {
//!         suffix: TimestampSuffix {
//!             timestamp: "20220112T112416",
//!             number: None,
//!         },
//!         compressed: true,
//!     },
//!     SuffixInfo {
//!         suffix: TimestampSuffix {
//!             timestamp: "20220112T112415",
//!             number: None,
//!         },
//!         compressed: true,
//!     },
//! }
//! ```
//! This information can be used, for example, by a program that assembles log history.
//!
//! # Filesystem Errors #
//!
//! If the directory containing the logs is deleted or somehow made inaccessible, the rotator
//! will simply continue operating without fault. When a rotation occurs, it attempts to open a
//! file in the directory. If it can, it will just continue logging. If it can't, the written
//! data is discarded.

#![deny(
    missing_docs,
    trivial_casts,
    trivial_numeric_casts,
    unsafe_code,
    unused_import_braces,
    unused_qualifications
)]

use chrono::prelude::*;
use compression::*;
use std::io::{BufRead, BufReader};
use std::{
    cmp::Ordering,
    collections::BTreeSet,
    fs::{self, File, OpenOptions},
    io::{self, Write},
    path::{Path, PathBuf},
};
use suffix::*;

pub mod compression;
pub mod suffix;
#[cfg(test)]
mod tests;

// ---

/// At which frequency to rotate the file.
#[derive(Clone, Copy, Debug)]
pub enum TimeFrequency {
    /// Rotate every hour.
    Hourly,
    /// Rotate once a day.
    Daily,
    /// Rotate once a week.
    Weekly,
    /// Rotate every month.
    Monthly,
    /// Rotate yearly.
    Yearly,
}

/// When to move files: the condition on which a file is rotated.
#[derive(Clone, Debug)]
pub enum ContentLimit {
    /// Cut the log at the exact size in bytes.
    Bytes(usize),
    /// Cut the log file at line breaks.
    Lines(usize),
    /// Cut the log at a time interval.
    Time(TimeFrequency),
    /// Cut the log file after surpassing a size in bytes (but only after the complete buffer of a
    /// write call has been written).
    BytesSurpassed(usize),
    /// Don't do any rotation automatically.
    None,
}

/// Used mostly internally. Info about a suffix and its compressed state.
#[derive(Clone, Debug, Eq)]
pub struct SuffixInfo<Repr> {
    /// Suffix
    pub suffix: Repr,
    /// Whether there is a `.gz` extension after the suffix
    pub compressed: bool,
}
impl<R: PartialEq> PartialEq for SuffixInfo<R> {
    fn eq(&self, other: &Self) -> bool {
        self.suffix == other.suffix
    }
}

impl<Repr: Representation> SuffixInfo<Repr> {
    /// Append this suffix (and `.gz` if compressed) to a path
    pub fn to_path(&self, basepath: &Path) -> PathBuf {
        let path = self.suffix.to_path(basepath);
        if self.compressed {
            PathBuf::from(format!("{}.gz", path.display()))
        } else {
            path
        }
    }
}

impl<Repr: Representation> Ord for SuffixInfo<Repr> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.suffix.cmp(&other.suffix)
    }
}
impl<Repr: Representation> PartialOrd for SuffixInfo<Repr> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

/// The main writer used for rotating logs.
#[derive(Debug)]
pub struct FileRotate<S: SuffixScheme> {
    basepath: PathBuf,
    file: Option<File>,
    modified: Option<DateTime<Local>>,
    content_limit: ContentLimit,
    count: usize,
    compression: Compression,
    suffix_scheme: S,
    /// Scanned suffixes of rotated files; `SuffixInfo::compressed` records whether the file name
    /// ends in `.gz`.
    suffixes: BTreeSet<SuffixInfo<S::Repr>>,
    open_options: Option<OpenOptions>,
}

impl<S: SuffixScheme> FileRotate<S> {
    /// Create a new [FileRotate].
    ///
    /// The basename of `path` is used for naming the log files. The suffix scheme determines the
    /// extension appended to rotated files; with [AppendCount] it is of the form `.N`, where N is
    /// `1..=max_files`.
    ///
    /// `content_limit` specifies the limits for rotating a file.
    ///
    /// `open_options`: If provided, you must set `.read(true).create(true).append(true)`!
    ///
    /// # Panics
    ///
    /// Panics if `bytes == 0` or `lines == 0`.
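    ///
    /// A minimal sketch of building the required `open_options` (nothing here is specific to this
    /// crate):
    ///
    /// ```
    /// use std::fs::OpenOptions;
    ///
    /// let mut open_options = OpenOptions::new();
    /// open_options.read(true).create(true).append(true);
    /// ```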
    pub fn new<P: AsRef<Path>>(
        path: P,
        suffix_scheme: S,
        content_limit: ContentLimit,
        compression: Compression,
        open_options: Option<OpenOptions>,
    ) -> Self {
        match content_limit {
            ContentLimit::Bytes(bytes) => {
                assert!(bytes > 0);
            }
            ContentLimit::Lines(lines) => {
                assert!(lines > 0);
            }
            ContentLimit::Time(_) => {}
            ContentLimit::BytesSurpassed(bytes) => {
                assert!(bytes > 0);
            }
            ContentLimit::None => {}
        };

        let basepath = path.as_ref().to_path_buf();
        fs::create_dir_all(basepath.parent().unwrap()).expect("create dir");

        let mut s = Self {
            file: None,
            modified: None,
            basepath,
            content_limit,
            count: 0,
            compression,
            suffixes: BTreeSet::new(),
            suffix_scheme,
            open_options,
        };
        s.ensure_log_directory_exists();
        s.scan_suffixes();

        s
    }
    fn ensure_log_directory_exists(&mut self) {
        let path = self.basepath.parent().unwrap();
        if !path.exists() {
            fs::create_dir_all(path).expect("create dir");
            self.scan_suffixes();
        }
        if !self.basepath.exists() || self.file.is_none() {
            // Open or create the file
            self.open_file();

            match self.file {
                None => self.count = 0,
                Some(ref mut file) => {
                    match self.content_limit {
                        ContentLimit::Bytes(_) | ContentLimit::BytesSurpassed(_) => {
                            // Update byte `count`
                            if let Ok(metadata) = file.metadata() {
                                self.count = metadata.len() as usize;
                            } else {
                                self.count = 0;
                            }
                        }
                        ContentLimit::Lines(_) => {
                            self.count = BufReader::new(file).lines().count();
                        }
                        ContentLimit::Time(_) => {
                            self.modified = mtime(file);
                        }
                        ContentLimit::None => {}
                    }
                }
            }
        }
    }

    fn open_file(&mut self) {
        let open_options = self.open_options.clone().unwrap_or_else(|| {
            let mut o = OpenOptions::new();
            o.read(true).create(true).append(true);
            o
        });

        self.file = open_options.open(&self.basepath).ok();
    }

    fn scan_suffixes(&mut self) {
        self.suffixes = self.suffix_scheme.scan_suffixes(&self.basepath);
    }
    /// Get paths of rotated log files (excluding the original/current log file), ordered from
    /// oldest to most recent.
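    ///
    /// A minimal sketch (the path is illustrative):
    ///
    /// ```
    /// # use file_rotate::{FileRotate, ContentLimit, suffix::AppendCount, compression::Compression};
    /// # use std::io::Write;
    /// # let directory = tempfile::TempDir::new().unwrap();
    /// let mut log = FileRotate::new(
    ///     directory.path().join("my-log-file"),
    ///     AppendCount::new(2),
    ///     ContentLimit::Bytes(1),
    ///     Compression::None,
    ///     None,
    /// );
    /// write!(log, "A").unwrap();
    /// write!(log, "B").unwrap();
    /// // "A" has been rotated out; the current file contains "B".
    /// assert_eq!(log.log_paths().len(), 1);
    /// ```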
    pub fn log_paths(&mut self) -> Vec<PathBuf> {
        self.suffixes
            .iter()
            .rev()
            .map(|suffix| suffix.to_path(&self.basepath))
            .collect::<Vec<_>>()
    }

    /// Recursive function that keeps moving files if there's any file name collision.
    /// If `old_suffix_info` is `None`, it moves the base path to the next suffix given by the
    /// [SuffixScheme].
    /// Assumption: any collision in file name is due to an old log file.
    ///
    /// Returns the suffix of the new file (the last suffix after a possible cascade of renames).
    fn move_file_with_suffix(
        &mut self,
        old_suffix_info: Option<SuffixInfo<S::Repr>>,
    ) -> io::Result<SuffixInfo<S::Repr>> {
        // NOTE: this newest_suffix is there only because AppendTimestamp specifically needs
        // it. Otherwise it might not be necessary to provide this to `rotate_file`. We could also
        // have passed the internal BTreeSet itself, but that would require making SuffixInfo `pub`.

        let newest_suffix = self.suffixes.iter().next().map(|info| &info.suffix);

        let new_suffix = self.suffix_scheme.rotate_file(
            &self.basepath,
            newest_suffix,
            &old_suffix_info.clone().map(|i| i.suffix),
        )?;

        // The destination path's `.gz` suffix (if any) must match the source path's
        let new_suffix_info = SuffixInfo {
            suffix: new_suffix,
            compressed: old_suffix_info
                .as_ref()
                .map(|x| x.compressed)
                .unwrap_or(false),
        };
        let new_path = new_suffix_info.to_path(&self.basepath);

        // Whatever exists that would block a move to the new suffix
        let existing_suffix_info = self.suffixes.get(&new_suffix_info).cloned();

        // Move the destination file out of the way if it exists
        let newly_created_suffix = if let Some(existing_suffix_info) = existing_suffix_info {
            // We might move files in a way that the destination path doesn't equal the path that
            // was replaced. Due to possible `.gz`, a "conflicting" file doesn't mean that paths
            // are equal.
            self.suffixes.replace(new_suffix_info);
            // Recurse to move the conflicting file.
            self.move_file_with_suffix(Some(existing_suffix_info))?
        } else {
            new_suffix_info
        };

        let old_path = match old_suffix_info {
            Some(suffix) => suffix.to_path(&self.basepath),
            None => self.basepath.clone(),
        };

        // Do the move
        assert!(old_path.exists());
        assert!(!new_path.exists());
        fs::rename(old_path, new_path)?;

        Ok(newly_created_suffix)
    }

    /// Trigger a log rotation manually. This is mostly intended for use with `ContentLimit::None`,
    /// but it works with all content limits.
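    ///
    /// A minimal sketch of manual rotation (paths and names are illustrative):
    ///
    /// ```
    /// # use file_rotate::{FileRotate, ContentLimit, suffix::AppendCount, compression::Compression};
    /// # use std::io::Write;
    /// # let directory = tempfile::TempDir::new().unwrap();
    /// let mut log = FileRotate::new(
    ///     directory.path().join("my-log-file"),
    ///     AppendCount::new(2),
    ///     ContentLimit::None,
    ///     Compression::None,
    ///     None,
    /// );
    /// write!(log, "some content").unwrap();
    /// log.rotate().unwrap();
    /// // The previous contents now live in the first rotated file.
    /// assert_eq!(log.log_paths().len(), 1);
    /// ```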
    pub fn rotate(&mut self) -> io::Result<()> {
        self.ensure_log_directory_exists();

        let _ = self.file.take();

        // Move the current file out of the way; this always ends up at a new, free suffix, which
        // is returned.
        let new_suffix_info = self.move_file_with_suffix(None)?;
        self.suffixes.insert(new_suffix_info);

        self.open_file();

        self.count = 0;

        self.handle_old_files()?;

        Ok(())
    }
    fn handle_old_files(&mut self) -> io::Result<()> {
        // Find the youngest suffix that is too old, and then remove all suffixes that are older or
        // equally old:
        let mut youngest_old = None;
        // Start from the oldest suffix, stop when we find a suffix that is not too old
        let mut result = Ok(());
        for (i, suffix) in self.suffixes.iter().enumerate().rev() {
            if self.suffix_scheme.too_old(&suffix.suffix, i) {
                result = result.and(fs::remove_file(suffix.to_path(&self.basepath)));
                youngest_old = Some((*suffix).clone());
            } else {
                break;
            }
        }
        if let Some(youngest_old) = youngest_old {
            // Remove all the too-old suffixes from the set
            let _ = self.suffixes.split_off(&youngest_old);
        }

        // Compression
        if let Compression::OnRotate(max_file_n) = self.compression {
            let n = (self.suffixes.len() as i32 - max_file_n as i32).max(0) as usize;
            // The oldest n files should be compressed
            let suffixes_to_compress = self
                .suffixes
                .iter()
                .rev()
                .take(n)
                .filter(|info| !info.compressed)
                .cloned()
                .collect::<Vec<_>>();
            for info in suffixes_to_compress {
                // Do the compression
                let path = info.suffix.to_path(&self.basepath);
                compress(&path)?;

                self.suffixes.replace(SuffixInfo {
                    compressed: true,
                    ..info
                });
            }
        }

        result
    }
}

impl<S: SuffixScheme> Write for FileRotate<S> {
    fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
        let written = buf.len();
        match self.content_limit {
            ContentLimit::Bytes(bytes) => {
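                // Fill the current file up to the exact byte limit, rotate, and keep going with
                // the remainder until what is left fits in the (new) current file.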
                while self.count + buf.len() > bytes {
                    let bytes_left = bytes.saturating_sub(self.count);
                    if let Some(ref mut file) = self.file {
                        file.write_all(&buf[..bytes_left])?;
                    }
                    self.rotate()?;
                    buf = &buf[bytes_left..];
                }
                self.count += buf.len();
                if let Some(ref mut file) = self.file {
                    file.write_all(buf)?;
                }
            }
            ContentLimit::Time(time) => {
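                // Compare "now" with the file's last modification time and rotate when the
                // configured hour/day/week/month/year boundary has been crossed.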
                let local: DateTime<Local> = now();

                if let Some(modified) = self.modified {
                    match time {
                        TimeFrequency::Hourly => {
                            if local.hour() != modified.hour()
                                || local.day() != modified.day()
                                || local.month() != modified.month()
                                || local.year() != modified.year()
                            {
                                self.rotate()?;
                            }
                        }
                        TimeFrequency::Daily => {
                            if local.date() > modified.date() {
                                self.rotate()?;
                            }
                        }
                        TimeFrequency::Weekly => {
                            if local.iso_week().week() != modified.iso_week().week()
                                || local.year() > modified.year()
                            {
                                self.rotate()?;
                            }
                        }
                        TimeFrequency::Monthly => {
                            if local.month() != modified.month() || local.year() != modified.year()
                            {
                                self.rotate()?;
                            }
                        }
                        TimeFrequency::Yearly => {
                            if local.year() > modified.year() {
                                self.rotate()?;
                            }
                        }
                    }
                }

                if let Some(ref mut file) = self.file {
                    file.write_all(buf)?;

                    self.modified = Some(local);
                }
            }
            ContentLimit::Lines(lines) => {
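                // Write complete lines one at a time and rotate whenever the line count reaches
                // the limit; any trailing partial line is written to the current file afterwards.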
                while let Some(idx) = buf.iter().position(|&byte| byte == b'\n') {
                    if let Some(ref mut file) = self.file {
                        file.write_all(&buf[..idx + 1])?;
                    }
                    self.count += 1;
                    buf = &buf[idx + 1..];
                    if self.count >= lines {
                        self.rotate()?;
                    }
                }
                if let Some(ref mut file) = self.file {
                    file.write_all(buf)?;
                }
            }
            ContentLimit::BytesSurpassed(bytes) => {
                if self.count > bytes {
                    self.rotate()?;
                }
                if let Some(ref mut file) = self.file {
                    file.write_all(buf)?;
                }
                self.count += buf.len();
            }
            ContentLimit::None => {
                if let Some(ref mut file) = self.file {
                    file.write_all(buf)?;
                }
            }
        }
        Ok(written)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.file
            .as_mut()
            .map(|file| file.flush())
            .unwrap_or(Ok(()))
    }
}

/// Get modification time, in the non-test case.
#[cfg(not(test))]
fn mtime(file: &File) -> Option<DateTime<Local>> {
    if let Ok(time) = file.metadata().and_then(|metadata| metadata.modified()) {
        return Some(time.into());
    }

    None
}

/// Get modification time, in the test case.
#[cfg(test)]
fn mtime(_: &File) -> Option<DateTime<Local>> {
    Some(now())
}

/// Get system time, in the non-test case.
#[cfg(not(test))]
fn now() -> DateTime<Local> {
    Local::now()
}

/// Get mocked system time, in the test case.
#[cfg(test)]
pub mod mock_time {
    use super::*;
    use std::cell::RefCell;

    thread_local! {
        static MOCK_TIME: RefCell<Option<DateTime<Local>>> = RefCell::new(None);
    }

    /// Get the current _mocked_ time
    pub fn now() -> DateTime<Local> {
        MOCK_TIME.with(|cell| cell.borrow().as_ref().cloned().unwrap_or_else(Local::now))
    }

    /// Set the mocked time
    pub fn set_mock_time(time: DateTime<Local>) {
        MOCK_TIME.with(|cell| *cell.borrow_mut() = Some(time));
    }
}

#[cfg(test)]
pub use mock_time::now;