// rusmes_config/logging.rs
1//! Structured logging with file rotation for RusMES
2//!
3//! This module provides comprehensive logging capabilities including:
4//! - File rotation (daily, hourly, size-based)
5//! - JSON and plain text formatting
6//! - Configurable log levels per module
7//! - Log file compression and archiving
8
9use anyhow::{Context, Result};
10use serde::{Deserialize, Serialize};
11use std::collections::HashMap;
12use std::fs;
13use std::path::{Path, PathBuf};
14use tracing::Level;
15use tracing_appender::non_blocking::WorkerGuard;
16use tracing_subscriber::fmt::format::FmtSpan;
17use tracing_subscriber::layer::SubscriberExt;
18use tracing_subscriber::util::SubscriberInitExt;
19use tracing_subscriber::EnvFilter;
20
/// Log rotation policy.
///
/// Serialized in lowercase (`rename_all = "lowercase"`), so config files use
/// `"daily"`, `"hourly"`, `"sizebased"` (one word), and `"never"`.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum RotationPolicy {
    /// Rotate logs daily (the default policy).
    #[default]
    Daily,
    /// Rotate logs hourly
    Hourly,
    /// Rotate logs based on file size.
    ///
    /// NOTE(review): `init_logging` currently falls back to daily rotation
    /// for this variant (see the appender match there); true size-based
    /// rolling is not implemented.
    SizeBased,
    /// Never rotate logs
    Never,
}
35
/// Log format type.
///
/// Serialized in lowercase (`"text"` / `"json"`).
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum LogFormat {
    /// Plain text format (the default).
    #[default]
    Text,
    /// JSON format
    Json,
}
46
/// Complete logging configuration.
///
/// Every field has a serde default, so a partial (or empty) config file
/// deserializes successfully; `LogConfig::default()` mirrors those same
/// defaults.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct LogConfig {
    /// Log level (trace, debug, info, warn, error). Default: "info".
    #[serde(default = "default_level")]
    pub level: String,

    /// Log format (text or json). Default: text.
    #[serde(default)]
    pub format: LogFormat,

    /// Directory for log files. Default: "/var/log/rusmes".
    /// Created by `init_logging` if missing.
    #[serde(default = "default_log_dir")]
    pub log_dir: String,

    /// Base name for log files. Default: "rusmes".
    #[serde(default = "default_file_prefix")]
    pub file_prefix: String,

    /// Rotation policy. Default: daily.
    #[serde(default)]
    pub rotation: RotationPolicy,

    /// Maximum file size for size-based rotation (e.g., "100MB").
    /// Default: "100MB". Only validated when `rotation` is size-based.
    #[serde(default = "default_max_size")]
    pub max_size: String,

    /// Maximum number of archived log files to keep. Default: 10.
    #[serde(default = "default_max_backups")]
    pub max_backups: usize,

    /// Whether to compress archived logs with gzip. Default: true.
    #[serde(default = "default_compress")]
    pub compress: bool,

    /// Per-module log level overrides, e.g. `rusmes_smtp = "debug"`.
    /// Each entry becomes an `EnvFilter` directive `module=level`.
    #[serde(default)]
    pub module_levels: HashMap<String, String>,

    /// Whether to log to stdout in addition to files. Default: false.
    #[serde(default)]
    pub also_stdout: bool,
}
90
/// Serde default for `LogConfig::level`: "info".
fn default_level() -> String {
    String::from("info")
}
94
/// Serde default for `LogConfig::log_dir`: "/var/log/rusmes".
fn default_log_dir() -> String {
    String::from("/var/log/rusmes")
}
98
/// Serde default for `LogConfig::file_prefix`: "rusmes".
fn default_file_prefix() -> String {
    String::from("rusmes")
}
102
/// Serde default for `LogConfig::max_size`: "100MB".
fn default_max_size() -> String {
    String::from("100MB")
}
106
/// Serde default for `LogConfig::max_backups`: keep ten archived files.
fn default_max_backups() -> usize {
    10
}
110
/// Serde default for `LogConfig::compress`: compression enabled.
fn default_compress() -> bool {
    true
}
114
115impl Default for LogConfig {
116    fn default() -> Self {
117        Self {
118            level: default_level(),
119            format: LogFormat::default(),
120            log_dir: default_log_dir(),
121            file_prefix: default_file_prefix(),
122            rotation: RotationPolicy::default(),
123            max_size: default_max_size(),
124            max_backups: default_max_backups(),
125            compress: default_compress(),
126            module_levels: HashMap::new(),
127            also_stdout: false,
128        }
129    }
130}
131
132impl LogConfig {
133    /// Validate the log configuration
134    pub fn validate(&self) -> Result<()> {
135        // Validate log level
136        self.parse_level()
137            .with_context(|| format!("Invalid log level: {}", self.level))?;
138
139        // Validate module levels
140        for (module, level) in &self.module_levels {
141            level
142                .parse::<Level>()
143                .with_context(|| format!("Invalid level '{}' for module '{}'", level, module))?;
144        }
145
146        // Validate max size for size-based rotation
147        if self.rotation == RotationPolicy::SizeBased {
148            self.max_size_bytes()
149                .with_context(|| format!("Invalid max_size: {}", self.max_size))?;
150        }
151
152        // Validate log directory can be created
153        if let Some(parent) = Path::new(&self.log_dir).parent() {
154            if !parent.exists() {
155                anyhow::bail!(
156                    "Parent directory of log_dir does not exist: {}",
157                    parent.display()
158                );
159            }
160        }
161
162        Ok(())
163    }
164
165    /// Parse log level string to tracing Level
166    pub fn parse_level(&self) -> Result<Level> {
167        self.level
168            .parse::<Level>()
169            .map_err(|e| anyhow::anyhow!("Invalid log level: {}", e))
170    }
171
172    /// Parse max size to bytes
173    pub fn max_size_bytes(&self) -> Result<usize> {
174        parse_size(&self.max_size)
175    }
176
177    /// Build an EnvFilter from the configuration
178    pub fn build_filter(&self) -> Result<EnvFilter> {
179        let mut filter = EnvFilter::new(&self.level);
180
181        // Add module-specific filters
182        for (module, level) in &self.module_levels {
183            filter =
184                filter.add_directive(format!("{}={}", module, level).parse().with_context(
185                    || format!("Invalid filter directive for module '{}'", module),
186                )?);
187        }
188
189        Ok(filter)
190    }
191}
192
193/// Initialize logging based on configuration
194///
195/// This function sets up the global tracing subscriber with the specified
196/// configuration. It must be called only once at application startup.
197///
198/// Returns a `WorkerGuard` that must be kept alive for the duration of the program.
199/// Dropping the guard will cause log messages to be lost.
200#[allow(clippy::type_complexity)]
201pub fn init_logging(config: &LogConfig) -> Result<Option<(WorkerGuard, Option<WorkerGuard>)>> {
202    // Validate configuration
203    config.validate()?;
204
205    // Create log directory if it doesn't exist
206    fs::create_dir_all(&config.log_dir)
207        .with_context(|| format!("Failed to create log directory: {}", config.log_dir))?;
208
209    // Build the environment filter
210    let filter = config.build_filter()?;
211
212    // Set up file appender based on rotation policy
213    let file_appender = match config.rotation {
214        RotationPolicy::Daily => {
215            tracing_appender::rolling::daily(&config.log_dir, &config.file_prefix)
216        }
217        RotationPolicy::Hourly => {
218            tracing_appender::rolling::hourly(&config.log_dir, &config.file_prefix)
219        }
220        RotationPolicy::Never => {
221            tracing_appender::rolling::never(&config.log_dir, &config.file_prefix)
222        }
223        RotationPolicy::SizeBased => {
224            // For size-based rotation, we use the daily appender and handle rotation separately
225            tracing_appender::rolling::daily(&config.log_dir, &config.file_prefix)
226        }
227    };
228
229    let (non_blocking_file, file_guard) = tracing_appender::non_blocking(file_appender);
230
231    // Set up stdout appender if requested
232    let stdout_guard = if config.also_stdout {
233        let (non_blocking_stdout, guard) = tracing_appender::non_blocking(std::io::stdout());
234
235        match config.format {
236            LogFormat::Text => {
237                let stdout_layer = tracing_subscriber::fmt::layer()
238                    .with_writer(non_blocking_stdout)
239                    .with_span_events(FmtSpan::CLOSE);
240
241                let file_layer = tracing_subscriber::fmt::layer()
242                    .with_writer(non_blocking_file)
243                    .with_span_events(FmtSpan::CLOSE)
244                    .with_ansi(false);
245
246                tracing_subscriber::registry()
247                    .with(filter)
248                    .with(stdout_layer)
249                    .with(file_layer)
250                    .init();
251            }
252            LogFormat::Json => {
253                let stdout_layer = tracing_subscriber::fmt::layer()
254                    .json()
255                    .with_writer(non_blocking_stdout);
256
257                let file_layer = tracing_subscriber::fmt::layer()
258                    .json()
259                    .with_writer(non_blocking_file);
260
261                tracing_subscriber::registry()
262                    .with(filter)
263                    .with(stdout_layer)
264                    .with(file_layer)
265                    .init();
266            }
267        }
268
269        Some(guard)
270    } else {
271        match config.format {
272            LogFormat::Text => {
273                tracing_subscriber::registry()
274                    .with(filter)
275                    .with(
276                        tracing_subscriber::fmt::layer()
277                            .with_writer(non_blocking_file)
278                            .with_span_events(FmtSpan::CLOSE)
279                            .with_ansi(false),
280                    )
281                    .init();
282            }
283            LogFormat::Json => {
284                tracing_subscriber::registry()
285                    .with(filter)
286                    .with(
287                        tracing_subscriber::fmt::layer()
288                            .json()
289                            .with_writer(non_blocking_file),
290                    )
291                    .init();
292            }
293        }
294
295        None
296    };
297
298    // Start background task for archiving and compression if enabled
299    if config.compress && config.max_backups > 0 {
300        let config_clone = config.clone();
301        std::thread::spawn(move || {
302            archive_old_logs(&config_clone);
303        });
304    }
305
306    Ok(Some((file_guard, stdout_guard)))
307}
308
309/// Archive and compress old log files
310fn archive_old_logs(config: &LogConfig) {
311    let log_dir = Path::new(&config.log_dir);
312
313    // Find all log files matching the prefix
314    let mut log_files: Vec<PathBuf> = match fs::read_dir(log_dir) {
315        Ok(entries) => entries
316            .filter_map(|entry| entry.ok())
317            .map(|entry| entry.path())
318            .filter(|path| {
319                path.file_name()
320                    .and_then(|name| name.to_str())
321                    .map(|name| name.starts_with(&config.file_prefix) && !name.ends_with(".gz"))
322                    .unwrap_or(false)
323            })
324            .collect(),
325        Err(_) => return,
326    };
327
328    // Sort by modification time (oldest first)
329    log_files.sort_by_key(|path| fs::metadata(path).and_then(|m| m.modified()).ok());
330
331    // Keep only the most recent files, compress the rest
332    let current_file = format!("{}.log", config.file_prefix);
333
334    for (idx, log_file) in log_files.iter().enumerate() {
335        // Skip the current log file
336        if log_file.file_name().and_then(|n| n.to_str()) == Some(&current_file) {
337            continue;
338        }
339
340        // If we have more than max_backups, delete old files
341        if idx >= config.max_backups {
342            let _ = fs::remove_file(log_file);
343            continue;
344        }
345
346        // Compress if not already compressed
347        if config.compress {
348            let _ = compress_log_file(log_file);
349        }
350    }
351}
352
353/// Compress a log file using gzip
354fn compress_log_file(path: &Path) -> Result<()> {
355    let input =
356        fs::read(path).with_context(|| format!("Failed to read log file: {}", path.display()))?;
357
358    let output_path = path.with_extension("log.gz");
359
360    let compressed = oxiarc_deflate::gzip_compress(&input, 6)
361        .map_err(|e| anyhow::anyhow!("Failed to compress log file {}: {}", path.display(), e))?;
362
363    fs::write(&output_path, &compressed)
364        .with_context(|| format!("Failed to write compressed file: {}", output_path.display()))?;
365
366    // Remove original file after successful compression
367    fs::remove_file(path)
368        .with_context(|| format!("Failed to remove original log file: {}", path.display()))?;
369
370    Ok(())
371}
372
373/// Parse size string like "50MB", "1GB", "1024KB"
374fn parse_size(s: &str) -> Result<usize> {
375    let s = s.trim().to_uppercase();
376
377    if let Some(rest) = s.strip_suffix("GB") {
378        let num: f64 = rest.trim().parse()?;
379        Ok((num * 1024.0 * 1024.0 * 1024.0) as usize)
380    } else if let Some(rest) = s.strip_suffix("MB") {
381        let num: f64 = rest.trim().parse()?;
382        Ok((num * 1024.0 * 1024.0) as usize)
383    } else if let Some(rest) = s.strip_suffix("KB") {
384        let num: f64 = rest.trim().parse()?;
385        Ok((num * 1024.0) as usize)
386    } else if let Some(rest) = s.strip_suffix('B') {
387        let num: usize = rest.trim().parse()?;
388        Ok(num)
389    } else {
390        // Assume bytes
391        let num: usize = s.parse()?;
392        Ok(num)
393    }
394}
395
#[cfg(test)]
mod tests {
    use super::*;

    // Size parsing: integer, unit-suffixed, and fractional forms.
    #[test]
    fn test_parse_size() {
        assert_eq!(parse_size("1024").unwrap(), 1024);
        assert_eq!(parse_size("1KB").unwrap(), 1024);
        assert_eq!(parse_size("1MB").unwrap(), 1024 * 1024);
        assert_eq!(parse_size("1GB").unwrap(), 1024 * 1024 * 1024);
        assert_eq!(parse_size("50MB").unwrap(), 50 * 1024 * 1024);
        assert_eq!(parse_size("100MB").unwrap(), 100 * 1024 * 1024);
        assert_eq!(
            parse_size("2.5GB").unwrap(),
            (2.5 * 1024.0 * 1024.0 * 1024.0) as usize
        );
    }

    // Default::default() must mirror the serde per-field defaults.
    #[test]
    fn test_default_log_config() {
        let config = LogConfig::default();
        assert_eq!(config.level, "info");
        assert_eq!(config.format, LogFormat::Text);
        assert_eq!(config.rotation, RotationPolicy::Daily);
        assert_eq!(config.max_backups, 10);
        assert!(config.compress);
        assert!(!config.also_stdout);
    }

    // Level strings must round-trip through tracing's Level parser.
    #[test]
    fn test_log_config_parse_level() {
        let config = LogConfig {
            level: "debug".to_string(),
            ..Default::default()
        };
        assert!(config.parse_level().is_ok());
        assert_eq!(config.parse_level().unwrap(), Level::DEBUG);

        let config = LogConfig {
            level: "invalid".to_string(),
            ..Default::default()
        };
        assert!(config.parse_level().is_err());
    }

    // max_size_bytes delegates to parse_size on the configured string.
    #[test]
    fn test_log_config_max_size_bytes() {
        let config = LogConfig {
            max_size: "100MB".to_string(),
            ..Default::default()
        };
        assert_eq!(config.max_size_bytes().unwrap(), 100 * 1024 * 1024);

        let config = LogConfig {
            max_size: "1GB".to_string(),
            ..Default::default()
        };
        assert_eq!(config.max_size_bytes().unwrap(), 1024 * 1024 * 1024);
    }

    // Pins the on-disk config spelling: rename_all = "lowercase" yields
    // "sizebased" as ONE word — changing this would break existing configs.
    #[test]
    fn test_rotation_policy_serialization() {
        let daily = RotationPolicy::Daily;
        let json = serde_json::to_string(&daily).unwrap();
        assert_eq!(json, r#""daily""#);

        let hourly = RotationPolicy::Hourly;
        let json = serde_json::to_string(&hourly).unwrap();
        assert_eq!(json, r#""hourly""#);

        let size_based = RotationPolicy::SizeBased;
        let json = serde_json::to_string(&size_based).unwrap();
        assert_eq!(json, r#""sizebased""#);
    }

    // Pins the lowercase serialized forms of LogFormat.
    #[test]
    fn test_log_format_serialization() {
        let text = LogFormat::Text;
        let json = serde_json::to_string(&text).unwrap();
        assert_eq!(json, r#""text""#);

        let json_format = LogFormat::Json;
        let json = serde_json::to_string(&json_format).unwrap();
        assert_eq!(json, r#""json""#);
    }

    // Valid module overrides must produce a filter without error.
    #[test]
    fn test_build_filter_with_module_levels() {
        let mut module_levels = HashMap::new();
        module_levels.insert("rusmes_smtp".to_string(), "debug".to_string());
        module_levels.insert("rusmes_imap".to_string(), "trace".to_string());

        let config = LogConfig {
            level: "info".to_string(),
            module_levels,
            ..Default::default()
        };

        let filter = config.build_filter();
        assert!(filter.is_ok());
    }

    // An unparsable module level must surface as an error, not be ignored.
    #[test]
    fn test_build_filter_with_invalid_module_level() {
        let mut module_levels = HashMap::new();
        module_levels.insert("rusmes_smtp".to_string(), "invalid".to_string());

        let config = LogConfig {
            level: "info".to_string(),
            module_levels,
            ..Default::default()
        };

        let filter = config.build_filter();
        assert!(filter.is_err());
    }

    // Full TOML round-trip with every field specified explicitly.
    #[test]
    fn test_log_config_deserialization_toml() {
        let toml_str = r#"
            level = "debug"
            format = "json"
            log_dir = "/tmp/test_logs"
            file_prefix = "test"
            rotation = "hourly"
            max_size = "50MB"
            max_backups = 5
            compress = false
            also_stdout = true
        "#;

        let config: LogConfig = toml::from_str(toml_str).unwrap();
        assert_eq!(config.level, "debug");
        assert_eq!(config.format, LogFormat::Json);
        assert_eq!(config.log_dir, "/tmp/test_logs");
        assert_eq!(config.file_prefix, "test");
        assert_eq!(config.rotation, RotationPolicy::Hourly);
        assert_eq!(config.max_size, "50MB");
        assert_eq!(config.max_backups, 5);
        assert!(!config.compress);
        assert!(config.also_stdout);
    }

    // module_levels deserializes from a [module_levels] TOML table.
    #[test]
    fn test_log_config_with_module_levels_toml() {
        let toml_str = r#"
            level = "info"
            format = "text"

            [module_levels]
            rusmes_smtp = "debug"
            rusmes_imap = "trace"
            rusmes_core = "warn"
        "#;

        let config: LogConfig = toml::from_str(toml_str).unwrap();
        assert_eq!(config.module_levels.len(), 3);
        assert_eq!(
            config.module_levels.get("rusmes_smtp"),
            Some(&"debug".to_string())
        );
        assert_eq!(
            config.module_levels.get("rusmes_imap"),
            Some(&"trace".to_string())
        );
        assert_eq!(
            config.module_levels.get("rusmes_core"),
            Some(&"warn".to_string())
        );
    }
}