// src/server/logging.rs
use crate::core::config::LoggingConfig;
use crate::core::prelude::*;
use actix_web::HttpMessage;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};

/// One structured log record; serialized as a single JSON line per entry.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ServerLogEntry {
    /// Local wall-clock time, formatted "%Y-%m-%d %H:%M:%S%.3f".
    pub timestamp: String,
    /// Seconds since the Unix epoch when the entry was created.
    pub timestamp_unix: u64,
    /// What kind of event this entry records.
    pub event_type: LogEventType,
    /// Client IP; "127.0.0.1" for system/performance entries.
    pub ip_address: String,
    /// Value of the request's `user-agent` header, if any.
    pub user_agent: Option<String>,
    /// HTTP method, or a synthetic tag ("SYSTEM", "SECURITY", "PERFORMANCE").
    pub method: String,
    /// Request path; "/" for non-request entries.
    pub path: String,
    /// Response status code (request entries only).
    pub status_code: Option<u16>,
    /// Response time in milliseconds, when measured.
    pub response_time_ms: Option<u64>,
    /// Bytes written in the response body, when known.
    pub bytes_sent: Option<u64>,
    /// Value of the `referer` header, if any.
    pub referer: Option<String>,
    /// Raw query string; None when the request had none.
    pub query_string: Option<String>,
    /// Request headers (sensitive ones redacted); alert entries reuse this
    /// map for `alert_reason` / `alert_details`.
    pub headers: HashMap<String, String>,
    /// Session identifier pulled from request extensions, if present.
    pub session_id: Option<String>,
}

/// Kind of event a `ServerLogEntry` records.
// Copy trait needed for use after move in write_alert_entry
#[derive(Debug, Serialize, Deserialize, Clone, Copy)]
pub enum LogEventType {
    /// A completed HTTP request/response.
    Request,
    /// Server process started.
    ServerStart,
    /// Server process stopped.
    ServerStop,
    /// Server-side error.
    ServerError,
    /// Security alert raised by `log_security_alert`.
    SecurityAlert,
    /// Performance threshold exceeded; see `log_performance_warning`.
    PerformanceWarning,
}

/// Size, retention, and compression policy for log-file rotation.
#[derive(Debug, Clone)]
pub struct LogRotationConfig {
    /// Rotate once the active log file reaches this many bytes.
    pub max_file_size_bytes: u64,
    /// Number of rotated archive files to keep.
    pub max_archive_files: u8,
    /// Gzip-compress archives after rotation.
    pub compress_archives: bool,
}

46impl From<&LoggingConfig> for LogRotationConfig {
47    fn from(config: &LoggingConfig) -> Self {
48        Self {
49            max_file_size_bytes: config.max_file_size_mb * 1024 * 1024,
50            max_archive_files: config.max_archive_files,
51            compress_archives: config.compress_archives,
52        }
53    }
54}
55
56impl Default for LogRotationConfig {
57    fn default() -> Self {
58        Self {
59            max_file_size_bytes: 100 * 1024 * 1024,
60            max_archive_files: 9,
61            compress_archives: true,
62        }
63    }
64}
65
/// Per-server JSON-lines logger with size-based rotation.
pub struct ServerLogger {
    // Active log file: `<base>/.rss/servers/<name>-[<port>].log`.
    log_file_path: PathBuf,
    // Rotation policy derived from `LoggingConfig`.
    config: LogRotationConfig,
    // Gate: write request entries.
    log_requests: bool,
    // Gate: write security-alert entries.
    log_security: bool,
    // Gate: write performance-warning entries.
    log_performance: bool,
}

74impl ServerLogger {
75    pub fn new_with_config(
76        server_name: &str,
77        port: u16,
78        logging_config: &LoggingConfig,
79    ) -> Result<Self> {
80        let base_dir = crate::core::helpers::get_base_dir()?;
81
82        let log_file_path = base_dir
83            .join(".rss")
84            .join("servers")
85            .join(format!("{}-[{}].log", server_name, port));
86
87        if let Some(parent) = log_file_path.parent() {
88            std::fs::create_dir_all(parent).map_err(AppError::Io)?;
89        }
90
91        Ok(Self {
92            log_file_path,
93            config: LogRotationConfig::from(logging_config),
94            log_requests: logging_config.log_requests,
95            log_security: logging_config.log_security_alerts,
96            log_performance: logging_config.log_performance,
97        })
98    }
99
100    pub fn new(server_name: &str, port: u16) -> Result<Self> {
101        Self::new_with_config(server_name, port, &LoggingConfig::default())
102    }
103
104    // System log helpers
105    pub async fn log_server_start(&self) -> Result<()> {
106        self.write_system_entry(LogEventType::ServerStart).await
107    }
108
109    pub async fn log_server_stop(&self) -> Result<()> {
110        self.write_system_entry(LogEventType::ServerStop).await
111    }
112
113    async fn write_system_entry(&self, event_type: LogEventType) -> Result<()> {
114        let entry = ServerLogEntry {
115            timestamp: chrono::Local::now()
116                .format("%Y-%m-%d %H:%M:%S%.3f")
117                .to_string(),
118            timestamp_unix: SystemTime::now()
119                .duration_since(UNIX_EPOCH)
120                .unwrap_or_default()
121                .as_secs(),
122            event_type,
123            ip_address: "127.0.0.1".to_string(),
124            user_agent: None,
125            method: "SYSTEM".to_string(),
126            path: "/".to_string(),
127            status_code: None,
128            response_time_ms: None,
129            bytes_sent: None,
130            referer: None,
131            query_string: None,
132            headers: HashMap::new(),
133            session_id: None,
134        };
135        self.write_log_entry(entry).await
136    }
137
138    // Request logging
139    pub async fn log_request(
140        &self,
141        req: &actix_web::HttpRequest,
142        status: u16,
143        response_time: u64,
144        bytes_sent: u64,
145    ) -> Result<()> {
146        if !self.log_requests {
147            return Ok(());
148        }
149
150        let ip = {
151            let connection_info = req.connection_info();
152            connection_info
153                .realip_remote_addr()
154                .or_else(|| connection_info.peer_addr())
155                .unwrap_or("unknown")
156                .split(':')
157                .next()
158                .unwrap_or("unknown")
159                .to_string()
160        };
161
162        // Filter to relevant headers only, redact sensitive ones
163        let headers = req
164            .headers()
165            .iter()
166            .filter_map(|(name, value)| {
167                let header_name = name.as_str().to_lowercase();
168                match header_name.as_str() {
169                    "authorization" | "cookie" | "x-api-key" => {
170                        Some((name.as_str().to_string(), "[FILTERED]".to_string()))
171                    }
172                    _ => value
173                        .to_str()
174                        .ok()
175                        .map(|v| (name.as_str().to_string(), v.to_string())),
176                }
177            })
178            .collect();
179
180        let entry = ServerLogEntry {
181            timestamp: chrono::Local::now()
182                .format("%Y-%m-%d %H:%M:%S%.3f")
183                .to_string(),
184            timestamp_unix: SystemTime::now()
185                .duration_since(UNIX_EPOCH)
186                .unwrap_or_default()
187                .as_secs(),
188            event_type: LogEventType::Request,
189            ip_address: ip.to_string(),
190            user_agent: req
191                .headers()
192                .get("user-agent")
193                .and_then(|h| h.to_str().ok())
194                .map(String::from),
195            method: req.method().to_string(),
196            path: req.path().to_string(),
197            status_code: Some(status),
198            response_time_ms: Some(response_time),
199            bytes_sent: Some(bytes_sent),
200            referer: req
201                .headers()
202                .get("referer")
203                .and_then(|h| h.to_str().ok())
204                .map(String::from),
205            query_string: if req.query_string().is_empty() {
206                None
207            } else {
208                Some(req.query_string().to_string())
209            },
210            headers,
211            session_id: req.extensions().get::<String>().cloned(),
212        };
213
214        self.write_log_entry(entry).await
215    }
216
217    // Alert logging
218    pub async fn log_security_alert(&self, ip: &str, reason: &str, details: &str) -> Result<()> {
219        if !self.log_security {
220            return Ok(());
221        }
222        self.write_alert_entry(LogEventType::SecurityAlert, ip, reason, details, None)
223            .await
224    }
225
226    pub async fn log_performance_warning(
227        &self,
228        metric: &str,
229        value: u64,
230        threshold: u64,
231    ) -> Result<()> {
232        if !self.log_performance {
233            return Ok(());
234        }
235        self.write_alert_entry(
236            LogEventType::PerformanceWarning,
237            "127.0.0.1",
238            metric,
239            &format!("value={}, threshold={}", value, threshold),
240            Some(value),
241        )
242        .await
243    }
244
245    // Write an alert log entry (security or performance)
246    async fn write_alert_entry(
247        &self,
248        event_type: LogEventType,
249        ip: &str,
250        reason: &str,
251        details: &str,
252        response_time: Option<u64>,
253    ) -> Result<()> {
254        let mut headers = HashMap::new();
255        headers.insert("alert_reason".to_string(), reason.to_string());
256        headers.insert("alert_details".to_string(), details.to_string());
257
258        let method_name = match event_type {
259            LogEventType::SecurityAlert => "SECURITY",
260            LogEventType::PerformanceWarning => "PERFORMANCE",
261            _ => "SYSTEM",
262        };
263
264        let entry = ServerLogEntry {
265            timestamp: chrono::Local::now()
266                .format("%Y-%m-%d %H:%M:%S%.3f")
267                .to_string(),
268            timestamp_unix: SystemTime::now()
269                .duration_since(UNIX_EPOCH)
270                .unwrap_or_default()
271                .as_secs(),
272            event_type,
273            ip_address: ip.to_string(),
274            user_agent: None,
275            method: method_name.to_string(),
276            path: "/".to_string(),
277            status_code: None,
278            response_time_ms: response_time,
279            bytes_sent: None,
280            referer: None,
281            query_string: None,
282            headers,
283            session_id: None,
284        };
285
286        self.write_log_entry(entry).await
287    }
288
289    pub async fn write_log_entry(&self, entry: ServerLogEntry) -> Result<()> {
290        self.check_and_rotate_if_needed().await?;
291
292        let json_line = serde_json::to_string(&entry)
293            .map_err(|e| AppError::Validation(format!("Failed to serialize log entry: {}", e)))?;
294
295        let mut file = tokio::fs::OpenOptions::new()
296            .create(true)
297            .append(true)
298            .open(&self.log_file_path)
299            .await
300            .map_err(AppError::Io)?;
301
302        use tokio::io::AsyncWriteExt;
303        file.write_all(format!("{}\n", json_line).as_bytes())
304            .await
305            .map_err(AppError::Io)?;
306        file.flush().await.map_err(AppError::Io)?;
307        Ok(())
308    }
309
310    async fn check_and_rotate_if_needed(&self) -> Result<()> {
311        if !self.log_file_path.exists() {
312            return Ok(());
313        }
314
315        let metadata = tokio::fs::metadata(&self.log_file_path)
316            .await
317            .map_err(AppError::Io)?;
318        if metadata.len() >= self.config.max_file_size_bytes {
319            self.rotate_log_files().await?;
320        }
321        Ok(())
322    }
323
324    async fn rotate_log_files(&self) -> Result<()> {
325        let base_path = &self.log_file_path;
326        let base_name = base_path
327            .file_stem()
328            .unwrap_or_else(|| std::ffi::OsStr::new("server"))
329            .to_string_lossy();
330        let parent_dir = match base_path.parent() {
331            Some(dir) => dir,
332            None => return Ok(()),
333        };
334
335        // Rotate existing archives
336        for i in (1..self.config.max_archive_files).rev() {
337            let old_path = parent_dir.join(format!("{}.{}.log.gz", base_name, i));
338            let new_path = parent_dir.join(format!("{}.{}.log.gz", base_name, i + 1));
339
340            if old_path.exists() {
341                tokio::fs::rename(&old_path, &new_path)
342                    .await
343                    .map_err(AppError::Io)?;
344            }
345        }
346
347        // Move current log to archive
348        let archive_path = parent_dir.join(format!("{}.1.log", base_name));
349        tokio::fs::rename(base_path, &archive_path)
350            .await
351            .map_err(AppError::Io)?;
352
353        if self.config.compress_archives {
354            self.compress_log_file(&archive_path).await?;
355        }
356
357        // Cleanup old files
358        let cleanup_path = parent_dir.join(format!(
359            "{}.{}.log.gz",
360            base_name,
361            self.config.max_archive_files + 1
362        ));
363        if cleanup_path.exists() {
364            tokio::fs::remove_file(&cleanup_path)
365                .await
366                .map_err(AppError::Io)?;
367        }
368
369        Ok(())
370    }
371
372    async fn compress_log_file(&self, file_path: &std::path::Path) -> Result<()> {
373        use flate2::write::GzEncoder;
374        use flate2::Compression;
375        use std::io::Write;
376
377        let content = tokio::fs::read(file_path).await.map_err(AppError::Io)?;
378        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
379        encoder.write_all(&content).map_err(AppError::Io)?;
380        let compressed = encoder.finish().map_err(AppError::Io)?;
381
382        let gz_path = file_path.with_file_name(format!(
383            "{}.gz",
384            file_path
385                .file_name()
386                .ok_or_else(|| AppError::Validation("Invalid file path".to_string()))?
387                .to_string_lossy()
388        ));
389
390        tokio::fs::write(&gz_path, compressed)
391            .await
392            .map_err(AppError::Io)?;
393        tokio::fs::remove_file(file_path)
394            .await
395            .map_err(AppError::Io)?;
396
397        Ok(())
398    }
399
400    pub fn get_log_file_size_bytes(&self) -> Result<u64> {
401        if !self.log_file_path.exists() {
402            return Ok(0);
403        }
404        let metadata = std::fs::metadata(&self.log_file_path).map_err(AppError::Io)?;
405        Ok(metadata.len())
406    }
407
408    pub fn list_log_files(&self) -> Result<Vec<PathBuf>> {
409        let parent_dir = match self.log_file_path.parent() {
410            Some(dir) => dir,
411            None => return Ok(Vec::new()),
412        };
413        let base_name = self
414            .log_file_path
415            .file_stem()
416            .unwrap_or_else(|| std::ffi::OsStr::new("server"))
417            .to_string_lossy();
418        let mut files = Vec::new();
419
420        if self.log_file_path.exists() {
421            files.push(self.log_file_path.clone());
422        }
423
424        for i in 1..=10 {
425            for ext in &["log", "log.gz"] {
426                let path = parent_dir.join(format!("{}.{}.{}", base_name, i, ext));
427                if path.exists() {
428                    files.push(path);
429                    break; // Only add one version (prefer compressed)
430                }
431            }
432        }
433
434        Ok(files)
435    }
436
437    // Compute request statistics from the log file
438    pub async fn get_request_stats(&self) -> Result<ServerStats> {
439        use tokio::io::{AsyncBufReadExt, BufReader};
440
441        if !self.log_file_path.exists() {
442            return Ok(ServerStats::default());
443        }
444
445        let file = tokio::fs::File::open(&self.log_file_path)
446            .await
447            .map_err(AppError::Io)?;
448        let mut reader = BufReader::new(file).lines();
449
450        let mut stats = ServerStats::default();
451        let mut unique_ips = std::collections::HashSet::new();
452        let mut response_times = Vec::new();
453
454        while let Some(line) = reader.next_line().await.map_err(AppError::Io)? {
455            if let Ok(entry) = serde_json::from_str::<ServerLogEntry>(&line) {
456                match entry.event_type {
457                    LogEventType::Request => {
458                        stats.total_requests += 1;
459                        unique_ips.insert(entry.ip_address);
460
461                        if let Some(status) = entry.status_code {
462                            if status >= 400 {
463                                stats.error_requests += 1;
464                            }
465                        }
466                        if let Some(rt) = entry.response_time_ms {
467                            response_times.push(rt);
468                        }
469                        if let Some(bytes) = entry.bytes_sent {
470                            stats.total_bytes_sent += bytes;
471                        }
472                    }
473                    LogEventType::SecurityAlert => stats.security_alerts += 1,
474                    LogEventType::PerformanceWarning => stats.performance_warnings += 1,
475                    _ => {}
476                }
477            }
478        }
479
480        stats.unique_ips = unique_ips.len() as u64;
481        if !response_times.is_empty() {
482            stats.avg_response_time =
483                response_times.iter().sum::<u64>() / response_times.len() as u64;
484            stats.max_response_time = *response_times.iter().max().unwrap_or(&0);
485        }
486
487        Ok(stats)
488    }
489
490    pub fn get_config_summary(&self) -> String {
491        format!(
492            "Log Config: {}MB max, {} archives, compression: {}, requests: {}, security: {}, performance: {}",
493            self.config.max_file_size_bytes / 1024 / 1024,
494            self.config.max_archive_files,
495            self.config.compress_archives,
496            self.log_requests,
497            self.log_security,
498            self.log_performance
499        )
500    }
501}
502
/// Aggregate counters computed from a server log file by
/// `ServerLogger::get_request_stats`.
#[derive(Debug, Default)]
pub struct ServerStats {
    /// Number of `Request` entries seen.
    pub total_requests: u64,
    /// Count of distinct client IPs across request entries.
    pub unique_ips: u64,
    /// Requests whose status code was >= 400.
    pub error_requests: u64,
    /// Number of `SecurityAlert` entries.
    pub security_alerts: u64,
    /// Number of `PerformanceWarning` entries.
    pub performance_warnings: u64,
    /// Sum of `bytes_sent` across request entries.
    pub total_bytes_sent: u64,
    /// Mean response time in ms (0 when no samples were recorded).
    pub avg_response_time: u64,
    /// Maximum observed response time in ms.
    pub max_response_time: u64,
}