// ## FILE: src/server/logging.rs - all fixes applied
use crate::core::config::LoggingConfig;
use crate::core::prelude::*;
use actix_web::HttpMessage; // brings `extensions()` into scope for HttpRequest
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ServerLogEntry {
    pub timestamp: String,
    pub timestamp_unix: u64,
    pub event_type: LogEventType,
    pub ip_address: String,
    pub user_agent: Option<String>,
    pub method: String,
    pub path: String,
    pub status_code: Option<u16>,
    pub response_time_ms: Option<u64>,
    pub bytes_sent: Option<u64>,
    pub referer: Option<String>,
    pub query_string: Option<String>,
    pub headers: HashMap<String, String>,
    pub session_id: Option<String>,
}

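// Each `ServerLogEntry` is serialized as one JSON object per line (JSON Lines).
// Illustrative request entry with made-up values, wrapped here for readability:
//
// {"timestamp":"2024-01-01 12:00:00.000","timestamp_unix":1704110400,
//  "event_type":"Request","ip_address":"203.0.113.7","user_agent":"curl/8.0",
//  "method":"GET","path":"/health","status_code":200,"response_time_ms":12,
//  "bytes_sent":512,"referer":null,"query_string":null,"headers":{},
//  "session_id":null}
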
// `Copy` is derived so `LogEventType` can be passed by value without move errors.
#[derive(Debug, Serialize, Deserialize, Clone, Copy)]
pub enum LogEventType {
    Request,
    ServerStart,
    ServerStop,
    ServerError,
    SecurityAlert,
    PerformanceWarning,
}

#[derive(Debug, Clone)]
pub struct LogRotationConfig {
    pub max_file_size_bytes: u64,
    pub max_archive_files: u8,
    pub compress_archives: bool,
}

impl From<&LoggingConfig> for LogRotationConfig {
    fn from(config: &LoggingConfig) -> Self {
        Self {
            max_file_size_bytes: config.max_file_size_mb * 1024 * 1024,
            max_archive_files: config.max_archive_files,
            compress_archives: config.compress_archives,
        }
    }
}

impl Default for LogRotationConfig {
    fn default() -> Self {
        Self {
            max_file_size_bytes: 100 * 1024 * 1024,
            max_archive_files: 9,
            compress_archives: true,
        }
    }
}

pub struct ServerLogger {
    log_file_path: PathBuf,
    config: LogRotationConfig,
    log_requests: bool,
    log_security: bool,
    log_performance: bool,
}

impl ServerLogger {
    pub fn new_with_config(
        server_name: &str,
        port: u16,
        logging_config: &LoggingConfig,
    ) -> Result<Self> {
        let exe_path = std::env::current_exe().map_err(AppError::Io)?;
        let base_dir = exe_path.parent().ok_or_else(|| {
            AppError::Validation("Cannot determine executable directory".to_string())
        })?;

        let log_file_path = base_dir
            .join(".rss")
            .join("servers")
            .join(format!("{}-[{}].log", server_name, port));

        if let Some(parent) = log_file_path.parent() {
            std::fs::create_dir_all(parent).map_err(AppError::Io)?;
        }

        Ok(Self {
            log_file_path,
            config: LogRotationConfig::from(logging_config),
            log_requests: logging_config.log_requests,
            log_security: logging_config.log_security_alerts,
            log_performance: logging_config.log_performance,
        })
    }

    pub fn new(server_name: &str, port: u16) -> Result<Self> {
        Self::new_with_config(server_name, port, &LoggingConfig::default())
    }

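    // For example, a hypothetical server named `demo` on port 8080 logs to
    // `<exe_dir>/.rss/servers/demo-[8080].log`, per the path built in
    // `new_with_config` above.
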
    // Simplified system-log methods (server start/stop).
    pub async fn log_server_start(&self) -> Result<()> {
        self.write_system_entry(LogEventType::ServerStart).await
    }

    pub async fn log_server_stop(&self) -> Result<()> {
        self.write_system_entry(LogEventType::ServerStop).await
    }

    async fn write_system_entry(&self, event_type: LogEventType) -> Result<()> {
        let entry = ServerLogEntry {
            timestamp: chrono::Local::now()
                .format("%Y-%m-%d %H:%M:%S%.3f")
                .to_string(),
            timestamp_unix: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs(),
            event_type,
            ip_address: "127.0.0.1".to_string(),
            user_agent: None,
            method: "SYSTEM".to_string(),
            path: "/".to_string(),
            status_code: None,
            response_time_ms: None,
            bytes_sent: None,
            referer: None,
            query_string: None,
            headers: HashMap::new(),
            session_id: None,
        };
        self.write_log_entry(entry).await
    }

    // Optimized request logging.
    pub async fn log_request(
        &self,
        req: &actix_web::HttpRequest,
        status: u16,
        response_time: u64,
        bytes_sent: u64,
    ) -> Result<()> {
        if !self.log_requests {
            return Ok(());
        }

        let ip = {
            let connection_info = req.connection_info();
            connection_info
                .realip_remote_addr()
                .or_else(|| connection_info.peer_addr())
                .unwrap_or("unknown")
                .split(':')
                .next()
                .unwrap_or("unknown")
                .to_string()
        };

        // Keep only relevant headers; redact sensitive ones.
        let headers = req
            .headers()
            .iter()
            .filter_map(|(name, value)| {
                let header_name = name.as_str().to_lowercase();
                match header_name.as_str() {
                    "authorization" | "cookie" | "x-api-key" => {
                        Some((name.as_str().to_string(), "[FILTERED]".to_string()))
                    }
                    _ => value
                        .to_str()
                        .ok()
                        .map(|v| (name.as_str().to_string(), v.to_string())),
                }
            })
            .collect();

        let entry = ServerLogEntry {
            timestamp: chrono::Local::now()
                .format("%Y-%m-%d %H:%M:%S%.3f")
                .to_string(),
            timestamp_unix: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs(),
            event_type: LogEventType::Request,
            ip_address: ip,
            user_agent: req
                .headers()
                .get("user-agent")
                .and_then(|h| h.to_str().ok())
                .map(String::from),
            method: req.method().to_string(),
            path: req.path().to_string(),
            status_code: Some(status),
            response_time_ms: Some(response_time),
            bytes_sent: Some(bytes_sent),
            referer: req
                .headers()
                .get("referer")
                .and_then(|h| h.to_str().ok())
                .map(String::from),
            query_string: if req.query_string().is_empty() {
                None
            } else {
                Some(req.query_string().to_string())
            },
            headers,
            session_id: req.extensions().get::<String>().cloned(), // requires the `HttpMessage` import above
        };

        self.write_log_entry(entry).await
    }

    // Simplified alert logging.
    pub async fn log_security_alert(&self, ip: &str, reason: &str, details: &str) -> Result<()> {
        if !self.log_security {
            return Ok(());
        }
        self.write_alert_entry(LogEventType::SecurityAlert, ip, reason, details, None)
            .await
    }

    pub async fn log_performance_warning(
        &self,
        metric: &str,
        value: u64,
        threshold: u64,
    ) -> Result<()> {
        if !self.log_performance {
            return Ok(());
        }
        self.write_alert_entry(
            LogEventType::PerformanceWarning,
            "127.0.0.1",
            metric,
            &format!("value={}, threshold={}", value, threshold),
            Some(value),
        )
        .await
    }

    // Shared write_alert_entry helper for security and performance alerts.
    async fn write_alert_entry(
        &self,
        event_type: LogEventType,
        ip: &str,
        reason: &str,
        details: &str,
        response_time: Option<u64>,
    ) -> Result<()> {
        let mut headers = HashMap::new();
        headers.insert("alert_reason".to_string(), reason.to_string());
        headers.insert("alert_details".to_string(), details.to_string());

        let method_name = match event_type {
            LogEventType::SecurityAlert => "SECURITY",
            LogEventType::PerformanceWarning => "PERFORMANCE",
            _ => "SYSTEM",
        };

        let entry = ServerLogEntry {
            timestamp: chrono::Local::now()
                .format("%Y-%m-%d %H:%M:%S%.3f")
                .to_string(),
            timestamp_unix: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs(),
            event_type, // `LogEventType` is `Copy`, so no move occurs here
            ip_address: ip.to_string(),
            user_agent: None,
            method: method_name.to_string(),
            path: "/".to_string(),
            status_code: None,
            response_time_ms: response_time,
            bytes_sent: None,
            referer: None,
            query_string: None,
            headers,
            session_id: None,
        };

        self.write_log_entry(entry).await
    }

    pub async fn write_log_entry(&self, entry: ServerLogEntry) -> Result<()> {
        self.check_and_rotate_if_needed().await?;

        let json_line = serde_json::to_string(&entry)
            .map_err(|e| AppError::Validation(format!("Failed to serialize log entry: {}", e)))?;

        let mut file = tokio::fs::OpenOptions::new()
            .create(true)
            .append(true)
            .open(&self.log_file_path)
            .await
            .map_err(AppError::Io)?;

        use tokio::io::AsyncWriteExt;
        file.write_all(format!("{}\n", json_line).as_bytes())
            .await
            .map_err(AppError::Io)?;
        file.flush().await.map_err(AppError::Io)?;
        Ok(())
    }

    async fn check_and_rotate_if_needed(&self) -> Result<()> {
        if !self.log_file_path.exists() {
            return Ok(());
        }

        let metadata = tokio::fs::metadata(&self.log_file_path)
            .await
            .map_err(AppError::Io)?;
        if metadata.len() >= self.config.max_file_size_bytes {
            self.rotate_log_files().await?;
        }
        Ok(())
    }

    // Rotate archives: shift existing archives up by one index, then move the
    // current log into slot 1. Both compressed and uncompressed archives are
    // shifted so rotation also works when `compress_archives` is disabled.
    async fn rotate_log_files(&self) -> Result<()> {
        let base_path = &self.log_file_path;
        let base_name = base_path.file_stem().unwrap().to_string_lossy();
        let parent_dir = base_path.parent().unwrap();

        // Rotate existing archives
        for i in (1..self.config.max_archive_files).rev() {
            for ext in &["log.gz", "log"] {
                let old_path = parent_dir.join(format!("{}.{}.{}", base_name, i, ext));
                let new_path = parent_dir.join(format!("{}.{}.{}", base_name, i + 1, ext));

                if old_path.exists() {
                    tokio::fs::rename(&old_path, &new_path)
                        .await
                        .map_err(AppError::Io)?;
                }
            }
        }

        // Move current log to archive
        let archive_path = parent_dir.join(format!("{}.1.log", base_name));
        tokio::fs::rename(base_path, &archive_path)
            .await
            .map_err(AppError::Io)?;

        if self.config.compress_archives {
            self.compress_log_file(&archive_path).await?;
        }

        // Cleanup archives that rotated past the configured limit
        for ext in &["log.gz", "log"] {
            let cleanup_path = parent_dir.join(format!(
                "{}.{}.{}",
                base_name,
                self.config.max_archive_files + 1,
                ext
            ));
            if cleanup_path.exists() {
                tokio::fs::remove_file(&cleanup_path)
                    .await
                    .map_err(AppError::Io)?;
            }
        }

        Ok(())
    }

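    // Resulting on-disk layout after several rotations, for a hypothetical
    // `demo-[8080].log` with the default config (compressed archives, 9 kept):
    //
    //   demo-[8080].log        <- active log
    //   demo-[8080].1.log.gz   <- newest archive
    //   ...
    //   demo-[8080].9.log.gz   <- oldest archive
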
    async fn compress_log_file(&self, file_path: &std::path::Path) -> Result<()> {
        use flate2::write::GzEncoder;
        use flate2::Compression;
        use std::io::Write;

        let content = tokio::fs::read(file_path).await.map_err(AppError::Io)?;
        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
        encoder.write_all(&content).map_err(AppError::Io)?;
        let compressed = encoder.finish().map_err(AppError::Io)?;

        let gz_path = file_path.with_file_name(format!(
            "{}.gz",
            file_path
                .file_name()
                .ok_or_else(|| AppError::Validation("Invalid file path".to_string()))?
                .to_string_lossy()
        ));

        tokio::fs::write(&gz_path, compressed)
            .await
            .map_err(AppError::Io)?;
        tokio::fs::remove_file(file_path)
            .await
            .map_err(AppError::Io)?;

        Ok(())
    }

    pub fn get_log_file_size_bytes(&self) -> Result<u64> {
        if !self.log_file_path.exists() {
            return Ok(0);
        }
        let metadata = std::fs::metadata(&self.log_file_path).map_err(AppError::Io)?;
        Ok(metadata.len())
    }

    pub fn list_log_files(&self) -> Result<Vec<PathBuf>> {
        let parent_dir = self.log_file_path.parent().unwrap();
        let base_name = self.log_file_path.file_stem().unwrap().to_string_lossy();
        let mut files = Vec::new();

        if self.log_file_path.exists() {
            files.push(self.log_file_path.clone());
        }

        for i in 1..=self.config.max_archive_files {
            for ext in &["log.gz", "log"] {
                let path = parent_dir.join(format!("{}.{}.{}", base_name, i, ext));
                if path.exists() {
                    files.push(path);
                    break; // only add one version per index (prefer compressed)
                }
            }
        }

        Ok(files)
    }

    // Optimized stats calculation: stream the current log file line by line.
    pub async fn get_request_stats(&self) -> Result<ServerStats> {
        use tokio::io::{AsyncBufReadExt, BufReader};

        if !self.log_file_path.exists() {
            return Ok(ServerStats::default());
        }

        let file = tokio::fs::File::open(&self.log_file_path)
            .await
            .map_err(AppError::Io)?;
        let mut reader = BufReader::new(file).lines();

        let mut stats = ServerStats::default();
        let mut unique_ips = std::collections::HashSet::new();
        let mut response_times = Vec::new();

        while let Some(line) = reader.next_line().await.map_err(AppError::Io)? {
            if let Ok(entry) = serde_json::from_str::<ServerLogEntry>(&line) {
                match entry.event_type {
                    LogEventType::Request => {
                        stats.total_requests += 1;
                        unique_ips.insert(entry.ip_address);

                        if let Some(status) = entry.status_code {
                            if status >= 400 {
                                stats.error_requests += 1;
                            }
                        }
                        if let Some(rt) = entry.response_time_ms {
                            response_times.push(rt);
                        }
                        if let Some(bytes) = entry.bytes_sent {
                            stats.total_bytes_sent += bytes;
                        }
                    }
                    LogEventType::SecurityAlert => stats.security_alerts += 1,
                    LogEventType::PerformanceWarning => stats.performance_warnings += 1,
                    _ => {}
                }
            }
        }

        stats.unique_ips = unique_ips.len() as u64;
        if !response_times.is_empty() {
            stats.avg_response_time =
                response_times.iter().sum::<u64>() / response_times.len() as u64;
            stats.max_response_time = *response_times.iter().max().unwrap_or(&0);
        }

        Ok(stats)
    }

    pub fn get_config_summary(&self) -> String {
        format!(
            "Log Config: {}MB max, {} archives, compression: {}, requests: {}, security: {}, performance: {}",
            self.config.max_file_size_bytes / 1024 / 1024,
            self.config.max_archive_files,
            self.config.compress_archives,
            self.log_requests,
            self.log_security,
            self.log_performance
        )
    }
}

#[derive(Debug, Default)]
pub struct ServerStats {
    pub total_requests: u64,
    pub unique_ips: u64,
    pub error_requests: u64,
    pub security_alerts: u64,
    pub performance_warnings: u64,
    pub total_bytes_sent: u64,
    pub avg_response_time: u64,
    pub max_response_time: u64,
}
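
#[cfg(test)]
mod tests {
    use super::*;

    // Minimal usage sketch, not part of the original module. Assumptions: tokio's
    // `macros` feature is available for `#[tokio::test]`, `AppError` implements
    // `Debug` so the test can return `Result<()>`, and actix-web's `test` helpers
    // are in use. The logger writes real files under `.rss/servers/` next to the
    // test executable, so the test is `#[ignore]`d by default.
    #[tokio::test]
    #[ignore]
    async fn logs_events_and_reports_stats() -> Result<()> {
        let logger = ServerLogger::new("example-server", 8080)?;
        logger.log_server_start().await?;

        // Build a synthetic request with actix-web's test helpers.
        let req = actix_web::test::TestRequest::get()
            .uri("/health?verbose=1")
            .insert_header(("user-agent", "curl/8.0"))
            .to_http_request();
        logger.log_request(&req, 200, 12, 512).await?;
        logger
            .log_performance_warning("response_time_ms", 1500, 1000)
            .await?;

        // The server-start entry is always written, so the log file is non-empty.
        assert!(logger.get_log_file_size_bytes()? > 0);

        // Whether request/performance entries appear depends on `LoggingConfig::default()`.
        let stats = logger.get_request_stats().await?;
        println!("{:?} | {}", stats, logger.get_config_summary());

        logger.log_server_stop().await
    }
}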