use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

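/// Runtime statistics for a single logger: total and per-level/per-category
/// counts, byte volume, counters for errors, rate limiting, redaction, and
/// encryption, plus current and peak throughput.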
#[derive(Debug, Clone)]
pub struct LogMetrics {
    total_logs: u64,
    logs_by_level: HashMap<String, u64>,
    logs_by_category: HashMap<String, u64>,
    bytes_logged: u64,
    errors_count: u64,
    rate_limited_count: u64,
    redacted_count: u64,
    encrypted_count: u64,
    start_time: DateTime<Utc>,
    last_log_time: Option<DateTime<Utc>>,
    peak_rate: u64,
    current_rate: u64,
}

impl LogMetrics {
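    /// Creates an empty metrics collector with the start time set to now.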
    pub fn new() -> Self {
        Self {
            total_logs: 0,
            logs_by_level: HashMap::new(),
            logs_by_category: HashMap::new(),
            bytes_logged: 0,
            errors_count: 0,
            rate_limited_count: 0,
            redacted_count: 0,
            encrypted_count: 0,
            start_time: Utc::now(),
            last_log_time: None,
            peak_rate: 0,
            current_rate: 0,
        }
    }

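    /// Records one emitted log entry, updating the total, the per-level and
    /// per-category counts, the byte count, and the last-log timestamp.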
    pub fn record_log(&mut self, level: &str, category: Option<&str>, bytes: usize) {
        self.total_logs += 1;
        self.bytes_logged += bytes as u64;
        self.last_log_time = Some(Utc::now());

        *self.logs_by_level.entry(level.to_string()).or_insert(0) += 1;

        if let Some(cat) = category {
            *self.logs_by_category.entry(cat.to_string()).or_insert(0) += 1;
        }
    }

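    /// Increments the count of logging errors.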
    pub fn record_error(&mut self) {
        self.errors_count += 1;
    }

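    /// Increments the count of rate-limited log entries.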
    pub fn record_rate_limited(&mut self) {
        self.rate_limited_count += 1;
    }

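    /// Increments the count of redaction operations.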
    pub fn record_redaction(&mut self) {
        self.redacted_count += 1;
    }

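    /// Increments the count of encryption operations.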
    pub fn record_encryption(&mut self) {
        self.encrypted_count += 1;
    }

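    /// Sets the current log rate and raises the recorded peak if this value exceeds it.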
    pub fn update_rate(&mut self, rate: u64) {
        self.current_rate = rate;
        if rate > self.peak_rate {
            self.peak_rate = rate;
        }
    }

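    /// Captures a serializable point-in-time snapshot, computing uptime and the
    /// average rate (total logs divided by elapsed seconds, clamped to at least one second).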
    pub fn snapshot(&self) -> MetricsSnapshot {
        let uptime = Utc::now().signed_duration_since(self.start_time);
        let uptime_secs = uptime.num_seconds().max(1) as f64;

        MetricsSnapshot {
            total_logs: self.total_logs,
            logs_by_level: self.logs_by_level.clone(),
            logs_by_category: self.logs_by_category.clone(),
            bytes_logged: self.bytes_logged,
            errors_count: self.errors_count,
            rate_limited_count: self.rate_limited_count,
            redacted_count: self.redacted_count,
            encrypted_count: self.encrypted_count,
            uptime_seconds: uptime.num_seconds() as u64,
            average_rate: self.total_logs as f64 / uptime_secs,
            peak_rate: self.peak_rate,
            current_rate: self.current_rate,
            last_log_time: self.last_log_time,
            snapshot_time: Utc::now(),
        }
    }

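    /// Resets all counters and restarts the uptime clock.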
    pub fn reset(&mut self) {
        *self = Self::new();
    }
}

impl Default for LogMetrics {
    fn default() -> Self {
        Self::new()
    }
}

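/// A serializable point-in-time view of [`LogMetrics`], with derived values
/// such as uptime and average rate computed at capture time.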
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricsSnapshot {
    pub total_logs: u64,
    pub logs_by_level: HashMap<String, u64>,
    pub logs_by_category: HashMap<String, u64>,
    pub bytes_logged: u64,
    pub errors_count: u64,
    pub rate_limited_count: u64,
    pub redacted_count: u64,
    pub encrypted_count: u64,
    pub uptime_seconds: u64,
    pub average_rate: f64,
    pub peak_rate: u64,
    pub current_rate: u64,
    pub last_log_time: Option<DateTime<Utc>>,
    pub snapshot_time: DateTime<Utc>,
}

impl MetricsSnapshot {
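    /// Serializes the snapshot as pretty-printed JSON.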
    pub fn to_json(&self) -> Result<String, serde_json::Error> {
        serde_json::to_string_pretty(self)
    }

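    /// Builds a one-line, human-readable summary of the key counters.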
    pub fn summary(&self) -> String {
        format!(
            "Logs: {} | Errors: {} | Rate Limited: {} | Redacted: {} | Encrypted: {} | Avg Rate: {:.2}/s",
            self.total_logs,
            self.errors_count,
            self.rate_limited_count,
            self.redacted_count,
            self.encrypted_count,
            self.average_rate
        )
    }

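    /// Formats the logged byte count with 1024-based B/KB/MB/GB units.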
    pub fn bytes_human_readable(&self) -> String {
        let bytes = self.bytes_logged;
        if bytes < 1024 {
            format!("{} B", bytes)
        } else if bytes < 1024 * 1024 {
            format!("{:.2} KB", bytes as f64 / 1024.0)
        } else if bytes < 1024 * 1024 * 1024 {
            format!("{:.2} MB", bytes as f64 / (1024.0 * 1024.0))
        } else {
            format!("{:.2} GB", bytes as f64 / (1024.0 * 1024.0 * 1024.0))
        }
    }

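    /// Returns errors as a percentage of total logs (0.0 when nothing has been logged).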
    pub fn error_percentage(&self) -> f64 {
        if self.total_logs == 0 {
            0.0
        } else {
            (self.errors_count as f64 / self.total_logs as f64) * 100.0
        }
    }

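    /// Returns redactions as a percentage of total logs (0.0 when nothing has been logged).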
    pub fn redaction_percentage(&self) -> f64 {
        if self.total_logs == 0 {
            0.0
        } else {
            (self.redacted_count as f64 / self.total_logs as f64) * 100.0
        }
    }
}

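/// Combines [`MetricsSnapshot`]s from multiple loggers into a single aggregated view.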
pub struct MetricsAggregator {
    snapshots: Vec<MetricsSnapshot>,
}

impl MetricsAggregator {
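    /// Creates an aggregator with no snapshots.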
    pub fn new() -> Self {
        Self {
            snapshots: Vec::new(),
        }
    }

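    /// Adds a snapshot to the set being aggregated.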
    pub fn add_snapshot(&mut self, snapshot: MetricsSnapshot) {
        self.snapshots.push(snapshot);
    }

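    /// Sums the collected snapshots and takes the highest peak rate across them.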
    pub fn aggregate(&self) -> AggregatedMetrics {
        let total_logs: u64 = self.snapshots.iter().map(|s| s.total_logs).sum();
        let total_bytes: u64 = self.snapshots.iter().map(|s| s.bytes_logged).sum();
        let total_errors: u64 = self.snapshots.iter().map(|s| s.errors_count).sum();
        let total_rate_limited: u64 = self.snapshots.iter().map(|s| s.rate_limited_count).sum();
        let peak_rate: u64 = self.snapshots.iter().map(|s| s.peak_rate).max().unwrap_or(0);

        AggregatedMetrics {
            logger_count: self.snapshots.len(),
            total_logs,
            total_bytes,
            total_errors,
            total_rate_limited,
            peak_rate,
        }
    }
}

impl Default for MetricsAggregator {
    fn default() -> Self {
        Self::new()
    }
}

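/// Totals across all collected snapshots, plus the highest peak rate observed.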
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AggregatedMetrics {
    pub logger_count: usize,
    pub total_logs: u64,
    pub total_bytes: u64,
    pub total_errors: u64,
    pub total_rate_limited: u64,
    pub peak_rate: u64,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_metrics_recording() {
        let mut metrics = LogMetrics::new();

        metrics.record_log("INFO", Some("auth"), 100);
        metrics.record_log("ERROR", Some("auth"), 200);
        metrics.record_log("INFO", None, 50);

        let snapshot = metrics.snapshot();
        assert_eq!(snapshot.total_logs, 3);
        assert_eq!(snapshot.bytes_logged, 350);
        assert_eq!(snapshot.logs_by_level.get("INFO"), Some(&2));
        assert_eq!(snapshot.logs_by_level.get("ERROR"), Some(&1));
        assert_eq!(snapshot.logs_by_category.get("auth"), Some(&2));
    }

    #[test]
    fn test_error_recording() {
        let mut metrics = LogMetrics::new();

        metrics.record_error();
        metrics.record_error();

        let snapshot = metrics.snapshot();
        assert_eq!(snapshot.errors_count, 2);
    }

    #[test]
    fn test_rate_limiting_recording() {
        let mut metrics = LogMetrics::new();

        metrics.record_rate_limited();
        metrics.record_rate_limited();
        metrics.record_rate_limited();

        let snapshot = metrics.snapshot();
        assert_eq!(snapshot.rate_limited_count, 3);
    }

    #[test]
    fn test_bytes_human_readable() {
        let mut metrics = LogMetrics::new();

        metrics.record_log("INFO", None, 500);
        let snapshot = metrics.snapshot();
        assert_eq!(snapshot.bytes_human_readable(), "500 B");

        let mut metrics2 = LogMetrics::new();
        for _ in 0..1000 {
            metrics2.record_log("INFO", None, 1024);
        }
        let snapshot2 = metrics2.snapshot();
        assert!(snapshot2.bytes_human_readable().contains("KB") || snapshot2.bytes_human_readable().contains("MB"));
    }

    #[test]
    fn test_summary() {
        let mut metrics = LogMetrics::new();

        for _ in 0..100 {
            metrics.record_log("INFO", None, 100);
        }
        metrics.record_error();

        let snapshot = metrics.snapshot();
        let summary = snapshot.summary();
        assert!(summary.contains("100"));
        assert!(summary.contains("Errors: 1"));
    }

    #[test]
    fn test_aggregator() {
        let mut agg = MetricsAggregator::new();

        let mut metrics1 = LogMetrics::new();
        metrics1.record_log("INFO", None, 100);
        agg.add_snapshot(metrics1.snapshot());

        let mut metrics2 = LogMetrics::new();
        metrics2.record_log("INFO", None, 200);
        metrics2.record_log("ERROR", None, 150);
        agg.add_snapshot(metrics2.snapshot());

        let aggregated = agg.aggregate();
        assert_eq!(aggregated.logger_count, 2);
        assert_eq!(aggregated.total_logs, 3);
        assert_eq!(aggregated.total_bytes, 450);
    }

    #[test]
    fn test_error_percentage() {
        let mut metrics = LogMetrics::new();

        for _ in 0..100 {
            metrics.record_log("INFO", None, 10);
        }
        for _ in 0..10 {
            metrics.record_error();
        }

        let snapshot = metrics.snapshot();
        assert!((snapshot.error_percentage() - 10.0).abs() < 0.01);
    }

    #[test]
    fn test_reset() {
        let mut metrics = LogMetrics::new();

        metrics.record_log("INFO", None, 100);
        metrics.record_error();

        metrics.reset();

        let snapshot = metrics.snapshot();
        assert_eq!(snapshot.total_logs, 0);
        assert_eq!(snapshot.errors_count, 0);
    }
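
    #[test]
    fn test_peak_rate_tracking() {
        // The peak rate should retain the highest value seen, while the
        // current rate reflects only the most recent update.
        let mut metrics = LogMetrics::new();

        metrics.update_rate(5);
        metrics.update_rate(12);
        metrics.update_rate(3);

        let snapshot = metrics.snapshot();
        assert_eq!(snapshot.current_rate, 3);
        assert_eq!(snapshot.peak_rate, 12);
    }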
}