leptos_sync_core/reliability/monitoring.rs

//! Monitoring System
//!
//! This module provides comprehensive monitoring capabilities including:
//! - Metrics collection and aggregation
//! - Alert management and notification
//! - Health reporting and status monitoring
//! - Performance tracking and analysis
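//!
//! A minimal usage sketch (assumes a Tokio async context and that this module
//! is reachable at `leptos_sync_core::reliability::monitoring`; adjust the
//! import path to your crate layout, and note that the metric name below is
//! illustrative):
//!
//! ```ignore
//! use std::collections::HashMap;
//! use leptos_sync_core::reliability::monitoring::{
//!     AggregationType, Metric, ReliabilityMonitor, TimeRange,
//! };
//!
//! async fn example() -> Result<(), String> {
//!     let mut monitor = ReliabilityMonitor::new();
//!     monitor.initialize().await?;
//!
//!     // Record a single metric sample.
//!     monitor
//!         .record_metric(Metric {
//!             name: "sync.latency_ms".to_string(),
//!             value: 12.5,
//!             timestamp: 1_700_000_000,
//!             tags: HashMap::new(),
//!         })
//!         .await?;
//!
//!     // Query raw samples and an aggregate over a time range.
//!     let range = TimeRange { start: 1_700_000_000, end: 1_700_000_060 };
//!     let samples = monitor.get_metrics("sync.latency_ms", range.clone()).await?;
//!     let average = monitor
//!         .get_aggregated_metrics("sync.latency_ms", AggregationType::Average, range)
//!         .await?;
//!     println!("{} samples, average {}", samples.len(), average.value);
//!     Ok(())
//! }
//! ```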

use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock;
use serde::{Deserialize, Serialize};

/// Reliability monitoring system
#[derive(Debug, Clone)]
pub struct ReliabilityMonitor {
    /// Metrics collector
    metrics_collector: MetricsCollector,
    /// Alert manager
    alert_manager: AlertManager,
    /// Health reporter
    health_reporter: HealthReporter,
    /// Monitoring statistics
    stats: Arc<RwLock<MonitoringStats>>,
    /// Instant used for uptime reporting (reset on each initialization)
    start_time: Instant,
    /// Whether the system is initialized
    initialized: bool,
}

impl ReliabilityMonitor {
    /// Create a new reliability monitor
    pub fn new() -> Self {
        Self {
            metrics_collector: MetricsCollector::new(),
            alert_manager: AlertManager::new(),
            health_reporter: HealthReporter::new(),
            stats: Arc::new(RwLock::new(MonitoringStats::new())),
            start_time: Instant::now(),
            initialized: false,
        }
    }

    /// Create a new reliability monitor with configuration
    pub fn with_config(config: MonitorConfig) -> Self {
        Self {
            metrics_collector: MetricsCollector::with_config(config.metrics_config),
            alert_manager: AlertManager::with_config(config.alert_config),
            health_reporter: HealthReporter::with_config(config.health_config),
            stats: Arc::new(RwLock::new(MonitoringStats::new())),
            start_time: Instant::now(),
            initialized: false,
        }
    }

    /// Initialize the monitoring system
    pub async fn initialize(&mut self) -> Result<(), String> {
        self.metrics_collector.initialize().await?;
        self.alert_manager.initialize().await?;
        self.health_reporter.initialize().await?;

        let mut stats = self.stats.write().await;
        stats.reset();

        // Uptime is reported relative to the most recent initialization.
        self.start_time = Instant::now();
        self.initialized = true;
        Ok(())
    }

    /// Shutdown the monitoring system
    pub async fn shutdown(&mut self) -> Result<(), String> {
        self.initialized = false;
        Ok(())
    }

    /// Check if the system is initialized
    pub fn is_initialized(&self) -> bool {
        self.initialized
    }

    /// Record a metric
    pub async fn record_metric(&self, metric: Metric) -> Result<(), String> {
        if !self.initialized {
            return Err("Monitoring system not initialized".to_string());
        }

        self.metrics_collector.record(metric.clone()).await?;

        // Check for alerts
        if let Some(alert) = self.alert_manager.check_metric(&metric).await? {
            self.alert_manager.send_alert(alert).await?;
        }

        // Update statistics
        let mut stats = self.stats.write().await;
        stats.record_metric(&metric);

        Ok(())
    }

    /// Get metrics for a specific time range
    pub async fn get_metrics(&self, metric_name: &str, time_range: TimeRange) -> Result<Vec<Metric>, String> {
        if !self.initialized {
            return Err("Monitoring system not initialized".to_string());
        }

        self.metrics_collector.get_metrics(metric_name, time_range).await
    }

    /// Get aggregated metrics
    pub async fn get_aggregated_metrics(&self, metric_name: &str, aggregation: AggregationType, time_range: TimeRange) -> Result<AggregatedMetric, String> {
        if !self.initialized {
            return Err("Monitoring system not initialized".to_string());
        }

        self.metrics_collector.get_aggregated(metric_name, aggregation, time_range).await
    }

    /// Get system health status
    pub async fn get_health_status(&self) -> Result<HealthStatus, String> {
        if !self.initialized {
            return Err("Monitoring system not initialized".to_string());
        }

        self.health_reporter.get_status().await
    }

    /// Get monitoring statistics
    pub async fn get_stats(&self) -> MonitoringStats {
        self.stats.read().await.clone()
    }

    /// Get system status
    pub async fn get_status(&self) -> Result<SystemStatus, String> {
        if !self.initialized {
            return Err("Monitoring system not initialized".to_string());
        }

        let health_status = self.get_health_status().await?;
        let metrics_count = self.metrics_collector.get_metrics_count().await?;
        let alerts_count = self.alert_manager.get_active_alerts_count().await?;

        Ok(SystemStatus {
            health_status,
            metrics_count,
            alerts_count,
            uptime: self.get_uptime().await,
            // A system clock before the Unix epoch is not expected; fall back
            // to zero rather than panicking.
            last_updated: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs(),
        })
    }

    /// Get system uptime
    async fn get_uptime(&self) -> Duration {
        // Measured from the most recent call to `initialize`.
        self.start_time.elapsed()
    }
}

/// Metrics collector for gathering and storing metrics
#[derive(Debug, Clone)]
pub struct MetricsCollector {
    /// Stored metrics
    metrics: Arc<RwLock<HashMap<String, Vec<Metric>>>>,
    /// Maximum number of metrics to store per name
    max_metrics_per_name: usize,
    /// Whether the collector is initialized
    initialized: bool,
}

impl MetricsCollector {
    /// Create a new metrics collector
    pub fn new() -> Self {
        Self {
            metrics: Arc::new(RwLock::new(HashMap::new())),
            max_metrics_per_name: 1000,
            initialized: false,
        }
    }

    /// Create a new metrics collector with configuration
    pub fn with_config(config: MetricsConfig) -> Self {
        Self {
            metrics: Arc::new(RwLock::new(HashMap::new())),
            max_metrics_per_name: config.max_metrics_per_name,
            initialized: false,
        }
    }

    /// Initialize the metrics collector
    pub async fn initialize(&mut self) -> Result<(), String> {
        self.initialized = true;
        Ok(())
    }

    /// Record a metric
    pub async fn record(&self, metric: Metric) -> Result<(), String> {
        if !self.initialized {
            return Err("Metrics collector not initialized".to_string());
        }

        let mut metrics = self.metrics.write().await;
        let metric_list = metrics.entry(metric.name.clone()).or_insert_with(Vec::new);

        // Add the metric
        metric_list.push(metric);

        // Trim if we exceed the maximum
        if metric_list.len() > self.max_metrics_per_name {
            metric_list.drain(0..metric_list.len() - self.max_metrics_per_name);
        }

        Ok(())
    }

    /// Get metrics for a specific name and time range
    pub async fn get_metrics(&self, metric_name: &str, time_range: TimeRange) -> Result<Vec<Metric>, String> {
        if !self.initialized {
            return Err("Metrics collector not initialized".to_string());
        }

        let metrics = self.metrics.read().await;
        if let Some(metric_list) = metrics.get(metric_name) {
            let filtered_metrics: Vec<Metric> = metric_list
                .iter()
                .filter(|metric| {
                    metric.timestamp >= time_range.start && metric.timestamp <= time_range.end
                })
                .cloned()
                .collect();
            Ok(filtered_metrics)
        } else {
            Ok(Vec::new())
        }
    }

    /// Get aggregated metrics
    pub async fn get_aggregated(&self, metric_name: &str, aggregation: AggregationType, time_range: TimeRange) -> Result<AggregatedMetric, String> {
        if !self.initialized {
            return Err("Metrics collector not initialized".to_string());
        }

        let metrics = self.get_metrics(metric_name, time_range.clone()).await?;

        if metrics.is_empty() {
            return Ok(AggregatedMetric {
                name: metric_name.to_string(),
                aggregation,
                value: 0.0,
                count: 0,
                time_range,
            });
        }

        let values: Vec<f64> = metrics.iter().map(|m| m.value).collect();
        let aggregated_value = match aggregation {
            AggregationType::Sum => values.iter().sum(),
            AggregationType::Average => values.iter().sum::<f64>() / values.len() as f64,
            AggregationType::Min => values.iter().fold(f64::INFINITY, |a, &b| a.min(b)),
            AggregationType::Max => values.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b)),
            AggregationType::Count => values.len() as f64,
        };

        Ok(AggregatedMetric {
            name: metric_name.to_string(),
            aggregation,
            value: aggregated_value,
            count: metrics.len(),
            time_range,
        })
    }

    /// Get the number of metrics stored
    pub async fn get_metrics_count(&self) -> Result<usize, String> {
        if !self.initialized {
            return Err("Metrics collector not initialized".to_string());
        }

        let metrics = self.metrics.read().await;
        Ok(metrics.values().map(|v| v.len()).sum())
    }
}

/// Alert manager for handling alerts and notifications
#[derive(Debug, Clone)]
pub struct AlertManager {
    /// Alert rules
    rules: Arc<RwLock<Vec<AlertRule>>>,
    /// Active alerts
    active_alerts: Arc<RwLock<Vec<Alert>>>,
    /// Whether the manager is initialized
    initialized: bool,
}

impl AlertManager {
    /// Create a new alert manager
    pub fn new() -> Self {
        Self {
            rules: Arc::new(RwLock::new(Vec::new())),
            active_alerts: Arc::new(RwLock::new(Vec::new())),
            initialized: false,
        }
    }

    /// Create a new alert manager with configuration
    pub fn with_config(config: AlertConfig) -> Self {
        Self {
            rules: Arc::new(RwLock::new(config.rules)),
            active_alerts: Arc::new(RwLock::new(Vec::new())),
            initialized: false,
        }
    }

    /// Initialize the alert manager
    pub async fn initialize(&mut self) -> Result<(), String> {
        self.initialized = true;
        Ok(())
    }

    /// Check if a metric triggers any alerts
    pub async fn check_metric(&self, metric: &Metric) -> Result<Option<Alert>, String> {
        if !self.initialized {
            return Err("Alert manager not initialized".to_string());
        }

        let rules = self.rules.read().await;
        for rule in rules.iter() {
            if rule.metric_name == metric.name && rule.condition.evaluate(metric.value) {
                let alert = Alert {
                    id: format!("{}-{}", rule.id, metric.timestamp),
                    rule_id: rule.id.clone(),
                    metric_name: metric.name.clone(),
                    value: metric.value,
                    threshold: rule.condition.threshold,
                    severity: rule.severity.clone(),
                    message: rule.message.clone(),
                    timestamp: metric.timestamp,
                    acknowledged: false,
                };
                return Ok(Some(alert));
            }
        }

        Ok(None)
    }

    /// Send an alert
    pub async fn send_alert(&self, alert: Alert) -> Result<(), String> {
        if !self.initialized {
            return Err("Alert manager not initialized".to_string());
        }

        let mut active_alerts = self.active_alerts.write().await;
        active_alerts.push(alert);

        // Keep only the last 100 alerts
        if active_alerts.len() > 100 {
            let len = active_alerts.len();
            active_alerts.drain(0..len - 100);
        }

        Ok(())
    }

    /// Get active alerts count
    pub async fn get_active_alerts_count(&self) -> Result<usize, String> {
        if !self.initialized {
            return Err("Alert manager not initialized".to_string());
        }

        let active_alerts = self.active_alerts.read().await;
        Ok(active_alerts.len())
    }
}

/// Health reporter for system health monitoring
#[derive(Debug, Clone)]
pub struct HealthReporter {
    /// Health checks
    health_checks: Arc<RwLock<Vec<HealthCheck>>>,
    /// Whether the reporter is initialized
    initialized: bool,
}

impl HealthReporter {
    /// Create a new health reporter
    pub fn new() -> Self {
        Self {
            health_checks: Arc::new(RwLock::new(Vec::new())),
            initialized: false,
        }
    }

    /// Create a new health reporter with configuration
    pub fn with_config(config: HealthConfig) -> Self {
        Self {
            health_checks: Arc::new(RwLock::new(config.health_checks)),
            initialized: false,
        }
    }

    /// Initialize the health reporter
    pub async fn initialize(&mut self) -> Result<(), String> {
        self.initialized = true;
        Ok(())
    }

    /// Get system health status
    pub async fn get_status(&self) -> Result<HealthStatus, String> {
        if !self.initialized {
            return Err("Health reporter not initialized".to_string());
        }

        let health_checks = self.health_checks.read().await;
        let mut overall_healthy = true;

        for check in health_checks.iter() {
            if !check.is_healthy {
                overall_healthy = false;
                break;
            }
        }

        Ok(if overall_healthy {
            HealthStatus::Healthy
        } else {
            HealthStatus::Unhealthy
        })
    }
}

/// Metric data structure
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Metric {
    /// Metric name
    pub name: String,
    /// Metric value
    pub value: f64,
    /// Metric timestamp
    pub timestamp: u64,
    /// Metric tags
    pub tags: HashMap<String, String>,
}

/// Time range for metric queries
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct TimeRange {
    /// Start timestamp
    pub start: u64,
    /// End timestamp
    pub end: u64,
}

/// Aggregation types for metrics
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum AggregationType {
    /// Sum of values
    Sum,
    /// Average of values
    Average,
    /// Minimum value
    Min,
    /// Maximum value
    Max,
    /// Count of values
    Count,
}

/// Aggregated metric result
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct AggregatedMetric {
    /// Metric name
    pub name: String,
    /// Aggregation type
    pub aggregation: AggregationType,
    /// Aggregated value
    pub value: f64,
    /// Number of metrics aggregated
    pub count: usize,
    /// Time range
    pub time_range: TimeRange,
}

/// Alert rule for triggering alerts
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct AlertRule {
    /// Rule ID
    pub id: String,
    /// Metric name to monitor
    pub metric_name: String,
    /// Alert condition
    pub condition: AlertCondition,
    /// Alert severity
    pub severity: AlertSeverity,
    /// Alert message
    pub message: String,
}

/// Alert condition for evaluating metrics
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct AlertCondition {
    /// Comparison operator
    pub operator: ComparisonOperator,
    /// Threshold value
    pub threshold: f64,
}

impl AlertCondition {
    /// Evaluate the condition against a metric value
    pub fn evaluate(&self, value: f64) -> bool {
        match self.operator {
            ComparisonOperator::GreaterThan => value > self.threshold,
            ComparisonOperator::GreaterThanOrEqual => value >= self.threshold,
            ComparisonOperator::LessThan => value < self.threshold,
            ComparisonOperator::LessThanOrEqual => value <= self.threshold,
            ComparisonOperator::Equal => (value - self.threshold).abs() < f64::EPSILON,
            ComparisonOperator::NotEqual => (value - self.threshold).abs() >= f64::EPSILON,
        }
    }
}

/// Comparison operators for alert conditions
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum ComparisonOperator {
    /// Greater than
    GreaterThan,
    /// Greater than or equal
    GreaterThanOrEqual,
    /// Less than
    LessThan,
    /// Less than or equal
    LessThanOrEqual,
    /// Equal
    Equal,
    /// Not equal
    NotEqual,
}

/// Alert severity levels
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum AlertSeverity {
    /// Low severity
    Low,
    /// Medium severity
    Medium,
    /// High severity
    High,
    /// Critical severity
    Critical,
}

/// Alert data structure
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Alert {
    /// Alert ID
    pub id: String,
    /// Rule ID that triggered the alert
    pub rule_id: String,
    /// Metric name
    pub metric_name: String,
    /// Metric value that triggered the alert
    pub value: f64,
    /// Threshold value
    pub threshold: f64,
    /// Alert severity
    pub severity: AlertSeverity,
    /// Alert message
    pub message: String,
    /// Alert timestamp
    pub timestamp: u64,
    /// Whether the alert has been acknowledged
    pub acknowledged: bool,
}

/// Health check data structure
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HealthCheck {
    /// Health check name
    pub name: String,
    /// Whether the check is healthy
    pub is_healthy: bool,
    /// Health check message
    pub message: String,
    /// Last check timestamp
    pub last_checked: u64,
}

/// Health status
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum HealthStatus {
    /// System is healthy
    Healthy,
    /// System is unhealthy
    Unhealthy,
}

/// System status
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct SystemStatus {
    /// Overall health status
    pub health_status: HealthStatus,
    /// Number of metrics collected
    pub metrics_count: usize,
    /// Number of active alerts
    pub alerts_count: usize,
    /// System uptime
    pub uptime: Duration,
    /// Last updated timestamp
    pub last_updated: u64,
}

/// Monitoring statistics
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MonitoringStats {
    /// Total metrics recorded
    pub total_metrics: usize,
    /// Total alerts triggered
    pub total_alerts: usize,
    /// Total health checks performed
    pub total_health_checks: usize,
}

impl MonitoringStats {
    /// Create new monitoring statistics
    pub fn new() -> Self {
        Self {
            total_metrics: 0,
            total_alerts: 0,
            total_health_checks: 0,
        }
    }

    /// Reset all statistics
    pub fn reset(&mut self) {
        self.total_metrics = 0;
        self.total_alerts = 0;
        self.total_health_checks = 0;
    }

    /// Record a metric
    pub fn record_metric(&mut self, _metric: &Metric) {
        self.total_metrics += 1;
    }
}

/// Monitoring configuration
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MonitorConfig {
    /// Metrics configuration
    pub metrics_config: MetricsConfig,
    /// Alert configuration
    pub alert_config: AlertConfig,
    /// Health configuration
    pub health_config: HealthConfig,
}

impl Default for MonitorConfig {
    fn default() -> Self {
        Self {
            metrics_config: MetricsConfig::default(),
            alert_config: AlertConfig::default(),
            health_config: HealthConfig::default(),
        }
    }
}

/// Metrics configuration
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MetricsConfig {
    /// Maximum number of metrics to store per name
    pub max_metrics_per_name: usize,
}

impl Default for MetricsConfig {
    fn default() -> Self {
        Self {
            max_metrics_per_name: 1000,
        }
    }
}

/// Alert configuration
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct AlertConfig {
    /// Alert rules
    pub rules: Vec<AlertRule>,
}

impl Default for AlertConfig {
    fn default() -> Self {
        Self {
            rules: Vec::new(),
        }
    }
}

/// Health configuration
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HealthConfig {
    /// Health checks
    pub health_checks: Vec<HealthCheck>,
}

impl Default for HealthConfig {
    fn default() -> Self {
        Self {
            health_checks: Vec::new(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_reliability_monitor_creation() {
        let monitor = ReliabilityMonitor::new();
        assert!(!monitor.is_initialized());
    }

    #[tokio::test]
    async fn test_reliability_monitor_initialization() {
        let mut monitor = ReliabilityMonitor::new();
        let result = monitor.initialize().await;
        assert!(result.is_ok());
        assert!(monitor.is_initialized());
    }

    #[tokio::test]
    async fn test_reliability_monitor_shutdown() {
        let mut monitor = ReliabilityMonitor::new();
        monitor.initialize().await.unwrap();
        let result = monitor.shutdown().await;
        assert!(result.is_ok());
        assert!(!monitor.is_initialized());
    }

    #[tokio::test]
    async fn test_metric_recording() {
        let mut monitor = ReliabilityMonitor::new();
        monitor.initialize().await.unwrap();

        let metric = Metric {
            name: "test_metric".to_string(),
            value: 42.0,
            timestamp: 1234567890,
            tags: HashMap::new(),
        };

        let result = monitor.record_metric(metric).await;
        assert!(result.is_ok());

        let stats = monitor.get_stats().await;
        assert_eq!(stats.total_metrics, 1);
    }

    #[tokio::test]
    async fn test_metrics_retrieval() {
        let mut monitor = ReliabilityMonitor::new();
        monitor.initialize().await.unwrap();

        let metric = Metric {
            name: "test_metric".to_string(),
            value: 42.0,
            timestamp: 1234567890,
            tags: HashMap::new(),
        };

        monitor.record_metric(metric).await.unwrap();

        let time_range = TimeRange {
            start: 1234567890,
            end: 1234567890,
        };

        let metrics = monitor.get_metrics("test_metric", time_range).await.unwrap();
        assert_eq!(metrics.len(), 1);
        assert_eq!(metrics[0].value, 42.0);
    }

    #[tokio::test]
    async fn test_metrics_aggregation() {
        let mut monitor = ReliabilityMonitor::new();
        monitor.initialize().await.unwrap();

        // Record multiple metrics
        for i in 0..5 {
            let metric = Metric {
                name: "test_metric".to_string(),
                value: i as f64,
                timestamp: 1234567890 + i,
                tags: HashMap::new(),
            };
            monitor.record_metric(metric).await.unwrap();
        }

        let time_range = TimeRange {
            start: 1234567890,
            end: 1234567895,
        };

        let aggregated = monitor.get_aggregated_metrics("test_metric", AggregationType::Sum, time_range.clone()).await.unwrap();
        assert_eq!(aggregated.value, 10.0); // 0 + 1 + 2 + 3 + 4
        assert_eq!(aggregated.count, 5);

        let aggregated = monitor.get_aggregated_metrics("test_metric", AggregationType::Average, time_range.clone()).await.unwrap();
        assert_eq!(aggregated.value, 2.0); // (0 + 1 + 2 + 3 + 4) / 5
        assert_eq!(aggregated.count, 5);

        let aggregated = monitor.get_aggregated_metrics("test_metric", AggregationType::Min, time_range.clone()).await.unwrap();
        assert_eq!(aggregated.value, 0.0);
        assert_eq!(aggregated.count, 5);

        let aggregated = monitor.get_aggregated_metrics("test_metric", AggregationType::Max, time_range).await.unwrap();
        assert_eq!(aggregated.value, 4.0);
        assert_eq!(aggregated.count, 5);
    }
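
    // Added example: exercises the per-name retention cap configured via
    // `MetricsConfig::max_metrics_per_name`. The cap and metric name used
    // here are illustrative values, not defaults.
    #[tokio::test]
    async fn test_metrics_trimming_respects_max_per_name() {
        let config = MonitorConfig {
            metrics_config: MetricsConfig { max_metrics_per_name: 10 },
            ..MonitorConfig::default()
        };
        let mut monitor = ReliabilityMonitor::with_config(config);
        monitor.initialize().await.unwrap();

        // Record more samples than the collector is allowed to keep.
        for i in 0..25u64 {
            let metric = Metric {
                name: "trimmed_metric".to_string(),
                value: i as f64,
                timestamp: 1234567890 + i,
                tags: HashMap::new(),
            };
            monitor.record_metric(metric).await.unwrap();
        }

        // Only the most recent `max_metrics_per_name` samples are retained.
        let status = monitor.get_status().await.unwrap();
        assert_eq!(status.metrics_count, 10);
    }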

    #[tokio::test]
    async fn test_health_status() {
        let mut monitor = ReliabilityMonitor::new();
        monitor.initialize().await.unwrap();

        let health_status = monitor.get_health_status().await.unwrap();
        assert_eq!(health_status, HealthStatus::Healthy);
    }

    #[tokio::test]
    async fn test_system_status() {
        let mut monitor = ReliabilityMonitor::new();
        monitor.initialize().await.unwrap();

        let status = monitor.get_status().await.unwrap();
        assert_eq!(status.health_status, HealthStatus::Healthy);
        assert_eq!(status.metrics_count, 0);
        assert_eq!(status.alerts_count, 0);
    }

    #[tokio::test]
    async fn test_alert_condition_evaluation() {
        let condition = AlertCondition {
            operator: ComparisonOperator::GreaterThan,
            threshold: 10.0,
        };

        assert!(condition.evaluate(15.0));
        assert!(!condition.evaluate(5.0));
        assert!(!condition.evaluate(10.0));

        let condition = AlertCondition {
            operator: ComparisonOperator::GreaterThanOrEqual,
            threshold: 10.0,
        };

        assert!(condition.evaluate(15.0));
        assert!(!condition.evaluate(5.0));
        assert!(condition.evaluate(10.0));

        let condition = AlertCondition {
            operator: ComparisonOperator::LessThan,
            threshold: 10.0,
        };

        assert!(!condition.evaluate(15.0));
        assert!(condition.evaluate(5.0));
        assert!(!condition.evaluate(10.0));

        let condition = AlertCondition {
            operator: ComparisonOperator::LessThanOrEqual,
            threshold: 10.0,
        };

        assert!(!condition.evaluate(15.0));
        assert!(condition.evaluate(5.0));
        assert!(condition.evaluate(10.0));

        let condition = AlertCondition {
            operator: ComparisonOperator::Equal,
            threshold: 10.0,
        };

        assert!(!condition.evaluate(15.0));
        assert!(!condition.evaluate(5.0));
        assert!(condition.evaluate(10.0));

        let condition = AlertCondition {
            operator: ComparisonOperator::NotEqual,
            threshold: 10.0,
        };

        assert!(condition.evaluate(15.0));
        assert!(condition.evaluate(5.0));
        assert!(!condition.evaluate(10.0));
    }

    #[tokio::test]
    async fn test_alert_rule_triggering() {
        let mut monitor = ReliabilityMonitor::new();
        monitor.initialize().await.unwrap();

        // No alert rules are configured here, so recording a metric should
        // simply succeed without triggering anything; the test below exercises
        // the same flow with a rule supplied via `MonitorConfig`.
        let metric = Metric {
            name: "test_metric".to_string(),
            value: 42.0,
            timestamp: 1234567890,
            tags: HashMap::new(),
        };

        let result = monitor.record_metric(metric).await;
        assert!(result.is_ok());
    }
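
    // Added example: the same flow as above, but with an alert rule supplied
    // through `MonitorConfig`. The rule id, metric name, and threshold are
    // illustrative values.
    #[tokio::test]
    async fn test_alert_rule_triggering_with_configured_rule() {
        let config = MonitorConfig {
            alert_config: AlertConfig {
                rules: vec![AlertRule {
                    id: "high_latency".to_string(),
                    metric_name: "latency_ms".to_string(),
                    condition: AlertCondition {
                        operator: ComparisonOperator::GreaterThan,
                        threshold: 100.0,
                    },
                    severity: AlertSeverity::High,
                    message: "Latency above threshold".to_string(),
                }],
            },
            ..MonitorConfig::default()
        };
        let mut monitor = ReliabilityMonitor::with_config(config);
        monitor.initialize().await.unwrap();

        // A value above the threshold should trigger the configured rule.
        let metric = Metric {
            name: "latency_ms".to_string(),
            value: 250.0,
            timestamp: 1234567890,
            tags: HashMap::new(),
        };
        monitor.record_metric(metric).await.unwrap();

        let status = monitor.get_status().await.unwrap();
        assert_eq!(status.alerts_count, 1);
    }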

    #[test]
    fn test_monitor_config_default() {
        let config = MonitorConfig::default();
        assert_eq!(config.metrics_config.max_metrics_per_name, 1000);
        assert!(config.alert_config.rules.is_empty());
        assert!(config.health_config.health_checks.is_empty());
    }

    #[test]
    fn test_metric_creation() {
        let metric = Metric {
            name: "test_metric".to_string(),
            value: 42.0,
            timestamp: 1234567890,
            tags: HashMap::new(),
        };

        assert_eq!(metric.name, "test_metric");
        assert_eq!(metric.value, 42.0);
        assert_eq!(metric.timestamp, 1234567890);
    }

    #[test]
    fn test_time_range_creation() {
        let time_range = TimeRange {
            start: 1234567890,
            end: 1234567895,
        };

        assert_eq!(time_range.start, 1234567890);
        assert_eq!(time_range.end, 1234567895);
    }

    #[test]
    fn test_alert_rule_creation() {
        let rule = AlertRule {
            id: "test_rule".to_string(),
            metric_name: "test_metric".to_string(),
            condition: AlertCondition {
                operator: ComparisonOperator::GreaterThan,
                threshold: 10.0,
            },
            severity: AlertSeverity::High,
            message: "Test alert".to_string(),
        };

        assert_eq!(rule.id, "test_rule");
        assert_eq!(rule.metric_name, "test_metric");
        assert_eq!(rule.severity, AlertSeverity::High);
        assert_eq!(rule.message, "Test alert");
    }

    #[test]
    fn test_health_check_creation() {
        let health_check = HealthCheck {
            name: "test_check".to_string(),
            is_healthy: true,
            message: "Test check passed".to_string(),
            last_checked: 1234567890,
        };

        assert_eq!(health_check.name, "test_check");
        assert!(health_check.is_healthy);
        assert_eq!(health_check.message, "Test check passed");
        assert_eq!(health_check.last_checked, 1234567890);
    }
}
977}