// mockforge_reporting/trend_analysis.rs

1//! Trend analysis for orchestration metrics over time
2
3use crate::pdf::ExecutionReport;
4use crate::{ReportingError, Result};
5use chrono::{DateTime, Duration, Utc};
6use serde::{Deserialize, Serialize};
7
/// Trend direction classified from a linear-regression fit of a metric.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum TrendDirection {
    /// Metric is moving in the desirable direction (negative slope for
    /// error-style metrics).
    Improving,
    /// Metric is moving in the undesirable direction (positive slope).
    Degrading,
    /// Slope is negligible relative to the metric's standard deviation.
    Stable,
    /// Fit quality is poor (r² < 0.5) — no reliable direction.
    Volatile,
}
17
/// Trend report for a single metric: summary statistics, direction,
/// raw history, forecast, and detected anomalies.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrendReport {
    /// Metric identifier (one of `TrendAnalyzer::available_metrics`).
    pub metric_name: String,
    /// Classified direction of the fitted trend line.
    pub trend: TrendDirection,
    /// Percent change between the two most recent observations
    /// (0.0 when only one observation exists or the previous value is 0).
    pub change_percentage: f64,
    /// Most recent observed value.
    pub current_value: f64,
    /// Second-most-recent observed value (equals `current_value` when
    /// only one observation exists).
    pub previous_value: f64,
    /// Arithmetic mean over all observations.
    pub average_value: f64,
    /// Population standard deviation over all observations.
    pub std_deviation: f64,
    /// Historical observations, ordered by timestamp.
    pub data_points: Vec<DataPoint>,
    /// Extrapolated future points (one per forecast period).
    pub forecast: Vec<ForecastPoint>,
    /// Observations more than 2 standard deviations from the mean.
    pub anomalies: Vec<AnomalyPoint>,
}
32
/// A single historical observation of a metric.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DataPoint {
    /// When the observation was made (report start time).
    pub timestamp: DateTime<Utc>,
    /// Observed metric value.
    pub value: f64,
}
39
/// A forecasted future value extrapolated from the regression line.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForecastPoint {
    /// Future timestamp the prediction applies to.
    pub timestamp: DateTime<Utc>,
    /// Value predicted by the fitted line at that time.
    pub predicted_value: f64,
    /// (lower, upper) band at ±2 standard errors of the estimate.
    pub confidence_interval: (f64, f64),
}
47
/// An observation flagged as statistically anomalous.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnomalyPoint {
    /// When the anomalous observation was made.
    pub timestamp: DateTime<Utc>,
    /// The anomalous value itself.
    pub value: f64,
    /// "high" when |z| > 3, otherwise "medium" (|z| > 2).
    pub severity: String,
}
55
/// Result of an ordinary least-squares linear fit (x in days, y = metric).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegressionResult {
    /// Change in metric value per day.
    pub slope: f64,
    /// Fitted value at x = 0 (the first data point's timestamp).
    pub intercept: f64,
    /// Coefficient of determination; 0.0 when the fit is degenerate.
    pub r_squared: f64,
}
63
/// Analyzes metric trends over a collection of historical execution reports.
pub struct TrendAnalyzer {
    // Kept sorted by `start_time` (see `add_report`), so extracted data
    // points are always in chronological order.
    historical_reports: Vec<ExecutionReport>,
}
68
69impl TrendAnalyzer {
70    /// Create a new trend analyzer
71    pub fn new() -> Self {
72        Self {
73            historical_reports: Vec::new(),
74        }
75    }
76
77    /// Add historical report
78    pub fn add_report(&mut self, report: ExecutionReport) {
79        self.historical_reports.push(report);
80        // Keep sorted by time
81        self.historical_reports.sort_by_key(|r| r.start_time);
82    }
83
84    /// Analyze trends for a metric
85    pub fn analyze_metric(&self, metric_name: &str) -> Result<TrendReport> {
86        if self.historical_reports.is_empty() {
87            return Err(ReportingError::Analysis("No historical data available".to_string()));
88        }
89
90        // Extract metric values
91        let data_points = self.extract_metric_values(metric_name)?;
92
93        if data_points.is_empty() {
94            return Err(ReportingError::Analysis(format!("No data for metric: {}", metric_name)));
95        }
96
97        // Calculate statistics
98        let values: Vec<f64> = data_points.iter().map(|dp| dp.value).collect();
99        let average_value = values.iter().sum::<f64>() / values.len() as f64;
100
101        let variance =
102            values.iter().map(|v| (v - average_value).powi(2)).sum::<f64>() / values.len() as f64;
103        let std_deviation = variance.sqrt();
104
105        // Calculate trend
106        let regression = self.linear_regression(&data_points);
107        let trend = self.determine_trend(&regression, std_deviation);
108
109        // Calculate change percentage
110        let current_value = data_points.last().unwrap().value;
111        let previous_value = if data_points.len() > 1 {
112            data_points[data_points.len() - 2].value
113        } else {
114            current_value
115        };
116
117        let change_percentage = if previous_value != 0.0 {
118            ((current_value - previous_value) / previous_value) * 100.0
119        } else {
120            0.0
121        };
122
123        // Detect anomalies
124        let anomalies = self.detect_anomalies(&data_points, average_value, std_deviation);
125
126        // Generate forecast
127        let forecast = self.generate_forecast(&regression, &data_points, 5);
128
129        Ok(TrendReport {
130            metric_name: metric_name.to_string(),
131            trend,
132            change_percentage,
133            current_value,
134            previous_value,
135            average_value,
136            std_deviation,
137            data_points,
138            forecast,
139            anomalies,
140        })
141    }
142
143    /// Extract metric values from reports
144    fn extract_metric_values(&self, metric_name: &str) -> Result<Vec<DataPoint>> {
145        let mut data_points = Vec::new();
146
147        for report in &self.historical_reports {
148            let value = match metric_name {
149                "error_rate" => report.metrics.error_rate,
150                "avg_latency" => report.metrics.avg_latency_ms,
151                "p95_latency" => report.metrics.p95_latency_ms,
152                "p99_latency" => report.metrics.p99_latency_ms,
153                "total_requests" => report.metrics.total_requests as f64,
154                "failed_requests" => report.metrics.failed_requests as f64,
155                "success_rate" => {
156                    if report.metrics.total_requests > 0 {
157                        report.metrics.successful_requests as f64
158                            / report.metrics.total_requests as f64
159                    } else {
160                        0.0
161                    }
162                }
163                _ => {
164                    return Err(ReportingError::Analysis(format!(
165                        "Unknown metric: {}",
166                        metric_name
167                    )))
168                }
169            };
170
171            data_points.push(DataPoint {
172                timestamp: report.start_time,
173                value,
174            });
175        }
176
177        Ok(data_points)
178    }
179
180    /// Perform linear regression
181    fn linear_regression(&self, data_points: &[DataPoint]) -> RegressionResult {
182        if data_points.len() < 2 {
183            return RegressionResult {
184                slope: 0.0,
185                intercept: 0.0,
186                r_squared: 0.0,
187            };
188        }
189
190        let n = data_points.len() as f64;
191
192        // Convert timestamps to x values (days since first point)
193        let x_values: Vec<f64> = data_points
194            .iter()
195            .map(|dp| (dp.timestamp - data_points[0].timestamp).num_seconds() as f64 / 86400.0)
196            .collect();
197
198        let y_values: Vec<f64> = data_points.iter().map(|dp| dp.value).collect();
199
200        let sum_x: f64 = x_values.iter().sum();
201        let sum_y: f64 = y_values.iter().sum();
202        let sum_xy: f64 = x_values.iter().zip(&y_values).map(|(x, y)| x * y).sum();
203        let sum_xx: f64 = x_values.iter().map(|x| x * x).sum();
204
205        let slope = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x);
206        let intercept = (sum_y - slope * sum_x) / n;
207
208        // Calculate R-squared
209        let mean_y = sum_y / n;
210        let ss_tot: f64 = y_values.iter().map(|y| (y - mean_y).powi(2)).sum();
211        let ss_res: f64 = x_values
212            .iter()
213            .zip(&y_values)
214            .map(|(x, y)| {
215                let predicted = slope * x + intercept;
216                (y - predicted).powi(2)
217            })
218            .sum();
219
220        let r_squared = if ss_tot > 0.0 {
221            1.0 - (ss_res / ss_tot)
222        } else {
223            0.0
224        };
225
226        RegressionResult {
227            slope,
228            intercept,
229            r_squared,
230        }
231    }
232
233    /// Determine trend direction
234    fn determine_trend(&self, regression: &RegressionResult, std_dev: f64) -> TrendDirection {
235        let slope_threshold = std_dev * 0.1;
236
237        if regression.r_squared < 0.5 {
238            // Low correlation - volatile
239            TrendDirection::Volatile
240        } else if regression.slope.abs() < slope_threshold {
241            // Minimal change - stable
242            TrendDirection::Stable
243        } else if regression.slope > 0.0 {
244            // Positive slope - for error rates this is degrading
245            TrendDirection::Degrading
246        } else {
247            // Negative slope - for error rates this is improving
248            TrendDirection::Improving
249        }
250    }
251
252    /// Detect anomalies using statistical methods
253    fn detect_anomalies(
254        &self,
255        data_points: &[DataPoint],
256        mean: f64,
257        std_dev: f64,
258    ) -> Vec<AnomalyPoint> {
259        let mut anomalies = Vec::new();
260        let threshold = 2.0; // 2 standard deviations
261
262        for point in data_points {
263            let z_score = ((point.value - mean) / std_dev).abs();
264
265            if z_score > threshold {
266                let severity = if z_score > 3.0 { "high" } else { "medium" };
267
268                anomalies.push(AnomalyPoint {
269                    timestamp: point.timestamp,
270                    value: point.value,
271                    severity: severity.to_string(),
272                });
273            }
274        }
275
276        anomalies
277    }
278
279    /// Generate forecast using linear regression
280    fn generate_forecast(
281        &self,
282        regression: &RegressionResult,
283        data_points: &[DataPoint],
284        periods: usize,
285    ) -> Vec<ForecastPoint> {
286        let mut forecast = Vec::new();
287
288        if data_points.is_empty() {
289            return forecast;
290        }
291
292        let last_timestamp = data_points.last().unwrap().timestamp;
293        let first_timestamp = data_points[0].timestamp;
294        let n = data_points.len() as f64;
295
296        // Calculate standard error of the estimate from regression residuals:
297        // SE = sqrt( sum((y_i - predicted_i)^2) / (n - 2) )
298        let std_error = if data_points.len() > 2 {
299            let sum_sq_residuals: f64 = data_points
300                .iter()
301                .map(|dp| {
302                    let x = (dp.timestamp - first_timestamp).num_seconds() as f64 / 86400.0;
303                    let predicted = regression.slope * x + regression.intercept;
304                    (dp.value - predicted).powi(2)
305                })
306                .sum();
307            (sum_sq_residuals / (n - 2.0)).sqrt()
308        } else {
309            0.0
310        };
311
312        for i in 1..=periods {
313            let future_timestamp = last_timestamp + Duration::days(i as i64);
314            let days_from_start =
315                (future_timestamp - first_timestamp).num_seconds() as f64 / 86400.0;
316
317            let predicted_value = regression.slope * days_from_start + regression.intercept;
318
319            // Confidence interval: ±2 standard errors
320            let confidence_interval =
321                (predicted_value - 2.0 * std_error, predicted_value + 2.0 * std_error);
322
323            forecast.push(ForecastPoint {
324                timestamp: future_timestamp,
325                predicted_value,
326                confidence_interval,
327            });
328        }
329
330        forecast
331    }
332
333    /// Get all available metrics
334    pub fn available_metrics(&self) -> Vec<String> {
335        vec![
336            "error_rate".to_string(),
337            "avg_latency".to_string(),
338            "p95_latency".to_string(),
339            "p99_latency".to_string(),
340            "total_requests".to_string(),
341            "failed_requests".to_string(),
342            "success_rate".to_string(),
343        ]
344    }
345
346    /// Analyze all metrics
347    pub fn analyze_all_metrics(&self) -> Result<Vec<TrendReport>> {
348        let mut reports = Vec::new();
349
350        for metric in self.available_metrics() {
351            if let Ok(report) = self.analyze_metric(&metric) {
352                reports.push(report);
353            }
354        }
355
356        Ok(reports)
357    }
358}
359
360impl Default for TrendAnalyzer {
361    fn default() -> Self {
362        Self::new()
363    }
364}
365
#[cfg(test)]
mod tests {
    use super::*;
    use crate::pdf::ReportMetrics;

    /// Build a synthetic report dated `10 - i` days in the past with the given
    /// latency/error-rate values over a fixed 1000-request workload.
    ///
    /// Fix: the original read `Utc::now()` twice, so `start_time` and
    /// `end_time` differed by the call gap and disagreed with
    /// `duration_seconds`; both now share a single clock read.
    fn create_test_report(i: i64, avg_latency: f64, error_rate: f64) -> ExecutionReport {
        let timestamp = Utc::now() - Duration::days(10 - i);
        ExecutionReport {
            orchestration_name: "test".to_string(),
            start_time: timestamp,
            end_time: timestamp,
            duration_seconds: 100,
            status: "Completed".to_string(),
            total_steps: 5,
            completed_steps: 5,
            failed_steps: 0,
            metrics: ReportMetrics {
                total_requests: 1000,
                successful_requests: 980,
                failed_requests: 20,
                avg_latency_ms: avg_latency,
                p95_latency_ms: 200.0,
                p99_latency_ms: 300.0,
                error_rate,
            },
            failures: vec![],
            recommendations: vec![],
        }
    }

    #[test]
    fn test_trend_analyzer() {
        let mut analyzer = TrendAnalyzer::new();

        // Ten reports with steadily increasing latency.
        for i in 0..10 {
            let report = create_test_report(i, 100.0 + i as f64 * 5.0, 0.02);
            analyzer.add_report(report);
        }

        let trend = analyzer.analyze_metric("avg_latency").unwrap();
        assert_eq!(trend.metric_name, "avg_latency");
        assert!(trend.data_points.len() >= 10);
    }

    #[test]
    fn test_trend_analyzer_new() {
        let analyzer = TrendAnalyzer::new();
        assert!(analyzer.historical_reports.is_empty());
    }

    #[test]
    fn test_trend_analyzer_default() {
        let analyzer = TrendAnalyzer::default();
        assert!(analyzer.historical_reports.is_empty());
    }

    #[test]
    fn test_trend_direction_enum_serialize() {
        // `rename_all = "lowercase"` must lowercase every variant.
        let improving = TrendDirection::Improving;
        let json = serde_json::to_string(&improving).unwrap();
        assert_eq!(json, "\"improving\"");

        let degrading = TrendDirection::Degrading;
        let json = serde_json::to_string(&degrading).unwrap();
        assert_eq!(json, "\"degrading\"");

        let stable = TrendDirection::Stable;
        let json = serde_json::to_string(&stable).unwrap();
        assert_eq!(json, "\"stable\"");

        let volatile = TrendDirection::Volatile;
        let json = serde_json::to_string(&volatile).unwrap();
        assert_eq!(json, "\"volatile\"");
    }

    #[test]
    fn test_analyze_no_historical_data() {
        let analyzer = TrendAnalyzer::new();
        let result = analyzer.analyze_metric("error_rate");
        assert!(result.is_err());
    }

    #[test]
    fn test_analyze_unknown_metric() {
        let mut analyzer = TrendAnalyzer::new();
        analyzer.add_report(create_test_report(0, 100.0, 0.02));

        let result = analyzer.analyze_metric("unknown_metric");
        assert!(result.is_err());
    }

    #[test]
    fn test_available_metrics() {
        let analyzer = TrendAnalyzer::new();
        let metrics = analyzer.available_metrics();

        assert!(metrics.contains(&"error_rate".to_string()));
        assert!(metrics.contains(&"avg_latency".to_string()));
        assert!(metrics.contains(&"p95_latency".to_string()));
        assert!(metrics.contains(&"p99_latency".to_string()));
        assert!(metrics.contains(&"total_requests".to_string()));
        assert!(metrics.contains(&"failed_requests".to_string()));
        assert!(metrics.contains(&"success_rate".to_string()));
    }

    #[test]
    fn test_analyze_all_metrics() {
        let mut analyzer = TrendAnalyzer::new();

        for i in 0..5 {
            analyzer.add_report(create_test_report(i, 100.0 + i as f64 * 2.0, 0.02));
        }

        let reports = analyzer.analyze_all_metrics().unwrap();
        assert!(!reports.is_empty());
    }

    #[test]
    fn test_trend_report_clone() {
        let report = TrendReport {
            metric_name: "error_rate".to_string(),
            trend: TrendDirection::Stable,
            change_percentage: 0.0,
            current_value: 0.02,
            previous_value: 0.02,
            average_value: 0.02,
            std_deviation: 0.001,
            data_points: vec![],
            forecast: vec![],
            anomalies: vec![],
        };

        let cloned = report.clone();
        assert_eq!(report.metric_name, cloned.metric_name);
        assert_eq!(report.trend, cloned.trend);
    }

    #[test]
    fn test_data_point_clone() {
        let point = DataPoint {
            timestamp: Utc::now(),
            value: 100.0,
        };

        let cloned = point.clone();
        assert_eq!(point.timestamp, cloned.timestamp);
        assert_eq!(point.value, cloned.value);
    }

    #[test]
    fn test_forecast_point_clone() {
        let point = ForecastPoint {
            timestamp: Utc::now(),
            predicted_value: 105.0,
            confidence_interval: (100.0, 110.0),
        };

        let cloned = point.clone();
        assert_eq!(point.predicted_value, cloned.predicted_value);
        assert_eq!(point.confidence_interval, cloned.confidence_interval);
    }

    #[test]
    fn test_anomaly_point_clone() {
        let point = AnomalyPoint {
            timestamp: Utc::now(),
            value: 500.0,
            severity: "high".to_string(),
        };

        let cloned = point.clone();
        assert_eq!(point.value, cloned.value);
        assert_eq!(point.severity, cloned.severity);
    }

    #[test]
    fn test_regression_result_clone() {
        let result = RegressionResult {
            slope: 1.5,
            intercept: 100.0,
            r_squared: 0.95,
        };

        let cloned = result.clone();
        assert_eq!(result.slope, cloned.slope);
        assert_eq!(result.r_squared, cloned.r_squared);
    }

    #[test]
    fn test_trend_degrading() {
        let mut analyzer = TrendAnalyzer::new();

        // Create reports with increasing error rate (degrading)
        for i in 0..10 {
            let report = create_test_report(i, 100.0, 0.01 + i as f64 * 0.02);
            analyzer.add_report(report);
        }

        let trend = analyzer.analyze_metric("error_rate").unwrap();
        assert!(matches!(trend.trend, TrendDirection::Degrading | TrendDirection::Volatile));
    }

    #[test]
    fn test_trend_stable() {
        let mut analyzer = TrendAnalyzer::new();

        // Create reports with stable metrics
        for i in 0..10 {
            let report = create_test_report(i, 100.0, 0.02);
            analyzer.add_report(report);
        }

        let trend = analyzer.analyze_metric("error_rate").unwrap();
        // With no variation, should be stable or volatile (depending on r_squared)
        assert!(matches!(trend.trend, TrendDirection::Stable | TrendDirection::Volatile));
    }

    #[test]
    fn test_forecast_generation() {
        let mut analyzer = TrendAnalyzer::new();

        for i in 0..10 {
            let report = create_test_report(i, 100.0 + i as f64 * 5.0, 0.02);
            analyzer.add_report(report);
        }

        let trend = analyzer.analyze_metric("avg_latency").unwrap();
        assert!(!trend.forecast.is_empty());
        assert_eq!(trend.forecast.len(), 5); // Default forecast periods
    }

    #[test]
    fn test_trend_report_serialize() {
        let mut analyzer = TrendAnalyzer::new();

        for i in 0..5 {
            analyzer.add_report(create_test_report(i, 100.0, 0.02));
        }

        let trend = analyzer.analyze_metric("error_rate").unwrap();
        let json = serde_json::to_string(&trend).unwrap();
        assert!(json.contains("metric_name"));
        assert!(json.contains("trend"));
    }

    #[test]
    fn test_single_report_analysis() {
        let mut analyzer = TrendAnalyzer::new();
        analyzer.add_report(create_test_report(0, 100.0, 0.02));

        let trend = analyzer.analyze_metric("error_rate").unwrap();
        assert_eq!(trend.data_points.len(), 1);
        // With single point, change_percentage should be 0
        assert_eq!(trend.change_percentage, 0.0);
    }

    #[test]
    fn test_success_rate_metric() {
        let mut analyzer = TrendAnalyzer::new();

        for i in 0..5 {
            analyzer.add_report(create_test_report(i, 100.0, 0.02));
        }

        let trend = analyzer.analyze_metric("success_rate").unwrap();
        assert_eq!(trend.metric_name, "success_rate");
        // Success rate should be ~0.98 (980/1000)
        assert!(trend.current_value > 0.9);
    }

    #[test]
    fn test_reports_sorted_by_time() {
        let mut analyzer = TrendAnalyzer::new();

        // Add reports out of order
        analyzer.add_report(create_test_report(5, 100.0, 0.02));
        analyzer.add_report(create_test_report(0, 100.0, 0.02));
        analyzer.add_report(create_test_report(3, 100.0, 0.02));

        // Reports should be sorted by time
        let times: Vec<_> = analyzer.historical_reports.iter().map(|r| r.start_time).collect();
        for i in 1..times.len() {
            assert!(times[i] >= times[i - 1]);
        }
    }
}