1use super::config::*;
8use super::optimizer::{Adaptation, AdaptationPriority, AdaptationType, StreamingDataPoint};
9use super::resource_management::ResourceUsage;
10
11use scirs2_core::numeric::Float;
12use serde::{Deserialize, Serialize};
13use std::collections::{HashMap, VecDeque};
14use std::iter::Sum;
15use std::time::{Duration, Instant};
16
/// A point-in-time record of optimizer performance; the unit of history
/// stored by `PerformanceTracker`.
#[derive(Debug, Clone)]
pub struct PerformanceSnapshot<A: Float + Send + Sync> {
    /// When this snapshot was captured.
    pub timestamp: Instant,
    /// Training loss at capture time.
    pub loss: A,
    /// Task accuracy, when the task reports one.
    pub accuracy: Option<A>,
    /// Estimated convergence rate, when available.
    pub convergence_rate: Option<A>,
    /// Norm of the most recent gradient, when available.
    pub gradient_norm: Option<A>,
    /// Magnitude of the most recent parameter update, when available.
    pub parameter_update_magnitude: Option<A>,
    /// Summary statistics of the data seen at this point.
    pub data_statistics: DataStatistics<A>,
    /// Resource consumption at capture time.
    pub resource_usage: ResourceUsage,
    /// Arbitrary user-supplied metrics keyed by name.
    pub custom_metrics: HashMap<String, A>,
}
39
/// Summary statistics for a batch/stream of input data.
#[derive(Debug, Clone)]
pub struct DataStatistics<A: Float + Send + Sync> {
    /// Number of samples these statistics were computed over.
    pub sample_count: usize,
    /// Per-feature means.
    pub feature_means: scirs2_core::ndarray::Array1<A>,
    /// Per-feature standard deviations.
    pub feature_stds: scirs2_core::ndarray::Array1<A>,
    /// Aggregate data-quality score.
    pub average_quality: A,
    /// When these statistics were computed.
    pub timestamp: Instant,
}
54
55impl<A: Float + Send + Sync> Default for DataStatistics<A> {
56 fn default() -> Self {
57 Self {
58 sample_count: 0,
59 feature_means: scirs2_core::ndarray::Array1::zeros(0),
60 feature_stds: scirs2_core::ndarray::Array1::zeros(0),
61 average_quality: A::zero(),
62 timestamp: Instant::now(),
63 }
64 }
65}
66
/// A single tagged performance measurement.
#[derive(Debug, Clone)]
pub enum PerformanceMetric<A: Float + Send + Sync> {
    /// Training loss value.
    Loss(A),
    /// Task accuracy.
    Accuracy(A),
    /// Estimated convergence rate.
    ConvergenceRate(A),
    /// Gradient norm.
    GradientNorm(A),
    /// Effectiveness score of the current learning rate.
    LearningRateEffectiveness(A),
    /// Resource-usage efficiency score.
    ResourceEfficiency(A),
    /// Data-quality score.
    DataQuality(A),
    /// User-defined metric: (name, value).
    Custom(String, A),
}
87
/// Optimization context active when a performance measurement was taken.
#[derive(Debug, Clone)]
pub struct PerformanceContext<A: Float + Send + Sync> {
    /// Learning rate in effect.
    pub learning_rate: A,
    /// Mini-batch size in effect.
    pub batch_size: usize,
    /// Streaming buffer size in effect.
    pub buffer_size: usize,
    /// Whether concept drift was flagged at this point.
    pub drift_detected: bool,
    /// Resource limits/usage constraining the run.
    pub resource_constraints: ResourceUsage,
    /// Time elapsed since the last adaptation was applied.
    pub time_since_adaptation: Duration,
}
104
/// Central performance-monitoring facade: keeps a bounded snapshot history
/// and delegates to trend analysis, prediction, improvement tracking and
/// anomaly detection.
pub struct PerformanceTracker<A: Float + Send + Sync + std::iter::Sum> {
    /// Tracker configuration (history size, feature toggles).
    config: PerformanceConfig,
    /// Bounded FIFO of recorded snapshots (oldest evicted first).
    performance_history: VecDeque<PerformanceSnapshot<A>>,
    /// Per-metric trend statistics over a sliding window.
    trend_analyzer: PerformanceTrendAnalyzer<A>,
    /// Forecasts future loss values.
    predictor: PerformancePredictor<A>,
    /// First snapshot ever recorded, kept as a reference point.
    baseline: Option<PerformanceSnapshot<A>>,
    /// Most recently supplied optimization context, if any.
    current_context: Option<PerformanceContext<A>>,
    /// Tracks best-so-far metrics and plateau state.
    improvement_tracker: PerformanceImprovementTracker<A>,
    /// Flags statistically unusual metric values.
    performance_anomaly_detector: PerformanceAnomalyDetector<A>,
}
124
/// Maintains sliding-window trend statistics (slope, correlation,
/// volatility, confidence) per named metric.
pub struct PerformanceTrendAnalyzer<A: Float + Send + Sync> {
    /// Maximum number of recent values retained per metric.
    window_size: usize,
    /// Per-metric trend state keyed by metric name.
    trends: HashMap<String, TrendData<A>>,
    /// Configured trend-estimation methods.
    // NOTE(review): the methods list is populated in `new` but the visible
    // trend computation uses a fixed least-squares fit — confirm intent.
    trend_methods: Vec<TrendMethod>,
}
134
/// Trend statistics for one metric over its recent-value window.
#[derive(Debug, Clone)]
pub struct TrendData<A: Float + Send + Sync> {
    /// Least-squares slope of value vs. observation index.
    pub slope: A,
    /// Pearson correlation of value vs. observation index.
    pub correlation: A,
    /// Standard deviation of the windowed values.
    pub volatility: A,
    /// Heuristic confidence score in [0, 1].
    pub confidence: A,
    /// Sliding window of the most recent observations.
    pub recent_values: VecDeque<A>,
    /// When this trend was last refreshed.
    pub last_update: Instant,
}
151
/// Available trend-estimation strategies.
#[derive(Debug, Clone)]
pub enum TrendMethod {
    /// Ordinary least-squares fit over the window.
    LinearRegression,
    /// Simple moving average over `window` observations.
    MovingAverage { window: usize },
    /// Exponential smoothing with smoothing factor `alpha`.
    ExponentialSmoothing { alpha: f64 },
    /// Seasonal decomposition of the series.
    SeasonalDecomposition,
}
164
/// Forecasts future performance (loss) from recorded history and tracks
/// the accuracy of past predictions.
pub struct PerformancePredictor<A: Float + Send + Sync> {
    /// Configured prediction strategies.
    prediction_methods: Vec<PredictionMethod>,
    /// Bounded FIFO of issued predictions (actuals back-filled later).
    prediction_history: VecDeque<PredictionResult<A>>,
    /// Latest accuracy score per method name.
    model_accuracies: HashMap<String, A>,
    /// Per-method weights for ensemble combination.
    ensemble_weights: HashMap<String, A>,
}
176
/// Available forecasting strategies.
#[derive(Debug, Clone)]
pub enum PredictionMethod {
    /// Linear extrapolation.
    Linear,
    /// Double exponential smoothing with level `alpha` and trend `beta`.
    Exponential { alpha: f64, beta: f64 },
    /// ARIMA(p, d, q) model.
    ARIMA { p: usize, d: usize, q: usize },
    /// Feed-forward network with the given hidden-layer widths.
    NeuralNetwork { hidden_layers: Vec<usize> },
    /// Weighted combination of the other methods.
    Ensemble,
}
191
/// A single forecast, later back-filled with the realized value so that
/// prediction accuracy can be scored.
#[derive(Debug, Clone)]
pub struct PredictionResult<A: Float + Send + Sync> {
    /// Forecast value.
    pub predicted_value: A,
    /// (lower, upper) confidence band around the forecast.
    pub confidence_interval: (A, A),
    /// Name of the method that produced the forecast.
    pub method: String,
    /// Forecast horizon in steps.
    pub steps_ahead: usize,
    /// When the forecast was issued.
    pub timestamp: Instant,
    /// Realized value, filled in once the horizon has elapsed.
    pub actual_value: Option<A>,
}
208
/// Tracks best-so-far metric values, records improvement events and
/// detects plateaus in the loss curve.
pub struct PerformanceImprovementTracker<A: Float + Send + Sync> {
    /// Best value seen so far per metric name (acts as a moving baseline).
    baseline_metrics: HashMap<String, A>,
    /// Latest improvement rate per metric name.
    improvement_rates: HashMap<String, A>,
    /// Bounded FIFO of recorded improvement events.
    improvement_history: VecDeque<ImprovementEvent<A>>,
    /// Detects when the loss stops changing meaningfully.
    plateau_detector: PlateauDetector<A>,
}
220
/// Record of one observed improvement in a tracked metric.
#[derive(Debug, Clone)]
pub struct ImprovementEvent<A: Float + Send + Sync> {
    /// When the improvement was observed.
    pub timestamp: Instant,
    /// Metric that improved (e.g. "loss").
    pub metric_name: String,
    /// Absolute improvement over the previous best.
    pub improvement: A,
    /// Improvement normalized per step/time unit.
    pub improvement_rate: A,
    /// Free-form description of the circumstances.
    pub context: String,
}
235
/// Declares a plateau when the value range over a sliding window stays
/// below a threshold.
pub struct PlateauDetector<A: Float + Send + Sync> {
    /// Number of recent observations considered.
    window_size: usize,
    /// Maximum (max - min) range still counted as a plateau.
    plateau_threshold: A,
    /// Sliding window of recent observations.
    recent_values: VecDeque<A>,
    /// Whether the series is currently judged to be on a plateau.
    is_plateau: bool,
    /// How long the current plateau has lasted.
    plateau_duration: Duration,
    /// When the last non-plateau (significant) change was seen.
    last_significant_change: Option<Instant>,
}
251
/// Flags metric values whose z-score against running statistics exceeds a
/// configurable threshold.
pub struct PerformanceAnomalyDetector<A: Float + Send + Sync> {
    /// z-score threshold above which a value is anomalous.
    threshold: A,
    /// Running statistics per metric name.
    historical_stats: HashMap<String, MetricStatistics<A>>,
    /// Recently recorded anomalies (bounded buffer).
    recent_anomalies: VecDeque<PerformanceAnomaly<A>>,
    /// Whether the threshold may be adapted at runtime.
    // NOTE(review): not read by the visible detection code — confirm use.
    adaptive_threshold: bool,
}
263
/// Running (online) statistics for one metric.
#[derive(Debug, Clone)]
pub struct MetricStatistics<A: Float + Send + Sync> {
    /// Running mean.
    pub mean: A,
    /// Accumulated sum of squared deviations (Welford M2); divide by
    /// `count - 1` for the sample variance.
    pub variance: A,
    /// Smallest value observed.
    pub min_value: A,
    /// Largest value observed.
    pub max_value: A,
    /// Number of observations.
    pub count: usize,
    /// When the statistics were last updated.
    pub last_update: Instant,
}
280
/// A detected anomalous metric observation.
#[derive(Debug, Clone)]
pub struct PerformanceAnomaly<A: Float + Send + Sync> {
    /// When the anomalous value was observed.
    pub timestamp: Instant,
    /// Metric the value belongs to.
    pub metric_name: String,
    /// The anomalous value itself.
    pub observed_value: A,
    /// (lower, upper) band the value was expected to fall within.
    pub expected_range: (A, A),
    /// How severe the deviation is.
    pub severity: AnomalySeverity,
    /// Kind of deviation observed.
    pub anomaly_type: AnomalyType,
}
297
/// Severity ranking for a detected anomaly. The derived `Ord` follows
/// declaration order: `Minor < Moderate < Major < Critical`.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum AnomalySeverity {
    /// Slight deviation.
    Minor,
    /// Clear but tolerable deviation.
    Moderate,
    /// Large deviation warranting attention.
    Major,
    /// Extreme deviation requiring immediate handling.
    Critical,
}
310
/// Kind of anomalous behavior observed in a metric.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AnomalyType {
    /// Value far above the expected range.
    High,
    /// Value far below the expected range.
    Low,
    /// Abrupt change in the metric's trend.
    TrendChange,
    /// Oscillatory behavior.
    Oscillation,
    /// Sustained worsening of the metric.
    Degradation,
    /// Metric stuck at a plateau.
    Plateau,
}
327
328impl<A: Float + Default + Clone + std::iter::Sum + Send + Sync + std::fmt::Debug>
329 PerformanceTracker<A>
330{
331 pub fn new(config: &StreamingConfig) -> Result<Self, String> {
333 let performance_config = config.performance_config.clone();
334
335 let trend_analyzer = PerformanceTrendAnalyzer::new(performance_config.trend_window_size);
336 let predictor = PerformancePredictor::new();
337 let improvement_tracker = PerformanceImprovementTracker::new();
338 let performance_anomaly_detector = PerformanceAnomalyDetector::new(2.0); Ok(Self {
341 config: performance_config,
342 performance_history: VecDeque::with_capacity(1000),
343 trend_analyzer,
344 predictor,
345 baseline: None,
346 current_context: None,
347 improvement_tracker,
348 performance_anomaly_detector,
349 })
350 }
351
352 pub fn add_performance(&mut self, snapshot: PerformanceSnapshot<A>) -> Result<(), String> {
354 if self.performance_history.len() >= self.config.history_size {
356 self.performance_history.pop_front();
357 }
358 self.performance_history.push_back(snapshot.clone());
359
360 if self.baseline.is_none() {
362 self.baseline = Some(snapshot.clone());
363 }
364
365 if self.config.enable_trend_analysis {
367 self.trend_analyzer.update(&snapshot)?;
368 }
369
370 self.improvement_tracker.update(&snapshot)?;
372
373 let anomalies = self
375 .performance_anomaly_detector
376 .check_for_anomalies(&snapshot)?;
377 if !anomalies.is_empty() {
378 self.handle_performance_anomalies(&anomalies)?;
380 }
381
382 if self.config.enable_prediction {
384 self.predictor.update_with_actual(&snapshot)?;
385 }
386
387 Ok(())
388 }
389
390 pub fn get_recent_performance(&self, count: usize) -> Vec<PerformanceSnapshot<A>> {
392 self.performance_history
393 .iter()
394 .rev()
395 .take(count)
396 .cloned()
397 .collect()
398 }
399
400 pub fn get_recent_losses(&self, count: usize) -> Vec<A> {
402 self.performance_history
403 .iter()
404 .rev()
405 .take(count)
406 .map(|snapshot| snapshot.loss)
407 .collect()
408 }
409
410 pub fn predict_performance(
412 &mut self,
413 steps_ahead: usize,
414 ) -> Result<PredictionResult<A>, String> {
415 if !self.config.enable_prediction {
416 return Err("Performance prediction is disabled".to_string());
417 }
418
419 self.predictor
420 .predict(steps_ahead, &self.performance_history)
421 }
422
423 pub fn get_performance_trends(&self) -> HashMap<String, TrendData<A>> {
425 self.trend_analyzer.get_current_trends()
426 }
427
428 pub fn apply_threshold_adaptation(&mut self, adaptation: &Adaptation<A>) -> Result<(), String> {
430 if adaptation.adaptation_type == AdaptationType::PerformanceThreshold {
431 let new_threshold = self.performance_anomaly_detector.threshold + adaptation.magnitude;
433 self.performance_anomaly_detector
434 .update_threshold(new_threshold);
435 }
436 Ok(())
437 }
438
439 fn handle_performance_anomalies(
441 &mut self,
442 anomalies: &[PerformanceAnomaly<A>],
443 ) -> Result<(), String> {
444 for anomaly in anomalies {
445 match anomaly.severity {
446 AnomalySeverity::Critical | AnomalySeverity::Major => {
447 println!("Critical performance anomaly detected: {:?}", anomaly);
449 }
450 _ => {
451 self.performance_anomaly_detector
453 .recent_anomalies
454 .push_back(anomaly.clone());
455 }
456 }
457 }
458 Ok(())
459 }
460
461 pub fn reset(&mut self) -> Result<(), String> {
463 self.performance_history.clear();
464 self.baseline = None;
465 self.current_context = None;
466 self.trend_analyzer.reset();
467 self.predictor.reset();
468 self.improvement_tracker.reset();
469 self.performance_anomaly_detector.reset();
470 Ok(())
471 }
472
473 pub fn get_diagnostics(&self) -> PerformanceDiagnostics {
475 PerformanceDiagnostics {
476 history_size: self.performance_history.len(),
477 baseline_set: self.baseline.is_some(),
478 trends_available: !self.trend_analyzer.trends.is_empty(),
479 anomalies_detected: self.performance_anomaly_detector.recent_anomalies.len(),
480 plateau_detected: self.improvement_tracker.plateau_detector.is_plateau,
481 prediction_accuracy: self.predictor.get_average_accuracy(),
482 }
483 }
484}
485
486impl<A: Float + Default + Clone + Send + Sync + std::iter::Sum> PerformanceTrendAnalyzer<A> {
487 fn new(window_size: usize) -> Self {
488 Self {
489 window_size,
490 trends: HashMap::new(),
491 trend_methods: vec![
492 TrendMethod::LinearRegression,
493 TrendMethod::MovingAverage {
494 window: window_size / 2,
495 },
496 TrendMethod::ExponentialSmoothing { alpha: 0.3 },
497 ],
498 }
499 }
500
501 fn update(&mut self, snapshot: &PerformanceSnapshot<A>) -> Result<(), String> {
502 self.update_metric_trend("loss", snapshot.loss)?;
504
505 if let Some(accuracy) = snapshot.accuracy {
506 self.update_metric_trend("accuracy", accuracy)?;
507 }
508
509 if let Some(convergence) = snapshot.convergence_rate {
510 self.update_metric_trend("convergence", convergence)?;
511 }
512
513 self.compute_trends()?;
514 Ok(())
515 }
516
517 fn update_metric_trend(&mut self, metric_name: &str, value: A) -> Result<(), String> {
518 let trend_data = self
519 .trends
520 .entry(metric_name.to_string())
521 .or_insert_with(|| TrendData {
522 slope: A::zero(),
523 correlation: A::zero(),
524 volatility: A::zero(),
525 confidence: A::zero(),
526 recent_values: VecDeque::with_capacity(self.window_size),
527 last_update: Instant::now(),
528 });
529
530 if trend_data.recent_values.len() >= self.window_size {
531 trend_data.recent_values.pop_front();
532 }
533 trend_data.recent_values.push_back(value);
534 trend_data.last_update = Instant::now();
535
536 Ok(())
537 }
538
539 fn compute_trends(&mut self) -> Result<(), String> {
540 let keys: Vec<_> = self.trends.keys().cloned().collect();
541
542 let mut computed_values = Vec::new();
544 for metric_name in &keys {
545 if let Some(trend_data) = self.trends.get(metric_name) {
546 if trend_data.recent_values.len() >= 3 {
547 let values = trend_data.recent_values.clone();
548 let slope = self.compute_slope(&values)?;
549 let correlation = self.compute_correlation(&values)?;
550 let volatility = self.compute_volatility(&values)?;
551 let confidence = self.compute_confidence(&values)?;
552 computed_values.push((
553 metric_name.clone(),
554 slope,
555 correlation,
556 volatility,
557 confidence,
558 ));
559 }
560 }
561 }
562
563 for (metric_name, slope, correlation, volatility, confidence) in computed_values {
565 if let Some(trend_data) = self.trends.get_mut(&metric_name) {
566 trend_data.slope = slope;
567 trend_data.correlation = correlation;
568 trend_data.volatility = volatility;
569 trend_data.confidence = confidence;
570 }
571 }
572
573 Ok(())
574 }
575
576 fn compute_slope(&self, values: &VecDeque<A>) -> Result<A, String> {
577 if values.len() < 2 {
578 return Ok(A::zero());
579 }
580
581 let n = A::from(values.len()).expect("unwrap failed");
582 let sum_x = n * (n + A::one()) / A::from(2.0).expect("unwrap failed");
584 let sum_y = values.iter().cloned().sum::<A>();
585 let sum_xy = values
586 .iter()
587 .enumerate()
588 .map(|(i, &y)| A::from(i + 1).expect("unwrap failed") * y)
589 .sum::<A>();
590 let two = A::from(2.0).expect("unwrap failed");
592 let six = A::from(6.0).expect("unwrap failed");
593 let sum_x_squared = n * (n + A::one()) * (two * n + A::one()) / six;
594
595 let denominator = n * sum_x_squared - sum_x * sum_x;
596 if denominator == A::zero() {
597 return Ok(A::zero());
598 }
599
600 let slope = (n * sum_xy - sum_x * sum_y) / denominator;
601 Ok(slope)
602 }
603
604 fn compute_correlation(&self, values: &VecDeque<A>) -> Result<A, String> {
605 if values.len() < 2 {
606 return Ok(A::zero());
607 }
608
609 let n = values.len();
611 let time_values: Vec<A> = (1..=n)
612 .map(|i| A::from(i).expect("unwrap failed"))
613 .collect();
614 let value_vec: Vec<A> = values.iter().cloned().collect();
615
616 let mean_time = time_values.iter().cloned().sum::<A>() / A::from(n).expect("unwrap failed");
617 let mean_value = value_vec.iter().cloned().sum::<A>() / A::from(n).expect("unwrap failed");
618
619 let numerator = time_values
620 .iter()
621 .zip(value_vec.iter())
622 .map(|(&t, &v)| (t - mean_time) * (v - mean_value))
623 .sum::<A>();
624
625 let time_variance = time_values
626 .iter()
627 .map(|&t| (t - mean_time) * (t - mean_time))
628 .sum::<A>();
629
630 let value_variance = value_vec
631 .iter()
632 .map(|&v| (v - mean_value) * (v - mean_value))
633 .sum::<A>();
634
635 let denominator = (time_variance * value_variance).sqrt();
636 if denominator == A::zero() {
637 return Ok(A::zero());
638 }
639
640 Ok(numerator / denominator)
641 }
642
643 fn compute_volatility(&self, values: &VecDeque<A>) -> Result<A, String> {
644 if values.len() < 2 {
645 return Ok(A::zero());
646 }
647
648 let mean =
649 values.iter().cloned().sum::<A>() / A::from(values.len()).expect("unwrap failed");
650 let variance = values.iter().map(|&v| (v - mean) * (v - mean)).sum::<A>()
651 / A::from(values.len()).expect("unwrap failed");
652
653 Ok(variance.sqrt())
654 }
655
656 fn compute_confidence(&self, values: &VecDeque<A>) -> Result<A, String> {
657 if values.len() < 3 {
659 return Ok(A::zero());
660 }
661
662 let slope = self.compute_slope(values)?;
663 let correlation = self.compute_correlation(values)?;
664
665 let confidence = correlation.abs() * (A::one() - (slope.abs() / (slope.abs() + A::one())));
667 Ok(confidence)
668 }
669
670 fn get_current_trends(&self) -> HashMap<String, TrendData<A>> {
671 self.trends.clone()
672 }
673
674 fn reset(&mut self) {
675 self.trends.clear();
676 }
677}
678
679impl<A: Float + Default + Clone + Send + Sync + std::iter::Sum> PerformancePredictor<A> {
680 fn new() -> Self {
681 Self {
682 prediction_methods: vec![
683 PredictionMethod::Linear,
684 PredictionMethod::Exponential {
685 alpha: 0.3,
686 beta: 0.1,
687 },
688 ],
689 prediction_history: VecDeque::with_capacity(1000),
690 model_accuracies: HashMap::new(),
691 ensemble_weights: HashMap::new(),
692 }
693 }
694
695 fn predict(
696 &mut self,
697 steps_ahead: usize,
698 history: &VecDeque<PerformanceSnapshot<A>>,
699 ) -> Result<PredictionResult<A>, String> {
700 if history.len() < 2 {
701 return Err("Insufficient history for prediction".to_string());
702 }
703
704 let loss_values: Vec<A> = history.iter().map(|s| s.loss).collect();
706
707 let predicted_value = self.linear_prediction(&loss_values, steps_ahead)?;
709
710 let recent_volatility = self.compute_recent_volatility(&loss_values)?;
712 let confidence_interval = (
713 predicted_value - recent_volatility,
714 predicted_value + recent_volatility,
715 );
716
717 let prediction = PredictionResult {
718 predicted_value,
719 confidence_interval,
720 method: "linear".to_string(),
721 steps_ahead,
722 timestamp: Instant::now(),
723 actual_value: None,
724 };
725
726 if self.prediction_history.len() >= 1000 {
728 self.prediction_history.pop_front();
729 }
730 self.prediction_history.push_back(prediction.clone());
731
732 Ok(prediction)
733 }
734
735 fn linear_prediction(&self, values: &[A], steps_ahead: usize) -> Result<A, String> {
736 if values.len() < 2 {
737 return Ok(A::zero());
738 }
739
740 let n = values.len();
742 let x1 = A::from(n - 1).expect("unwrap failed");
743 let y1 = values[n - 1];
744 let x2 = A::from(n).expect("unwrap failed");
745 let y2 = values[n - 1]; if n >= 3 {
749 let slope = (values[n - 1] - values[n - 3]) / A::from(2).expect("unwrap failed");
750 let predicted = values[n - 1] + slope * A::from(steps_ahead).expect("unwrap failed");
751 Ok(predicted)
752 } else {
753 Ok(values[n - 1])
754 }
755 }
756
757 fn exponential_prediction(&self, values: &[A], steps_ahead: usize) -> Result<A, String> {
758 if values.is_empty() {
759 return Ok(A::zero());
760 }
761
762 let alpha = A::from(0.3).expect("unwrap failed");
764 let mut forecast = values[0];
765
766 for &value in values.iter().skip(1) {
767 forecast = alpha * value + (A::one() - alpha) * forecast;
768 }
769
770 for _ in 0..steps_ahead {
772 forecast = forecast * A::from(0.99).expect("unwrap failed"); }
774
775 Ok(forecast)
776 }
777
778 fn compute_recent_volatility(&self, values: &[A]) -> Result<A, String> {
779 if values.len() < 2 {
780 return Ok(A::zero());
781 }
782
783 let recent_count = values.len().min(10);
784 let recent_values = &values[values.len() - recent_count..];
785
786 let mean = recent_values.iter().cloned().sum::<A>()
787 / A::from(recent_count).expect("unwrap failed");
788 let variance = recent_values
789 .iter()
790 .map(|&v| (v - mean) * (v - mean))
791 .sum::<A>()
792 / A::from(recent_count).expect("unwrap failed");
793
794 Ok(variance.sqrt())
795 }
796
797 fn update_with_actual(&mut self, snapshot: &PerformanceSnapshot<A>) -> Result<(), String> {
798 let mut updated_predictions = Vec::new();
800
801 for prediction in &mut self.prediction_history {
802 if prediction.actual_value.is_none() {
803 let time_diff = snapshot.timestamp.duration_since(prediction.timestamp);
804 let expected_duration = Duration::from_secs(prediction.steps_ahead as u64 * 10); if time_diff >= expected_duration {
807 prediction.actual_value = Some(snapshot.loss);
808 updated_predictions.push(prediction.clone());
809 }
810 }
811 }
812
813 for prediction in &updated_predictions {
815 self.update_accuracy_metrics(prediction)?;
816 }
817
818 Ok(())
819 }
820
821 fn update_accuracy_metrics(&mut self, prediction: &PredictionResult<A>) -> Result<(), String> {
822 if let Some(actual) = prediction.actual_value {
823 let error = (prediction.predicted_value - actual).abs();
824 let relative_error = error / actual.max(A::from(1e-8).expect("unwrap failed"));
825
826 let accuracy = A::one() - relative_error.min(A::one());
828 let method_name = &prediction.method;
829
830 self.model_accuracies.insert(method_name.clone(), accuracy);
831 }
832
833 Ok(())
834 }
835
836 fn get_average_accuracy(&self) -> f64 {
837 if self.model_accuracies.is_empty() {
838 return 0.0;
839 }
840
841 let sum: A = self.model_accuracies.values().cloned().sum();
842 let avg = sum / A::from(self.model_accuracies.len()).expect("unwrap failed");
843 avg.to_f64().unwrap_or(0.0)
844 }
845
846 fn reset(&mut self) {
847 self.prediction_history.clear();
848 self.model_accuracies.clear();
849 self.ensemble_weights.clear();
850 }
851}
852
853impl<A: Float + Default + Clone + Sum + Send + Sync + Send + Sync>
854 PerformanceImprovementTracker<A>
855{
856 fn new() -> Self {
857 Self {
858 baseline_metrics: HashMap::new(),
859 improvement_rates: HashMap::new(),
860 improvement_history: VecDeque::with_capacity(1000),
861 plateau_detector: PlateauDetector::new(50, A::from(0.01).expect("unwrap failed")),
862 }
863 }
864
865 fn update(&mut self, snapshot: &PerformanceSnapshot<A>) -> Result<(), String> {
866 if self.baseline_metrics.is_empty() {
868 self.baseline_metrics
869 .insert("loss".to_string(), snapshot.loss);
870 if let Some(accuracy) = snapshot.accuracy {
871 self.baseline_metrics
872 .insert("accuracy".to_string(), accuracy);
873 }
874 }
875
876 if let Some(&baseline_loss) = self.baseline_metrics.get("loss") {
878 if snapshot.loss < baseline_loss {
879 let improvement = baseline_loss - snapshot.loss;
880 let improvement_event = ImprovementEvent {
881 timestamp: snapshot.timestamp,
882 metric_name: "loss".to_string(),
883 improvement,
884 improvement_rate: improvement / A::from(1.0).expect("unwrap failed"), context: "optimization_step".to_string(),
886 };
887
888 if self.improvement_history.len() >= 1000 {
889 self.improvement_history.pop_front();
890 }
891 self.improvement_history.push_back(improvement_event);
892
893 self.baseline_metrics
895 .insert("loss".to_string(), snapshot.loss);
896 }
897 }
898
899 self.plateau_detector.update(snapshot.loss);
901
902 Ok(())
903 }
904
905 fn reset(&mut self) {
906 self.baseline_metrics.clear();
907 self.improvement_rates.clear();
908 self.improvement_history.clear();
909 self.plateau_detector.reset();
910 }
911}
912
913impl<A: Float + Default + Clone + Send + Sync + std::iter::Sum> PlateauDetector<A> {
914 fn new(window_size: usize, threshold: A) -> Self {
915 Self {
916 window_size,
917 plateau_threshold: threshold,
918 recent_values: VecDeque::with_capacity(window_size),
919 is_plateau: false,
920 plateau_duration: Duration::ZERO,
921 last_significant_change: None,
922 }
923 }
924
925 fn update(&mut self, value: A) {
926 if self.recent_values.len() >= self.window_size {
927 self.recent_values.pop_front();
928 }
929 self.recent_values.push_back(value);
930
931 if self.recent_values.len() >= self.window_size {
932 self.detect_plateau();
933 }
934 }
935
936 fn detect_plateau(&mut self) {
937 if self.recent_values.len() < 2 {
938 return;
939 }
940
941 let max_val = self.recent_values.iter().cloned().fold(A::zero(), A::max);
942 let min_val = self.recent_values.iter().cloned().fold(A::zero(), A::min);
943 let range = max_val - min_val;
944
945 let was_plateau = self.is_plateau;
946 self.is_plateau = range < self.plateau_threshold;
947
948 if self.is_plateau && !was_plateau {
949 self.plateau_duration = Duration::ZERO;
950 } else if self.is_plateau {
951 self.plateau_duration += Duration::from_secs(1); } else if !self.is_plateau {
953 self.last_significant_change = Some(Instant::now());
954 self.plateau_duration = Duration::ZERO;
955 }
956 }
957
958 fn reset(&mut self) {
959 self.recent_values.clear();
960 self.is_plateau = false;
961 self.plateau_duration = Duration::ZERO;
962 self.last_significant_change = None;
963 }
964}
965
966impl<A: Float + Default + Clone + Sum + Send + Sync + Send + Sync> PerformanceAnomalyDetector<A> {
967 fn new(threshold: f64) -> Self {
968 Self {
969 threshold: A::from(threshold).expect("unwrap failed"),
970 historical_stats: HashMap::new(),
971 recent_anomalies: VecDeque::with_capacity(100),
972 adaptive_threshold: true,
973 }
974 }
975
976 fn check_for_anomalies(
977 &mut self,
978 snapshot: &PerformanceSnapshot<A>,
979 ) -> Result<Vec<PerformanceAnomaly<A>>, String> {
980 let mut anomalies = Vec::new();
981
982 let loss_anomaly = self.check_metric_anomaly("loss", snapshot.loss, snapshot.timestamp)?;
984 if let Some(anomaly) = loss_anomaly {
985 anomalies.push(anomaly);
986 }
987
988 if let Some(accuracy) = snapshot.accuracy {
990 let accuracy_anomaly =
991 self.check_metric_anomaly("accuracy", accuracy, snapshot.timestamp)?;
992 if let Some(anomaly) = accuracy_anomaly {
993 anomalies.push(anomaly);
994 }
995 }
996
997 Ok(anomalies)
998 }
999
1000 fn check_metric_anomaly(
1001 &mut self,
1002 metric_name: &str,
1003 value: A,
1004 timestamp: Instant,
1005 ) -> Result<Option<PerformanceAnomaly<A>>, String> {
1006 let stats = self
1008 .historical_stats
1009 .entry(metric_name.to_string())
1010 .or_insert_with(|| MetricStatistics {
1011 mean: value,
1012 variance: A::zero(),
1013 min_value: value,
1014 max_value: value,
1015 count: 0,
1016 last_update: timestamp,
1017 });
1018
1019 stats.count += 1;
1021 let delta = value - stats.mean;
1022 stats.mean = stats.mean + delta / A::from(stats.count).expect("unwrap failed");
1023 let delta2 = value - stats.mean;
1024 stats.variance = stats.variance + delta * delta2;
1025 stats.min_value = stats.min_value.min(value);
1026 stats.max_value = stats.max_value.max(value);
1027 stats.last_update = timestamp;
1028
1029 if stats.count >= 10 {
1031 let std_dev =
1032 (stats.variance / A::from(stats.count - 1).expect("unwrap failed")).sqrt();
1033 let z_score = (value - stats.mean) / std_dev.max(A::from(1e-8).expect("unwrap failed"));
1034
1035 if z_score.abs() > self.threshold {
1036 let severity = if z_score.abs() > A::from(3.0).expect("unwrap failed") {
1037 AnomalySeverity::Critical
1038 } else if z_score.abs() > A::from(2.5).expect("unwrap failed") {
1039 AnomalySeverity::Major
1040 } else {
1041 AnomalySeverity::Moderate
1042 };
1043
1044 let anomaly_type = if z_score > A::zero() {
1045 AnomalyType::High
1046 } else {
1047 AnomalyType::Low
1048 };
1049
1050 let expected_range = (
1051 stats.mean - self.threshold * std_dev,
1052 stats.mean + self.threshold * std_dev,
1053 );
1054
1055 let anomaly = PerformanceAnomaly {
1056 timestamp,
1057 metric_name: metric_name.to_string(),
1058 observed_value: value,
1059 expected_range,
1060 severity,
1061 anomaly_type,
1062 };
1063
1064 return Ok(Some(anomaly));
1065 }
1066 }
1067
1068 Ok(None)
1069 }
1070
1071 fn update_threshold(&mut self, new_threshold: A) {
1072 self.threshold = new_threshold;
1073 }
1074
1075 fn reset(&mut self) {
1076 self.historical_stats.clear();
1077 self.recent_anomalies.clear();
1078 }
1079}
1080
/// Snapshot of a `PerformanceTracker`'s internal state for monitoring.
#[derive(Debug, Clone)]
pub struct PerformanceDiagnostics {
    /// Number of snapshots currently held in history.
    pub history_size: usize,
    /// Whether a baseline snapshot has been recorded.
    pub baseline_set: bool,
    /// Whether any trend statistics are available.
    pub trends_available: bool,
    /// Number of anomalies currently retained.
    pub anomalies_detected: usize,
    /// Whether the loss is currently judged to be on a plateau.
    pub plateau_detected: bool,
    /// Average prediction accuracy across methods (0.0 when none).
    pub prediction_accuracy: f64,
}