// optirs_core/streaming/adaptive_streaming/performance.rs

//! Performance tracking and prediction for streaming optimization.
//!
//! This module provides comprehensive performance monitoring, trend analysis,
//! and prediction capabilities for streaming optimization scenarios, including
//! real-time metrics collection, statistical analysis, and predictive modeling.
7use super::config::*;
8use super::optimizer::{Adaptation, AdaptationPriority, AdaptationType, StreamingDataPoint};
9use super::resource_management::ResourceUsage;
10
11use scirs2_core::numeric::Float;
12use serde::{Deserialize, Serialize};
13use std::collections::{HashMap, VecDeque};
14use std::iter::Sum;
15use std::time::{Duration, Instant};
16
/// Performance snapshot representing metrics at a specific point in time.
///
/// Snapshots are appended to the tracker's history and drive trend analysis,
/// improvement tracking, anomaly detection, and prediction.
#[derive(Debug, Clone)]
pub struct PerformanceSnapshot<A: Float + Send + Sync> {
    /// Timestamp when the snapshot was taken
    pub timestamp: Instant,
    /// Primary loss metric (always present; the main signal for trends/predictions)
    pub loss: A,
    /// Accuracy metric, if applicable to the task
    pub accuracy: Option<A>,
    /// Convergence rate, if computed
    pub convergence_rate: Option<A>,
    /// Gradient norm, if computed
    pub gradient_norm: Option<A>,
    /// Magnitude of the most recent parameter update, if computed
    pub parameter_update_magnitude: Option<A>,
    /// Data quality statistics for the batch this snapshot covers
    pub data_statistics: DataStatistics<A>,
    /// Resource usage measured at snapshot time
    pub resource_usage: ResourceUsage,
    /// Custom performance metrics keyed by metric name
    pub custom_metrics: HashMap<String, A>,
}
39
/// Data quality and distribution statistics for one batch of samples.
#[derive(Debug, Clone)]
pub struct DataStatistics<A: Float + Send + Sync> {
    /// Number of samples in this batch
    pub sample_count: usize,
    /// Feature-wise means (length = number of features)
    pub feature_means: scirs2_core::ndarray::Array1<A>,
    /// Feature-wise standard deviations (same length as `feature_means`)
    pub feature_stds: scirs2_core::ndarray::Array1<A>,
    /// Average data quality score for the batch
    pub average_quality: A,
    /// Timestamp of statistics computation
    pub timestamp: Instant,
}
54
55impl<A: Float + Send + Sync> Default for DataStatistics<A> {
56    fn default() -> Self {
57        Self {
58            sample_count: 0,
59            feature_means: scirs2_core::ndarray::Array1::zeros(0),
60            feature_stds: scirs2_core::ndarray::Array1::zeros(0),
61            average_quality: A::zero(),
62            timestamp: Instant::now(),
63        }
64    }
65}
66
/// Performance metrics for tracking and analysis.
///
/// Each variant carries the metric's value; `Custom` additionally carries a name.
#[derive(Debug, Clone)]
pub enum PerformanceMetric<A: Float + Send + Sync> {
    /// Loss function value
    Loss(A),
    /// Classification/regression accuracy
    Accuracy(A),
    /// Rate of convergence
    ConvergenceRate(A),
    /// Gradient magnitude
    GradientNorm(A),
    /// Learning rate effectiveness
    LearningRateEffectiveness(A),
    /// Resource utilization efficiency
    ResourceEfficiency(A),
    /// Data quality score
    DataQuality(A),
    /// Custom metric with name and value
    Custom(String, A),
}
87
/// Context information describing the optimizer state when performance is evaluated.
#[derive(Debug, Clone)]
pub struct PerformanceContext<A: Float + Send + Sync> {
    /// Current learning rate
    pub learning_rate: A,
    /// Current batch size
    pub batch_size: usize,
    /// Current buffer size
    pub buffer_size: usize,
    /// Whether drift was recently detected
    pub drift_detected: bool,
    /// Resource constraints in effect
    pub resource_constraints: ResourceUsage,
    /// Time elapsed since the last adaptation was applied
    pub time_since_adaptation: Duration,
}
104
/// Performance tracker for streaming optimization.
///
/// Aggregates snapshot history and delegates to sub-components for trend
/// analysis, prediction, improvement/plateau tracking, and anomaly detection.
pub struct PerformanceTracker<A: Float + Send + Sync + std::iter::Sum> {
    /// Configuration for performance tracking (history size, feature toggles)
    config: PerformanceConfig,
    /// Bounded performance history (oldest entries are evicted first)
    performance_history: VecDeque<PerformanceSnapshot<A>>,
    /// Trend analyzer over recent metric windows
    trend_analyzer: PerformanceTrendAnalyzer<A>,
    /// Performance predictor (forecasts future loss)
    predictor: PerformancePredictor<A>,
    /// Baseline snapshot (first measurement seen)
    baseline: Option<PerformanceSnapshot<A>>,
    /// Current performance context, if one has been set
    current_context: Option<PerformanceContext<A>>,
    /// Tracks improvements relative to baseline metrics
    improvement_tracker: PerformanceImprovementTracker<A>,
    /// Statistical anomaly detector for performance metrics
    performance_anomaly_detector: PerformanceAnomalyDetector<A>,
}
124
/// Trend analysis for performance metrics over a rolling window.
pub struct PerformanceTrendAnalyzer<A: Float + Send + Sync> {
    /// Window size (number of recent values) used for trend analysis
    window_size: usize,
    /// Current trend state per metric name
    trends: HashMap<String, TrendData<A>>,
    /// Configured trend computation methods
    trend_methods: Vec<TrendMethod>,
}
134
/// Trend data for a specific metric.
#[derive(Debug, Clone)]
pub struct TrendData<A: Float + Send + Sync> {
    /// Linear trend slope (change per step across the window)
    pub slope: A,
    /// Correlation coefficient between the metric and its time index
    pub correlation: A,
    /// Trend volatility (standard deviation over the window)
    pub volatility: A,
    /// Trend confidence score (higher means a more reliable trend)
    pub confidence: A,
    /// Rolling window of recent values used for trend calculation
    pub recent_values: VecDeque<A>,
    /// Last update timestamp
    pub last_update: Instant,
}
151
/// Methods for trend calculation.
#[derive(Debug, Clone)]
pub enum TrendMethod {
    /// Ordinary least-squares linear regression
    LinearRegression,
    /// Moving average over `window` samples
    MovingAverage { window: usize },
    /// Exponential smoothing with smoothing factor `alpha`
    ExponentialSmoothing { alpha: f64 },
    /// Seasonal decomposition
    SeasonalDecomposition,
}
164
/// Performance predictor using various forecasting methods.
pub struct PerformancePredictor<A: Float + Send + Sync> {
    /// Prediction methods available to this predictor
    prediction_methods: Vec<PredictionMethod>,
    /// Bounded history of past predictions, kept for accuracy tracking
    prediction_history: VecDeque<PredictionResult<A>>,
    /// Per-method accuracy scores, keyed by method name
    model_accuracies: HashMap<String, A>,
    /// Ensemble weights for combining predictions, keyed by method name
    ensemble_weights: HashMap<String, A>,
}
176
/// Prediction methods for performance forecasting.
#[derive(Debug, Clone)]
pub enum PredictionMethod {
    /// Linear extrapolation
    Linear,
    /// Double exponential smoothing with level factor `alpha` and trend factor `beta`
    Exponential { alpha: f64, beta: f64 },
    /// ARIMA model with autoregressive order `p`, differencing `d`, moving-average order `q`
    ARIMA { p: usize, d: usize, q: usize },
    /// Neural network with the given hidden layer sizes
    NeuralNetwork { hidden_layers: Vec<usize> },
    /// Ensemble of multiple methods
    Ensemble,
}
191
/// Result of a performance prediction.
#[derive(Debug, Clone)]
pub struct PredictionResult<A: Float + Send + Sync> {
    /// Predicted metric value
    pub predicted_value: A,
    /// Prediction confidence interval as (lower, upper)
    pub confidence_interval: (A, A),
    /// Name of the prediction method used (e.g. "linear")
    pub method: String,
    /// Number of steps ahead this prediction targets
    pub steps_ahead: usize,
    /// When the prediction was made
    pub timestamp: Instant,
    /// Actual observed value, filled in later for accuracy assessment
    pub actual_value: Option<A>,
}
208
/// Tracks performance improvements against baseline metrics and detects plateaus.
pub struct PerformanceImprovementTracker<A: Float + Send + Sync> {
    /// Baseline metric values, keyed by metric name
    baseline_metrics: HashMap<String, A>,
    /// Current improvement rates, keyed by metric name
    improvement_rates: HashMap<String, A>,
    /// Bounded history of recorded improvement events
    improvement_history: VecDeque<ImprovementEvent<A>>,
    /// Plateau detector fed with each new loss value
    plateau_detector: PlateauDetector<A>,
}
220
/// A recorded performance improvement event.
#[derive(Debug, Clone)]
pub struct ImprovementEvent<A: Float + Send + Sync> {
    /// When the improvement occurred
    pub timestamp: Instant,
    /// Name of the metric that improved
    pub metric_name: String,
    /// Improvement magnitude (positive = better)
    pub improvement: A,
    /// Improvement rate (per unit time)
    pub improvement_rate: A,
    /// Free-form description of the context when improvement occurred
    pub context: String,
}
235
/// Plateau detection for performance metrics over a rolling window.
pub struct PlateauDetector<A: Float + Send + Sync> {
    /// Number of recent values considered for plateau detection
    window_size: usize,
    /// Minimum change required for the metric to count as non-plateaued
    plateau_threshold: A,
    /// Rolling window of recent performance values
    recent_values: VecDeque<A>,
    /// Whether a plateau is currently detected
    is_plateau: bool,
    /// How long the current plateau has lasted
    plateau_duration: Duration,
    /// Timestamp of the last significant change, if any
    last_significant_change: Option<Instant>,
}
251
/// Statistical anomaly detection for performance metrics.
pub struct PerformanceAnomalyDetector<A: Float + Send + Sync> {
    /// Detection threshold expressed in standard deviations from the mean
    threshold: A,
    /// Running statistics per metric, used to judge new observations
    historical_stats: HashMap<String, MetricStatistics<A>>,
    /// Recently detected anomalies
    recent_anomalies: VecDeque<PerformanceAnomaly<A>>,
    /// Whether the threshold adapts over time
    adaptive_threshold: bool,
}
263
/// Running statistics for a single performance metric.
#[derive(Debug, Clone)]
pub struct MetricStatistics<A: Float + Send + Sync> {
    /// Running mean
    pub mean: A,
    /// Running variance
    pub variance: A,
    /// Minimum observed value
    pub min_value: A,
    /// Maximum observed value
    pub max_value: A,
    /// Number of observations folded into these statistics
    pub count: usize,
    /// Last update timestamp
    pub last_update: Instant,
}
280
/// A detected performance anomaly event.
#[derive(Debug, Clone)]
pub struct PerformanceAnomaly<A: Float + Send + Sync> {
    /// When the anomaly was detected
    pub timestamp: Instant,
    /// Name of the affected metric
    pub metric_name: String,
    /// The observed (anomalous) value
    pub observed_value: A,
    /// Expected value range as (lower, upper)
    pub expected_range: (A, A),
    /// Anomaly severity
    pub severity: AnomalySeverity,
    /// Anomaly type classification
    pub anomaly_type: AnomalyType,
}
297
/// Severity levels for performance anomalies.
///
/// Variants are declared in ascending severity, so the derived `Ord`
/// yields `Minor < Moderate < Major < Critical`.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum AnomalySeverity {
    /// Minor anomaly
    Minor,
    /// Moderate anomaly requiring attention
    Moderate,
    /// Major anomaly requiring intervention
    Major,
    /// Critical anomaly requiring immediate action
    Critical,
}
310
/// Types of performance anomalies.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AnomalyType {
    /// Value significantly higher than expected
    High,
    /// Value significantly lower than expected
    Low,
    /// Sudden change in trend
    TrendChange,
    /// Unexpected oscillation
    Oscillation,
    /// Performance degradation
    Degradation,
    /// Performance plateau
    Plateau,
}
327
328impl<A: Float + Default + Clone + std::iter::Sum + Send + Sync + std::fmt::Debug>
329    PerformanceTracker<A>
330{
331    /// Creates a new performance tracker
332    pub fn new(config: &StreamingConfig) -> Result<Self, String> {
333        let performance_config = config.performance_config.clone();
334
335        let trend_analyzer = PerformanceTrendAnalyzer::new(performance_config.trend_window_size);
336        let predictor = PerformancePredictor::new();
337        let improvement_tracker = PerformanceImprovementTracker::new();
338        let performance_anomaly_detector = PerformanceAnomalyDetector::new(2.0); // 2 sigma threshold
339
340        Ok(Self {
341            config: performance_config,
342            performance_history: VecDeque::with_capacity(1000),
343            trend_analyzer,
344            predictor,
345            baseline: None,
346            current_context: None,
347            improvement_tracker,
348            performance_anomaly_detector,
349        })
350    }
351
352    /// Adds a new performance snapshot
353    pub fn add_performance(&mut self, snapshot: PerformanceSnapshot<A>) -> Result<(), String> {
354        // Store in history
355        if self.performance_history.len() >= self.config.history_size {
356            self.performance_history.pop_front();
357        }
358        self.performance_history.push_back(snapshot.clone());
359
360        // Set baseline if this is the first measurement
361        if self.baseline.is_none() {
362            self.baseline = Some(snapshot.clone());
363        }
364
365        // Update trend analysis
366        if self.config.enable_trend_analysis {
367            self.trend_analyzer.update(&snapshot)?;
368        }
369
370        // Update improvement tracking
371        self.improvement_tracker.update(&snapshot)?;
372
373        // Check for performance anomalies
374        let anomalies = self
375            .performance_anomaly_detector
376            .check_for_anomalies(&snapshot)?;
377        if !anomalies.is_empty() {
378            // Handle detected anomalies
379            self.handle_performance_anomalies(&anomalies)?;
380        }
381
382        // Update predictions if enabled
383        if self.config.enable_prediction {
384            self.predictor.update_with_actual(&snapshot)?;
385        }
386
387        Ok(())
388    }
389
390    /// Gets recent performance snapshots
391    pub fn get_recent_performance(&self, count: usize) -> Vec<PerformanceSnapshot<A>> {
392        self.performance_history
393            .iter()
394            .rev()
395            .take(count)
396            .cloned()
397            .collect()
398    }
399
400    /// Gets recent loss values for trend analysis
401    pub fn get_recent_losses(&self, count: usize) -> Vec<A> {
402        self.performance_history
403            .iter()
404            .rev()
405            .take(count)
406            .map(|snapshot| snapshot.loss)
407            .collect()
408    }
409
410    /// Predicts future performance
411    pub fn predict_performance(
412        &mut self,
413        steps_ahead: usize,
414    ) -> Result<PredictionResult<A>, String> {
415        if !self.config.enable_prediction {
416            return Err("Performance prediction is disabled".to_string());
417        }
418
419        self.predictor
420            .predict(steps_ahead, &self.performance_history)
421    }
422
423    /// Gets current performance trends
424    pub fn get_performance_trends(&self) -> HashMap<String, TrendData<A>> {
425        self.trend_analyzer.get_current_trends()
426    }
427
428    /// Computes adaptation for performance thresholds
429    pub fn apply_threshold_adaptation(&mut self, adaptation: &Adaptation<A>) -> Result<(), String> {
430        if adaptation.adaptation_type == AdaptationType::PerformanceThreshold {
431            // Adjust anomaly detection thresholds
432            let new_threshold = self.performance_anomaly_detector.threshold + adaptation.magnitude;
433            self.performance_anomaly_detector
434                .update_threshold(new_threshold);
435        }
436        Ok(())
437    }
438
439    /// Handles detected performance anomalies
440    fn handle_performance_anomalies(
441        &mut self,
442        anomalies: &[PerformanceAnomaly<A>],
443    ) -> Result<(), String> {
444        for anomaly in anomalies {
445            match anomaly.severity {
446                AnomalySeverity::Critical | AnomalySeverity::Major => {
447                    // Log critical anomalies for immediate attention
448                    println!("Critical performance anomaly detected: {:?}", anomaly);
449                }
450                _ => {
451                    // Store for analysis
452                    self.performance_anomaly_detector
453                        .recent_anomalies
454                        .push_back(anomaly.clone());
455                }
456            }
457        }
458        Ok(())
459    }
460
461    /// Resets performance tracking
462    pub fn reset(&mut self) -> Result<(), String> {
463        self.performance_history.clear();
464        self.baseline = None;
465        self.current_context = None;
466        self.trend_analyzer.reset();
467        self.predictor.reset();
468        self.improvement_tracker.reset();
469        self.performance_anomaly_detector.reset();
470        Ok(())
471    }
472
473    /// Gets diagnostic information
474    pub fn get_diagnostics(&self) -> PerformanceDiagnostics {
475        PerformanceDiagnostics {
476            history_size: self.performance_history.len(),
477            baseline_set: self.baseline.is_some(),
478            trends_available: !self.trend_analyzer.trends.is_empty(),
479            anomalies_detected: self.performance_anomaly_detector.recent_anomalies.len(),
480            plateau_detected: self.improvement_tracker.plateau_detector.is_plateau,
481            prediction_accuracy: self.predictor.get_average_accuracy(),
482        }
483    }
484}
485
486impl<A: Float + Default + Clone + Send + Sync + std::iter::Sum> PerformanceTrendAnalyzer<A> {
487    fn new(window_size: usize) -> Self {
488        Self {
489            window_size,
490            trends: HashMap::new(),
491            trend_methods: vec![
492                TrendMethod::LinearRegression,
493                TrendMethod::MovingAverage {
494                    window: window_size / 2,
495                },
496                TrendMethod::ExponentialSmoothing { alpha: 0.3 },
497            ],
498        }
499    }
500
501    fn update(&mut self, snapshot: &PerformanceSnapshot<A>) -> Result<(), String> {
502        // Update trends for different metrics
503        self.update_metric_trend("loss", snapshot.loss)?;
504
505        if let Some(accuracy) = snapshot.accuracy {
506            self.update_metric_trend("accuracy", accuracy)?;
507        }
508
509        if let Some(convergence) = snapshot.convergence_rate {
510            self.update_metric_trend("convergence", convergence)?;
511        }
512
513        self.compute_trends()?;
514        Ok(())
515    }
516
517    fn update_metric_trend(&mut self, metric_name: &str, value: A) -> Result<(), String> {
518        let trend_data = self
519            .trends
520            .entry(metric_name.to_string())
521            .or_insert_with(|| TrendData {
522                slope: A::zero(),
523                correlation: A::zero(),
524                volatility: A::zero(),
525                confidence: A::zero(),
526                recent_values: VecDeque::with_capacity(self.window_size),
527                last_update: Instant::now(),
528            });
529
530        if trend_data.recent_values.len() >= self.window_size {
531            trend_data.recent_values.pop_front();
532        }
533        trend_data.recent_values.push_back(value);
534        trend_data.last_update = Instant::now();
535
536        Ok(())
537    }
538
539    fn compute_trends(&mut self) -> Result<(), String> {
540        let keys: Vec<_> = self.trends.keys().cloned().collect();
541
542        // Collect all computed values first
543        let mut computed_values = Vec::new();
544        for metric_name in &keys {
545            if let Some(trend_data) = self.trends.get(metric_name) {
546                if trend_data.recent_values.len() >= 3 {
547                    let values = trend_data.recent_values.clone();
548                    let slope = self.compute_slope(&values)?;
549                    let correlation = self.compute_correlation(&values)?;
550                    let volatility = self.compute_volatility(&values)?;
551                    let confidence = self.compute_confidence(&values)?;
552                    computed_values.push((
553                        metric_name.clone(),
554                        slope,
555                        correlation,
556                        volatility,
557                        confidence,
558                    ));
559                }
560            }
561        }
562
563        // Now update the trend data
564        for (metric_name, slope, correlation, volatility, confidence) in computed_values {
565            if let Some(trend_data) = self.trends.get_mut(&metric_name) {
566                trend_data.slope = slope;
567                trend_data.correlation = correlation;
568                trend_data.volatility = volatility;
569                trend_data.confidence = confidence;
570            }
571        }
572
573        Ok(())
574    }
575
576    fn compute_slope(&self, values: &VecDeque<A>) -> Result<A, String> {
577        if values.len() < 2 {
578            return Ok(A::zero());
579        }
580
581        let n = A::from(values.len()).expect("unwrap failed");
582        // Compute sum_x = 1 + 2 + ... + n = n*(n+1)/2
583        let sum_x = n * (n + A::one()) / A::from(2.0).expect("unwrap failed");
584        let sum_y = values.iter().cloned().sum::<A>();
585        let sum_xy = values
586            .iter()
587            .enumerate()
588            .map(|(i, &y)| A::from(i + 1).expect("unwrap failed") * y)
589            .sum::<A>();
590        // Compute sum_x_squared = 1^2 + 2^2 + ... + n^2 = n*(n+1)*(2n+1)/6
591        let two = A::from(2.0).expect("unwrap failed");
592        let six = A::from(6.0).expect("unwrap failed");
593        let sum_x_squared = n * (n + A::one()) * (two * n + A::one()) / six;
594
595        let denominator = n * sum_x_squared - sum_x * sum_x;
596        if denominator == A::zero() {
597            return Ok(A::zero());
598        }
599
600        let slope = (n * sum_xy - sum_x * sum_y) / denominator;
601        Ok(slope)
602    }
603
604    fn compute_correlation(&self, values: &VecDeque<A>) -> Result<A, String> {
605        if values.len() < 2 {
606            return Ok(A::zero());
607        }
608
609        // Simplified correlation with time index
610        let n = values.len();
611        let time_values: Vec<A> = (1..=n)
612            .map(|i| A::from(i).expect("unwrap failed"))
613            .collect();
614        let value_vec: Vec<A> = values.iter().cloned().collect();
615
616        let mean_time = time_values.iter().cloned().sum::<A>() / A::from(n).expect("unwrap failed");
617        let mean_value = value_vec.iter().cloned().sum::<A>() / A::from(n).expect("unwrap failed");
618
619        let numerator = time_values
620            .iter()
621            .zip(value_vec.iter())
622            .map(|(&t, &v)| (t - mean_time) * (v - mean_value))
623            .sum::<A>();
624
625        let time_variance = time_values
626            .iter()
627            .map(|&t| (t - mean_time) * (t - mean_time))
628            .sum::<A>();
629
630        let value_variance = value_vec
631            .iter()
632            .map(|&v| (v - mean_value) * (v - mean_value))
633            .sum::<A>();
634
635        let denominator = (time_variance * value_variance).sqrt();
636        if denominator == A::zero() {
637            return Ok(A::zero());
638        }
639
640        Ok(numerator / denominator)
641    }
642
643    fn compute_volatility(&self, values: &VecDeque<A>) -> Result<A, String> {
644        if values.len() < 2 {
645            return Ok(A::zero());
646        }
647
648        let mean =
649            values.iter().cloned().sum::<A>() / A::from(values.len()).expect("unwrap failed");
650        let variance = values.iter().map(|&v| (v - mean) * (v - mean)).sum::<A>()
651            / A::from(values.len()).expect("unwrap failed");
652
653        Ok(variance.sqrt())
654    }
655
656    fn compute_confidence(&self, values: &VecDeque<A>) -> Result<A, String> {
657        // Simple confidence based on trend consistency
658        if values.len() < 3 {
659            return Ok(A::zero());
660        }
661
662        let slope = self.compute_slope(values)?;
663        let correlation = self.compute_correlation(values)?;
664
665        // Confidence increases with stronger correlation and consistent slope direction
666        let confidence = correlation.abs() * (A::one() - (slope.abs() / (slope.abs() + A::one())));
667        Ok(confidence)
668    }
669
670    fn get_current_trends(&self) -> HashMap<String, TrendData<A>> {
671        self.trends.clone()
672    }
673
674    fn reset(&mut self) {
675        self.trends.clear();
676    }
677}
678
679impl<A: Float + Default + Clone + Send + Sync + std::iter::Sum> PerformancePredictor<A> {
680    fn new() -> Self {
681        Self {
682            prediction_methods: vec![
683                PredictionMethod::Linear,
684                PredictionMethod::Exponential {
685                    alpha: 0.3,
686                    beta: 0.1,
687                },
688            ],
689            prediction_history: VecDeque::with_capacity(1000),
690            model_accuracies: HashMap::new(),
691            ensemble_weights: HashMap::new(),
692        }
693    }
694
695    fn predict(
696        &mut self,
697        steps_ahead: usize,
698        history: &VecDeque<PerformanceSnapshot<A>>,
699    ) -> Result<PredictionResult<A>, String> {
700        if history.len() < 2 {
701            return Err("Insufficient history for prediction".to_string());
702        }
703
704        // Extract loss values for prediction
705        let loss_values: Vec<A> = history.iter().map(|s| s.loss).collect();
706
707        // Use linear prediction for simplicity
708        let predicted_value = self.linear_prediction(&loss_values, steps_ahead)?;
709
710        // Estimate confidence interval (simplified)
711        let recent_volatility = self.compute_recent_volatility(&loss_values)?;
712        let confidence_interval = (
713            predicted_value - recent_volatility,
714            predicted_value + recent_volatility,
715        );
716
717        let prediction = PredictionResult {
718            predicted_value,
719            confidence_interval,
720            method: "linear".to_string(),
721            steps_ahead,
722            timestamp: Instant::now(),
723            actual_value: None,
724        };
725
726        // Store prediction for later accuracy assessment
727        if self.prediction_history.len() >= 1000 {
728            self.prediction_history.pop_front();
729        }
730        self.prediction_history.push_back(prediction.clone());
731
732        Ok(prediction)
733    }
734
735    fn linear_prediction(&self, values: &[A], steps_ahead: usize) -> Result<A, String> {
736        if values.len() < 2 {
737            return Ok(A::zero());
738        }
739
740        // Simple linear extrapolation using last two points
741        let n = values.len();
742        let x1 = A::from(n - 1).expect("unwrap failed");
743        let y1 = values[n - 1];
744        let x2 = A::from(n).expect("unwrap failed");
745        let y2 = values[n - 1]; // Use same point for stability
746
747        // Use trend from last few points
748        if n >= 3 {
749            let slope = (values[n - 1] - values[n - 3]) / A::from(2).expect("unwrap failed");
750            let predicted = values[n - 1] + slope * A::from(steps_ahead).expect("unwrap failed");
751            Ok(predicted)
752        } else {
753            Ok(values[n - 1])
754        }
755    }
756
757    fn exponential_prediction(&self, values: &[A], steps_ahead: usize) -> Result<A, String> {
758        if values.is_empty() {
759            return Ok(A::zero());
760        }
761
762        // Simple exponential smoothing
763        let alpha = A::from(0.3).expect("unwrap failed");
764        let mut forecast = values[0];
765
766        for &value in values.iter().skip(1) {
767            forecast = alpha * value + (A::one() - alpha) * forecast;
768        }
769
770        // Project forward (simplified)
771        for _ in 0..steps_ahead {
772            forecast = forecast * A::from(0.99).expect("unwrap failed"); // Assume slight improvement
773        }
774
775        Ok(forecast)
776    }
777
778    fn compute_recent_volatility(&self, values: &[A]) -> Result<A, String> {
779        if values.len() < 2 {
780            return Ok(A::zero());
781        }
782
783        let recent_count = values.len().min(10);
784        let recent_values = &values[values.len() - recent_count..];
785
786        let mean = recent_values.iter().cloned().sum::<A>()
787            / A::from(recent_count).expect("unwrap failed");
788        let variance = recent_values
789            .iter()
790            .map(|&v| (v - mean) * (v - mean))
791            .sum::<A>()
792            / A::from(recent_count).expect("unwrap failed");
793
794        Ok(variance.sqrt())
795    }
796
797    fn update_with_actual(&mut self, snapshot: &PerformanceSnapshot<A>) -> Result<(), String> {
798        // Update prediction accuracy by matching actual values with predictions
799        let mut updated_predictions = Vec::new();
800
801        for prediction in &mut self.prediction_history {
802            if prediction.actual_value.is_none() {
803                let time_diff = snapshot.timestamp.duration_since(prediction.timestamp);
804                let expected_duration = Duration::from_secs(prediction.steps_ahead as u64 * 10); // Assume 10s per step
805
806                if time_diff >= expected_duration {
807                    prediction.actual_value = Some(snapshot.loss);
808                    updated_predictions.push(prediction.clone());
809                }
810            }
811        }
812
813        // Update accuracy metrics for all updated predictions
814        for prediction in &updated_predictions {
815            self.update_accuracy_metrics(prediction)?;
816        }
817
818        Ok(())
819    }
820
821    fn update_accuracy_metrics(&mut self, prediction: &PredictionResult<A>) -> Result<(), String> {
822        if let Some(actual) = prediction.actual_value {
823            let error = (prediction.predicted_value - actual).abs();
824            let relative_error = error / actual.max(A::from(1e-8).expect("unwrap failed"));
825
826            // Update accuracy for this method
827            let accuracy = A::one() - relative_error.min(A::one());
828            let method_name = &prediction.method;
829
830            self.model_accuracies.insert(method_name.clone(), accuracy);
831        }
832
833        Ok(())
834    }
835
836    fn get_average_accuracy(&self) -> f64 {
837        if self.model_accuracies.is_empty() {
838            return 0.0;
839        }
840
841        let sum: A = self.model_accuracies.values().cloned().sum();
842        let avg = sum / A::from(self.model_accuracies.len()).expect("unwrap failed");
843        avg.to_f64().unwrap_or(0.0)
844    }
845
846    fn reset(&mut self) {
847        self.prediction_history.clear();
848        self.model_accuracies.clear();
849        self.ensemble_weights.clear();
850    }
851}
852
853impl<A: Float + Default + Clone + Sum + Send + Sync + Send + Sync>
854    PerformanceImprovementTracker<A>
855{
856    fn new() -> Self {
857        Self {
858            baseline_metrics: HashMap::new(),
859            improvement_rates: HashMap::new(),
860            improvement_history: VecDeque::with_capacity(1000),
861            plateau_detector: PlateauDetector::new(50, A::from(0.01).expect("unwrap failed")),
862        }
863    }
864
865    fn update(&mut self, snapshot: &PerformanceSnapshot<A>) -> Result<(), String> {
866        // Update baseline if not set
867        if self.baseline_metrics.is_empty() {
868            self.baseline_metrics
869                .insert("loss".to_string(), snapshot.loss);
870            if let Some(accuracy) = snapshot.accuracy {
871                self.baseline_metrics
872                    .insert("accuracy".to_string(), accuracy);
873            }
874        }
875
876        // Check for improvements
877        if let Some(&baseline_loss) = self.baseline_metrics.get("loss") {
878            if snapshot.loss < baseline_loss {
879                let improvement = baseline_loss - snapshot.loss;
880                let improvement_event = ImprovementEvent {
881                    timestamp: snapshot.timestamp,
882                    metric_name: "loss".to_string(),
883                    improvement,
884                    improvement_rate: improvement / A::from(1.0).expect("unwrap failed"), // Simplified rate
885                    context: "optimization_step".to_string(),
886                };
887
888                if self.improvement_history.len() >= 1000 {
889                    self.improvement_history.pop_front();
890                }
891                self.improvement_history.push_back(improvement_event);
892
893                // Update baseline
894                self.baseline_metrics
895                    .insert("loss".to_string(), snapshot.loss);
896            }
897        }
898
899        // Update plateau detector
900        self.plateau_detector.update(snapshot.loss);
901
902        Ok(())
903    }
904
905    fn reset(&mut self) {
906        self.baseline_metrics.clear();
907        self.improvement_rates.clear();
908        self.improvement_history.clear();
909        self.plateau_detector.reset();
910    }
911}
912
913impl<A: Float + Default + Clone + Send + Sync + std::iter::Sum> PlateauDetector<A> {
914    fn new(window_size: usize, threshold: A) -> Self {
915        Self {
916            window_size,
917            plateau_threshold: threshold,
918            recent_values: VecDeque::with_capacity(window_size),
919            is_plateau: false,
920            plateau_duration: Duration::ZERO,
921            last_significant_change: None,
922        }
923    }
924
925    fn update(&mut self, value: A) {
926        if self.recent_values.len() >= self.window_size {
927            self.recent_values.pop_front();
928        }
929        self.recent_values.push_back(value);
930
931        if self.recent_values.len() >= self.window_size {
932            self.detect_plateau();
933        }
934    }
935
936    fn detect_plateau(&mut self) {
937        if self.recent_values.len() < 2 {
938            return;
939        }
940
941        let max_val = self.recent_values.iter().cloned().fold(A::zero(), A::max);
942        let min_val = self.recent_values.iter().cloned().fold(A::zero(), A::min);
943        let range = max_val - min_val;
944
945        let was_plateau = self.is_plateau;
946        self.is_plateau = range < self.plateau_threshold;
947
948        if self.is_plateau && !was_plateau {
949            self.plateau_duration = Duration::ZERO;
950        } else if self.is_plateau {
951            self.plateau_duration += Duration::from_secs(1); // Simplified
952        } else if !self.is_plateau {
953            self.last_significant_change = Some(Instant::now());
954            self.plateau_duration = Duration::ZERO;
955        }
956    }
957
958    fn reset(&mut self) {
959        self.recent_values.clear();
960        self.is_plateau = false;
961        self.plateau_duration = Duration::ZERO;
962        self.last_significant_change = None;
963    }
964}
965
966impl<A: Float + Default + Clone + Sum + Send + Sync + Send + Sync> PerformanceAnomalyDetector<A> {
967    fn new(threshold: f64) -> Self {
968        Self {
969            threshold: A::from(threshold).expect("unwrap failed"),
970            historical_stats: HashMap::new(),
971            recent_anomalies: VecDeque::with_capacity(100),
972            adaptive_threshold: true,
973        }
974    }
975
976    fn check_for_anomalies(
977        &mut self,
978        snapshot: &PerformanceSnapshot<A>,
979    ) -> Result<Vec<PerformanceAnomaly<A>>, String> {
980        let mut anomalies = Vec::new();
981
982        // Check loss anomaly
983        let loss_anomaly = self.check_metric_anomaly("loss", snapshot.loss, snapshot.timestamp)?;
984        if let Some(anomaly) = loss_anomaly {
985            anomalies.push(anomaly);
986        }
987
988        // Check accuracy anomaly if available
989        if let Some(accuracy) = snapshot.accuracy {
990            let accuracy_anomaly =
991                self.check_metric_anomaly("accuracy", accuracy, snapshot.timestamp)?;
992            if let Some(anomaly) = accuracy_anomaly {
993                anomalies.push(anomaly);
994            }
995        }
996
997        Ok(anomalies)
998    }
999
1000    fn check_metric_anomaly(
1001        &mut self,
1002        metric_name: &str,
1003        value: A,
1004        timestamp: Instant,
1005    ) -> Result<Option<PerformanceAnomaly<A>>, String> {
1006        // Update statistics for this metric
1007        let stats = self
1008            .historical_stats
1009            .entry(metric_name.to_string())
1010            .or_insert_with(|| MetricStatistics {
1011                mean: value,
1012                variance: A::zero(),
1013                min_value: value,
1014                max_value: value,
1015                count: 0,
1016                last_update: timestamp,
1017            });
1018
1019        // Update running statistics
1020        stats.count += 1;
1021        let delta = value - stats.mean;
1022        stats.mean = stats.mean + delta / A::from(stats.count).expect("unwrap failed");
1023        let delta2 = value - stats.mean;
1024        stats.variance = stats.variance + delta * delta2;
1025        stats.min_value = stats.min_value.min(value);
1026        stats.max_value = stats.max_value.max(value);
1027        stats.last_update = timestamp;
1028
1029        // Check for anomaly after sufficient samples
1030        if stats.count >= 10 {
1031            let std_dev =
1032                (stats.variance / A::from(stats.count - 1).expect("unwrap failed")).sqrt();
1033            let z_score = (value - stats.mean) / std_dev.max(A::from(1e-8).expect("unwrap failed"));
1034
1035            if z_score.abs() > self.threshold {
1036                let severity = if z_score.abs() > A::from(3.0).expect("unwrap failed") {
1037                    AnomalySeverity::Critical
1038                } else if z_score.abs() > A::from(2.5).expect("unwrap failed") {
1039                    AnomalySeverity::Major
1040                } else {
1041                    AnomalySeverity::Moderate
1042                };
1043
1044                let anomaly_type = if z_score > A::zero() {
1045                    AnomalyType::High
1046                } else {
1047                    AnomalyType::Low
1048                };
1049
1050                let expected_range = (
1051                    stats.mean - self.threshold * std_dev,
1052                    stats.mean + self.threshold * std_dev,
1053                );
1054
1055                let anomaly = PerformanceAnomaly {
1056                    timestamp,
1057                    metric_name: metric_name.to_string(),
1058                    observed_value: value,
1059                    expected_range,
1060                    severity,
1061                    anomaly_type,
1062                };
1063
1064                return Ok(Some(anomaly));
1065            }
1066        }
1067
1068        Ok(None)
1069    }
1070
1071    fn update_threshold(&mut self, new_threshold: A) {
1072        self.threshold = new_threshold;
1073    }
1074
1075    fn reset(&mut self) {
1076        self.historical_stats.clear();
1077        self.recent_anomalies.clear();
1078    }
1079}
1080
/// Diagnostic information for performance tracking
///
/// Plain-data summary of a performance tracker's current state, suitable
/// for logging or surfacing in monitoring dashboards.
#[derive(Debug, Clone)]
pub struct PerformanceDiagnostics {
    /// Number of performance snapshots currently retained in history.
    pub history_size: usize,
    /// Whether baseline metrics have been established yet.
    pub baseline_set: bool,
    /// Whether enough data exists for trend analysis to be meaningful.
    pub trends_available: bool,
    /// Count of anomalies detected so far.
    pub anomalies_detected: usize,
    /// Whether the plateau detector currently reports a plateau.
    pub plateau_detected: bool,
    /// Average prediction-model accuracy (0.0 when no models are tracked).
    pub prediction_accuracy: f64,
}