// optirs_bench/performance_regression_detector.rs

1// Advanced Performance Regression Detection System
2//
3// This module provides comprehensive performance regression detection capabilities
4// with statistical analysis, trend detection, and automated alert generation for
5// CI/CD integration and continuous performance monitoring.
6
7use crate::error::{OptimError, Result};
8use serde::{Deserialize, Serialize};
9use std::collections::{HashMap, VecDeque};
10use std::path::PathBuf;
11use std::time::{Duration, SystemTime, UNIX_EPOCH};
12
/// Main performance regression detection engine.
///
/// Owns the historical measurement store, the statistical and regression
/// analyzers, the alert system, and the (optional) baseline that new
/// measurements are compared against.
#[derive(Debug)]
pub struct PerformanceRegressionDetector {
    /// Configuration for regression detection
    config: RegressionConfig,
    /// Historical performance data
    historical_data: PerformanceDatabase,
    /// Statistical analyzer used for significance testing
    statistical_analyzer: StatisticalAnalyzer,
    /// Regression analyzer; caches the most recent detection results
    regression_analyzer: RegressionAnalyzer,
    /// Alert system for notifying on detected regressions
    alert_system: AlertSystem,
    /// Current baseline metrics; `None` until `set_baseline` is called
    baseline_metrics: Option<BaselineMetrics>,
}
29
/// Configuration for regression detection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegressionConfig {
    /// Enable regression detection; when false, `detect_regressions`
    /// returns an empty result without analyzing anything
    pub enable_detection: bool,
    /// Confidence threshold for regression detection (0.0 to 1.0);
    /// results below this confidence are discarded
    pub confidence_threshold: f64,
    /// Performance degradation threshold (e.g., 0.05 = 5% slower)
    pub degradation_threshold: f64,
    /// Minimum samples needed for statistical analysis
    pub min_samples: usize,
    /// Maximum historical data to keep
    pub max_history_size: usize,
    /// Performance metrics to track
    pub tracked_metrics: Vec<MetricType>,
    /// Statistical test type
    pub statistical_test: StatisticalTest,
    /// Regression detection sensitivity
    pub sensitivity: RegressionSensitivity,
    /// Baseline update strategy
    pub baseline_strategy: BaselineStrategy,
    /// Alert thresholds
    pub alert_thresholds: AlertThresholds,
    /// CI/CD integration settings
    pub ci_cd_config: CiCdConfig,
}
56
57/// Types of performance metrics to track
58#[derive(Debug, Clone, Serialize, Deserialize)]
59pub enum MetricType {
60    /// Execution time metrics
61    ExecutionTime,
62    /// Memory usage metrics
63    MemoryUsage,
64    /// Throughput metrics (operations per second)
65    Throughput,
66    /// CPU utilization
67    CpuUtilization,
68    /// GPU utilization
69    GpuUtilization,
70    /// Cache hit rates
71    CacheHitRate,
72    /// FLOPS (floating point operations per second)
73    Flops,
74    /// Convergence rate
75    ConvergenceRate,
76    /// Error rate
77    ErrorRate,
78    /// Custom metric
79    Custom(String),
80}
81
/// Statistical test types for regression detection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum StatisticalTest {
    /// Mann-Whitney U test (non-parametric)
    MannWhitneyU,
    /// Student's t-test (parametric; assumes normality)
    StudentTTest,
    /// Wilcoxon signed-rank test (non-parametric, paired samples)
    WilcoxonSignedRank,
    /// Kolmogorov-Smirnov test (distribution comparison)
    KolmogorovSmirnov,
    /// Custom statistical test identified by name
    Custom(String),
}
96
/// Regression detection sensitivity levels.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RegressionSensitivity {
    /// Very sensitive - detects small changes
    VeryHigh,
    /// High sensitivity
    High,
    /// Medium sensitivity (balanced)
    Medium,
    /// Low sensitivity - only major regressions
    Low,
    /// Very low sensitivity
    VeryLow,
}
111
/// Baseline update strategies.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BaselineStrategy {
    /// Update baseline automatically when performance improves
    AutoImprovement,
    /// Update baseline only manually
    Manual,
    /// Update baseline on successful releases
    OnRelease,
    /// Rolling window baseline; the payload is the window size in samples
    RollingWindow(usize),
    /// Seasonal baseline (accounts for cyclical patterns)
    Seasonal,
}
126
/// Alert threshold configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertThresholds {
    /// Performance degradation thresholds (percentage), keyed by metric
    pub degradation_thresholds: HashMap<MetricType, f64>,
    /// Memory increase thresholds (percentage), keyed by component name
    pub memory_increase_thresholds: HashMap<String, f64>,
    /// Failure rate thresholds
    pub failure_rate_threshold: f64,
    /// Timeout thresholds (seconds)
    pub timeout_threshold: f64,
}
139
/// CI/CD integration configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CiCdConfig {
    /// Enable CI/CD integration
    pub enabled: bool,
    /// Exit with error code on regression (fails the pipeline)
    pub fail_on_regression: bool,
    /// Generate performance reports for CI
    pub generate_reports: bool,
    /// Report output format
    pub report_format: ReportFormat,
    /// Report output path
    pub report_path: PathBuf,
    /// Webhook URLs for notifications
    pub webhook_urls: Vec<String>,
    /// Slack integration settings; `None` disables Slack notifications
    pub slack_config: Option<SlackConfig>,
    /// Email notification settings; `None` disables email notifications
    pub email_config: Option<EmailConfig>,
}
160
/// Report output formats.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ReportFormat {
    /// Machine-readable JSON
    Json,
    /// Machine-readable XML
    Xml,
    /// Human-readable HTML page
    Html,
    /// Markdown, suitable for PR comments
    Markdown,
    /// JUnit XML, consumed by most CI servers
    JUnit,
    /// TeamCity service-message format
    TeamCity,
}
171
/// Slack notification configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SlackConfig {
    /// Incoming-webhook URL the alert payload is posted to
    pub webhook_url: String,
    /// Target channel (e.g. "#perf-alerts")
    pub channel: String,
    /// Display name used when posting
    pub username: String,
    /// Emoji shown as the bot avatar (e.g. ":chart_with_downwards_trend:")
    pub icon_emoji: String,
}
180
/// Email notification configuration.
///
/// NOTE(review): `password` is held and serialized in plain text because
/// this struct derives `Serialize`/`Deserialize`; consider sourcing it
/// from the environment or a secret store instead of config files.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EmailConfig {
    /// SMTP server hostname
    pub smtp_server: String,
    /// SMTP server port (e.g. 587 for STARTTLS)
    pub smtp_port: u16,
    /// SMTP auth username
    pub username: String,
    /// SMTP auth password
    pub password: String,
    /// Sender address for alert mails
    pub from_address: String,
    /// Recipient addresses
    pub to_addresses: Vec<String>,
}
191
/// Performance database for historical data storage.
///
/// In-memory store of measurements, derived per-metric trends, and named
/// baselines; bounded by the configured history size.
#[derive(Debug)]
#[allow(dead_code)]
pub struct PerformanceDatabase {
    /// Historical performance measurements (oldest first)
    measurements: VecDeque<PerformanceMeasurement>,
    /// Performance trends by metric type, refreshed on each new measurement
    trends: HashMap<MetricType, PerformanceTrend>,
    /// Baseline data keyed by commit hash / version identifier
    baselines: HashMap<String, BaselineMetrics>,
    /// Configuration metadata
    metadata: DatabaseMetadata,
}
205
/// Single performance measurement taken at one point in time for one
/// commit/branch/build combination.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMeasurement {
    /// Timestamp of measurement
    pub timestamp: SystemTime,
    /// Git commit hash or version identifier
    pub commithash: String,
    /// Branch name
    pub branch: String,
    /// Build configuration (debug/release)
    pub build_config: String,
    /// Test environment information
    pub environment: EnvironmentInfo,
    /// Performance metrics recorded in this run, keyed by metric type
    pub metrics: HashMap<MetricType, MetricValue>,
    /// Test configuration details
    pub test_config: TestConfiguration,
    /// Additional free-form metadata
    pub metadata: HashMap<String, String>,
}
226
/// Performance metric value with summary statistics over its samples.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricValue {
    /// Primary (aggregate) value, e.g. the mean over samples
    pub value: f64,
    /// Standard deviation (if applicable)
    pub std_dev: Option<f64>,
    /// Number of samples aggregated into `value`
    pub sample_count: usize,
    /// Minimum observed value
    pub min_value: f64,
    /// Maximum observed value
    pub max_value: f64,
    /// Additional statistics; `None` when percentiles were not computed
    pub percentiles: Option<Percentiles>,
}
243
/// Percentile data (median and upper tail of the sample distribution).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Percentiles {
    /// 50th percentile (median)
    pub p50: f64,
    /// 90th percentile
    pub p90: f64,
    /// 95th percentile
    pub p95: f64,
    /// 99th percentile
    pub p99: f64,
}
252
/// Environment information captured alongside each measurement so that
/// runs on different hardware/toolchains can be distinguished.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvironmentInfo {
    /// Operating system
    pub os: String,
    /// CPU model
    pub cpu_model: String,
    /// Number of CPU cores
    pub cpu_cores: usize,
    /// Total memory (MB)
    pub total_memory_mb: usize,
    /// GPU information (if available)
    pub gpu_info: Option<String>,
    /// Compiler version
    pub compiler_version: String,
    /// Rust version
    pub rust_version: String,
    /// Additional environment variables relevant to the run
    pub env_vars: HashMap<String, String>,
}
273
/// Test configuration describing how a benchmark run was parameterized.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestConfiguration {
    /// Test name
    pub test_name: String,
    /// Test parameters as free-form key/value pairs
    pub parameters: HashMap<String, String>,
    /// Dataset size, if the benchmark is data-driven
    pub dataset_size: Option<usize>,
    /// Optimization iterations, if applicable
    pub iterations: Option<usize>,
    /// Batch size, if applicable
    pub batch_size: Option<usize>,
    /// Precision (f32/f64)
    pub precision: String,
}
290
/// Performance trend analysis computed from a metric's recent history
/// (see `PerformanceRegressionDetector::calculate_trend`).
#[derive(Debug, Clone)]
pub struct PerformanceTrend {
    /// Metric type this trend describes
    pub metrictype: MetricType,
    /// Trend direction (improving/degrading/stable)
    pub direction: TrendDirection,
    /// Trend strength (0.0 to 1.0): net movement relative to observed range
    pub strength: f64,
    /// Statistical significance heuristic (0.0 to 1.0)
    pub significance: f64,
    /// Recent measurements used for the trend calculation
    pub recent_values: VecDeque<f64>,
    /// Long-term average over the analyzed window
    pub long_term_average: f64,
    /// Volatility measure (coefficient of variation)
    pub volatility: f64,
}
309
/// Trend direction for a metric series.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum TrendDirection {
    /// Metric is getting better over time
    Improving,
    /// Metric is getting worse over time
    Degrading,
    /// No meaningful slope detected
    Stable,
    /// Too few samples to decide
    Uncertain,
}
318
/// Baseline metrics that new measurements are compared against.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BaselineMetrics {
    /// Version/commit of baseline
    pub version: String,
    /// Baseline measurement timestamp
    pub timestamp: SystemTime,
    /// Baseline metric values, keyed by metric type
    pub metrics: HashMap<MetricType, MetricValue>,
    /// Confidence intervals per metric
    pub confidence_intervals: HashMap<MetricType, ConfidenceInterval>,
    /// Quality score of baseline (0.0 to 1.0)
    pub quality_score: f64,
}
333
/// Confidence interval around a baseline metric value.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConfidenceInterval {
    /// Lower bound of the interval
    pub lower_bound: f64,
    /// Upper bound of the interval
    pub upper_bound: f64,
    /// Confidence level the interval was computed at (e.g. 0.95)
    pub confidence_level: f64,
}
341
/// Database metadata (bookkeeping for the performance database itself).
#[derive(Debug, Clone)]
pub struct DatabaseMetadata {
    /// Creation timestamp
    pub created_at: SystemTime,
    /// Last update timestamp
    pub updated_at: SystemTime,
    /// Database schema/format version
    pub version: String,
    /// Data retention period after which old measurements may be dropped
    pub retention_period: Duration,
}
354
/// Statistical analyzer for performance data; runs the configured
/// hypothesis test against a baseline (see `perform_regression_test`).
#[derive(Debug)]
pub struct StatisticalAnalyzer {
    /// Analysis configuration (confidence level, alpha, effect size, ...)
    config: StatisticalConfig,
}
361
/// Statistical analysis configuration.
#[derive(Debug, Clone)]
pub struct StatisticalConfig {
    /// Confidence level for tests (e.g. 0.95)
    pub confidence_level: f64,
    /// Alpha level for significance testing (e.g. 0.05)
    pub alpha: f64,
    /// Minimum effect size to consider significant
    pub min_effect_size: f64,
    /// Bootstrap sample size used for resampling-based estimates
    pub bootstrap_samples: usize,
}
374
/// Regression analysis engine; caches the results of the most recent
/// `detect_regressions` run.
#[derive(Debug)]
#[allow(dead_code)]
pub struct RegressionAnalyzer {
    /// Current (most recent) analysis results
    current_results: Vec<RegressionResult>,
    /// Analysis configuration
    config: RegressionAnalysisConfig,
}
384
/// Regression analysis configuration.
#[derive(Debug, Clone)]
pub struct RegressionAnalysisConfig {
    /// Regression detection algorithms to apply
    pub algorithms: Vec<RegressionAlgorithm>,
    /// Sensitivity settings per metric (higher = more sensitive)
    pub metric_sensitivity: HashMap<MetricType, f64>,
    /// Temporal analysis window
    pub analysis_window: Duration,
}
395
/// Regression detection algorithms.
#[derive(Debug, Clone)]
pub enum RegressionAlgorithm {
    /// Change point detection
    ChangePoint,
    /// Trend analysis
    TrendAnalysis,
    /// Statistical process control
    StatisticalProcessControl,
    /// Machine learning anomaly detection
    AnomalyDetection,
}
408
/// Result of regression analysis for a single metric.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegressionResult {
    /// Metric that regressed
    pub metric: MetricType,
    /// Regression severity (0.0 to 1.0); >= 0.7 triggers an alert
    pub severity: f64,
    /// Confidence level (0.0 to 1.0); filtered against the configured
    /// confidence threshold
    pub confidence: f64,
    /// Statistical significance (p-value)
    pub p_value: f64,
    /// Effect size of the detected change
    pub effect_size: f64,
    /// Baseline value (0.0 when no baseline was available)
    pub baseline_value: f64,
    /// Current (latest) value
    pub current_value: f64,
    /// Performance change percentage relative to the reference value
    pub change_percentage: f64,
    /// Detected regression type
    pub regression_type: RegressionType,
    /// Evidence for regression (human-readable)
    pub evidence: Vec<String>,
    /// Recommendations for investigation
    pub recommendations: Vec<String>,
}
435
/// Types of performance regressions.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum RegressionType {
    /// Sudden performance drop
    AbruptRegression,
    /// Gradual performance degradation
    GradualRegression,
    /// Memory leak (monotonically rising memory usage)
    MemoryLeak,
    /// Increased error rate
    IncreasedErrorRate,
    /// Reduced throughput
    ReducedThroughput,
    /// Higher latency
    IncreasedLatency,
    /// Resource exhaustion
    ResourceExhaustion,
}
454
/// Alert system for notifications; dispatches alerts to the configured
/// channels and keeps a history of what was sent.
#[derive(Debug)]
pub struct AlertSystem {
    /// Alert configuration (severity floor, rate limiting, channels)
    config: AlertConfig,
    /// Alert history (most recent alerts)
    alert_history: VecDeque<Alert>,
}
463
/// Alert configuration.
#[derive(Debug, Clone)]
pub struct AlertConfig {
    /// Enable alerts
    pub enabled: bool,
    /// Minimum severity for alerts; lower-severity alerts are dropped
    pub min_severity: AlertSeverity,
    /// Rate limiting settings
    pub rate_limit: RateLimit,
    /// Notification channels to deliver alerts to
    pub channels: Vec<NotificationChannel>,
}
476
/// Alert severity levels.
///
/// Variant order matters: `PartialOrd` is derived from declaration order
/// (Low < Medium < High < Critical), which is what `min_severity`
/// comparisons rely on.
#[derive(Debug, Clone, PartialEq, PartialOrd, serde::Serialize, serde::Deserialize)]
pub enum AlertSeverity {
    Low,
    Medium,
    High,
    Critical,
}
485
/// Rate limiting configuration for alert delivery.
#[derive(Debug, Clone)]
pub struct RateLimit {
    /// Maximum alerts per time window
    pub max_alerts: usize,
    /// Time window for rate limiting
    pub time_window: Duration,
    /// Cooldown period between consecutive alerts
    pub cooldown: Duration,
}
496
/// Notification channels an alert can be delivered to.
#[derive(Debug, Clone)]
pub enum NotificationChannel {
    /// Send via SMTP using the given configuration
    Email(EmailConfig),
    /// Post to a Slack webhook
    Slack(SlackConfig),
    /// POST to an arbitrary webhook URL
    Webhook(String),
    /// Print to the console
    Console,
    /// Append to a file at the given path
    File(PathBuf),
}
506
/// Performance alert generated from one or more regression results.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Alert {
    /// Alert ID (metric name + epoch seconds; see `generate_alerts`)
    pub id: String,
    /// Alert timestamp
    pub timestamp: SystemTime,
    /// Alert severity
    pub severity: AlertSeverity,
    /// Alert title
    pub title: String,
    /// Alert description
    pub description: String,
    /// Affected metrics
    pub affected_metrics: Vec<MetricType>,
    /// Regression results that triggered the alert
    pub regressionresults: Vec<RegressionResult>,
    /// Recommended actions for investigation
    pub recommended_actions: Vec<String>,
    /// Additional free-form alert metadata
    pub metadata: HashMap<String, String>,
}
529
530impl PerformanceRegressionDetector {
531    /// Create a new performance regression detector
532    pub fn new(config: RegressionConfig) -> Result<Self> {
533        let historical_data = PerformanceDatabase::new(config.max_history_size)?;
534        let statistical_analyzer = StatisticalAnalyzer::new(StatisticalConfig::default());
535        let regression_analyzer = RegressionAnalyzer::new(RegressionAnalysisConfig::default());
536        let alert_system = AlertSystem::new(AlertConfig::default());
537
538        Ok(Self {
539            config,
540            historical_data,
541            statistical_analyzer,
542            regression_analyzer,
543            alert_system,
544            baseline_metrics: None,
545        })
546    }
547
548    /// Add a new performance measurement
549    pub fn add_measurement(&mut self, measurement: PerformanceMeasurement) -> Result<()> {
550        self.historical_data.add_measurement(measurement)?;
551        self.update_trends()?;
552        Ok(())
553    }
554
555    /// Detect performance regressions
556    pub fn detect_regressions(&mut self) -> Result<Vec<RegressionResult>> {
557        if !self.config.enable_detection {
558            return Ok(vec![]);
559        }
560
561        let latest_measurements = self
562            .historical_data
563            .get_latest_measurements(self.config.min_samples)?;
564
565        if latest_measurements.len() < self.config.min_samples {
566            return Ok(vec![]);
567        }
568
569        let mut all_results = Vec::new();
570
571        for metrictype in &self.config.tracked_metrics {
572            let results = self.detect_metric_regression(metrictype, &latest_measurements)?;
573            all_results.extend(results);
574        }
575
576        // Filter results by confidence threshold
577        let filtered_results: Vec<_> = all_results
578            .into_iter()
579            .filter(|result| result.confidence >= self.config.confidence_threshold)
580            .collect();
581
582        // Generate alerts for significant regressions
583        if !filtered_results.is_empty() {
584            self.generate_alerts(&filtered_results)?;
585        }
586
587        self.regression_analyzer.current_results = filtered_results.clone();
588        Ok(filtered_results)
589    }
590
591    /// Detect regression for a specific metric
592    fn detect_metric_regression(
593        &self,
594        metrictype: &MetricType,
595        measurements: &[PerformanceMeasurement],
596    ) -> Result<Vec<RegressionResult>> {
597        let mut results = Vec::new();
598
599        // Extract metric values
600        let values: Vec<f64> = measurements
601            .iter()
602            .filter_map(|m| m.metrics.get(metrictype))
603            .map(|mv| mv.value)
604            .collect();
605
606        if values.len() < 2 {
607            return Ok(results);
608        }
609
610        // Get baseline for comparison
611        let baseline = self.get_baseline_for_metric(metrictype)?;
612
613        // Perform statistical tests
614        let statisticalresult = self.statistical_analyzer.perform_regression_test(
615            &values,
616            baseline.as_ref(),
617            &self.config.statistical_test,
618        )?;
619
620        if statisticalresult.is_significant {
621            let regression_result = RegressionResult {
622                metric: metrictype.clone(),
623                severity: self.calculate_severity(&statisticalresult),
624                confidence: statisticalresult.confidence,
625                p_value: statisticalresult.p_value,
626                effect_size: statisticalresult.effect_size,
627                baseline_value: baseline.clone().map(|b| b.value).unwrap_or(0.0),
628                current_value: *values.last().expect("unwrap failed"),
629                change_percentage: self.calculate_change_percentage(&values, baseline.as_ref()),
630                regression_type: self.classify_regression_type(metrictype, &values),
631                evidence: statisticalresult.evidence.clone(),
632                recommendations: self.generate_recommendations(metrictype, &statisticalresult),
633            };
634
635            results.push(regression_result);
636        }
637
638        Ok(results)
639    }
640
641    /// Calculate regression severity
642    fn calculate_severity(&self, statisticalresult: &StatisticalTestResult) -> f64 {
643        let base_severity = 1.0 - statisticalresult.p_value;
644        let effect_multiplier = (statisticalresult.effect_size / 2.0).min(1.0);
645        (base_severity * effect_multiplier).clamp(0.0, 1.0)
646    }
647
648    /// Calculate percentage change
649    fn calculate_change_percentage(&self, values: &[f64], baseline: Option<&MetricValue>) -> f64 {
650        if let Some(baseline) = baseline {
651            let current = *values.last().expect("unwrap failed");
652            if baseline.value != 0.0 {
653                ((current - baseline.value) / baseline.value) * 100.0
654            } else {
655                0.0
656            }
657        } else if values.len() >= 2 {
658            let previous = values[values.len() - 2];
659            let current = *values.last().expect("unwrap failed");
660            if previous != 0.0 {
661                ((current - previous) / previous) * 100.0
662            } else {
663                0.0
664            }
665        } else {
666            0.0
667        }
668    }
669
670    /// Classify the type of regression
671    fn classify_regression_type(&self, metrictype: &MetricType, values: &[f64]) -> RegressionType {
672        // Simple heuristic classification
673        match metrictype {
674            MetricType::MemoryUsage => {
675                if self.is_monotonic_increase(values) {
676                    RegressionType::MemoryLeak
677                } else {
678                    RegressionType::AbruptRegression
679                }
680            }
681            MetricType::ExecutionTime => RegressionType::IncreasedLatency,
682            MetricType::Throughput => RegressionType::ReducedThroughput,
683            MetricType::ErrorRate => {
684                if self.is_gradual_change(values) {
685                    RegressionType::GradualRegression
686                } else {
687                    RegressionType::AbruptRegression
688                }
689            }
690            MetricType::CpuUtilization | MetricType::GpuUtilization => {
691                if self.is_gradual_change(values) {
692                    RegressionType::GradualRegression
693                } else {
694                    RegressionType::AbruptRegression
695                }
696            }
697            MetricType::CacheHitRate => RegressionType::ReducedThroughput,
698            MetricType::Flops => RegressionType::ReducedThroughput,
699            MetricType::ConvergenceRate => RegressionType::GradualRegression,
700            MetricType::Custom(_) => RegressionType::AbruptRegression, // Default for custom metrics
701        }
702    }
703
704    /// Check if values show monotonic increase
705    fn is_monotonic_increase(&self, values: &[f64]) -> bool {
706        values.windows(2).all(|w| w[1] >= w[0])
707    }
708
709    /// Check if change is gradual
710    fn is_gradual_change(&self, values: &[f64]) -> bool {
711        if values.len() < 3 {
712            return false;
713        }
714
715        let changes: Vec<f64> = values.windows(2).map(|w| (w[1] - w[0]).abs()).collect();
716
717        let avg_change = changes.iter().sum::<f64>() / changes.len() as f64;
718        let max_change = changes.iter().fold(0.0f64, |acc, &x| acc.max(x));
719
720        max_change < avg_change * 2.0 // Gradual if no single large change
721    }
722
723    /// Generate recommendations for investigation
724    fn generate_recommendations(
725        &self,
726        metrictype: &MetricType,
727        statisticalresult: &StatisticalTestResult,
728    ) -> Vec<String> {
729        let mut recommendations = Vec::new();
730
731        match metrictype {
732            MetricType::ExecutionTime => {
733                recommendations
734                    .push("Profile the code to identify performance bottlenecks".to_string());
735                recommendations
736                    .push("Check for algorithmic changes or inefficient loops".to_string());
737                recommendations.push("Verify compiler optimizations are enabled".to_string());
738            }
739            MetricType::MemoryUsage => {
740                recommendations.push("Run memory leak detection tools".to_string());
741                recommendations.push("Check for memory allocation patterns".to_string());
742                recommendations.push("Verify proper cleanup of resources".to_string());
743            }
744            MetricType::Throughput => {
745                recommendations.push("Analyze parallelization and concurrency".to_string());
746                recommendations.push("Check for synchronization bottlenecks".to_string());
747                recommendations.push("Verify hardware utilization efficiency".to_string());
748            }
749            _ => {
750                recommendations.push("Investigate recent code changes".to_string());
751                recommendations.push("Review commit history for relevant changes".to_string());
752            }
753        }
754
755        if statisticalresult.effect_size > 0.5 {
756            recommendations.push("Consider this a high-priority investigation".to_string());
757        }
758
759        recommendations
760    }
761
762    /// Get baseline metric value
763    fn get_baseline_for_metric(&self, metrictype: &MetricType) -> Result<Option<MetricValue>> {
764        if let Some(baseline) = &self.baseline_metrics {
765            Ok(baseline.metrics.get(metrictype).cloned())
766        } else {
767            Ok(None)
768        }
769    }
770
771    /// Update performance trends
772    fn update_trends(&mut self) -> Result<()> {
773        for metrictype in &self.config.tracked_metrics.clone() {
774            let recent_measurements = self.historical_data.get_recent_measurements_for_metric(
775                metrictype, 50, // Last 50 measurements
776            )?;
777
778            if recent_measurements.len() >= 10 {
779                let trend = self.calculate_trend(metrictype, &recent_measurements)?;
780                self.historical_data
781                    .trends
782                    .insert(metrictype.clone(), trend);
783            }
784        }
785
786        Ok(())
787    }
788
789    /// Calculate performance trend
790    fn calculate_trend(&self, metrictype: &MetricType, values: &[f64]) -> Result<PerformanceTrend> {
791        let direction = self.determine_trend_direction(values);
792        let strength = self.calculate_trend_strength(values);
793        let significance = self.calculate_trend_significance(values);
794        let long_term_average = values.iter().sum::<f64>() / values.len() as f64;
795        let volatility = self.calculate_volatility(values);
796
797        Ok(PerformanceTrend {
798            metrictype: metrictype.clone(),
799            direction,
800            strength,
801            significance,
802            recent_values: values.iter().copied().collect(),
803            long_term_average,
804            volatility,
805        })
806    }
807
808    /// Determine trend direction using linear regression
809    fn determine_trend_direction(&self, values: &[f64]) -> TrendDirection {
810        if values.len() < 3 {
811            return TrendDirection::Uncertain;
812        }
813
814        // Simple linear regression slope calculation
815        let n = values.len() as f64;
816        let x_values: Vec<f64> = (0..values.len()).map(|i| i as f64).collect();
817
818        let sum_x = x_values.iter().sum::<f64>();
819        let sum_y = values.iter().sum::<f64>();
820        let sum_xy = x_values
821            .iter()
822            .zip(values.iter())
823            .map(|(x, y)| x * y)
824            .sum::<f64>();
825        let sum_x2 = x_values.iter().map(|x| x * x).sum::<f64>();
826
827        let slope = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x * sum_x);
828
829        if slope.abs() <= 0.015 {
830            // Slightly more tolerant threshold for stability
831            TrendDirection::Stable
832        } else if slope > 0.0 {
833            TrendDirection::Degrading
834        } else {
835            TrendDirection::Improving
836        }
837    }
838
839    /// Calculate trend strength (0.0 to 1.0)
840    fn calculate_trend_strength(&self, values: &[f64]) -> f64 {
841        if values.len() < 2 {
842            return 0.0;
843        }
844
845        let first = values[0];
846        let last = *values.last().expect("unwrap failed");
847        let max_value = values.iter().fold(f64::NEG_INFINITY, |acc, &x| acc.max(x));
848        let min_value = values.iter().fold(f64::INFINITY, |acc, &x| acc.min(x));
849
850        if max_value == min_value {
851            return 0.0;
852        }
853
854        ((last - first).abs() / (max_value - min_value)).clamp(0.0, 1.0)
855    }
856
857    /// Calculate trend statistical significance
858    fn calculate_trend_significance(&self, values: &[f64]) -> f64 {
859        // Simplified significance calculation
860        // In practice, would use proper statistical tests
861        if values.len() < 5 {
862            return 0.0;
863        }
864
865        let variance = self.calculate_variance(values);
866        let strength = self.calculate_trend_strength(values);
867
868        // Higher variance reduces significance, higher strength increases it
869        (strength / (1.0 + variance.sqrt())).clamp(0.0, 1.0)
870    }
871
872    /// Calculate variance of values
873    fn calculate_variance(&self, values: &[f64]) -> f64 {
874        if values.len() < 2 {
875            return 0.0;
876        }
877
878        let mean = values.iter().sum::<f64>() / values.len() as f64;
879        let sum_squares = values.iter().map(|x| (x - mean).powi(2)).sum::<f64>();
880        sum_squares / (values.len() - 1) as f64
881    }
882
883    /// Calculate volatility (coefficient of variation)
884    fn calculate_volatility(&self, values: &[f64]) -> f64 {
885        if values.is_empty() {
886            return 0.0;
887        }
888
889        let mean = values.iter().sum::<f64>() / values.len() as f64;
890        if mean == 0.0 {
891            return 0.0;
892        }
893
894        let variance = self.calculate_variance(values);
895        variance.sqrt() / mean
896    }
897
898    /// Generate alerts for regressions
899    fn generate_alerts(&mut self, regressionresults: &[RegressionResult]) -> Result<()> {
900        for result in regressionresults {
901            if result.severity >= 0.7 {
902                // High severity threshold
903                let alert = Alert {
904                    id: format!("regression_{}_{}", 
905                        result.metric.to_string().to_lowercase(),
906                        SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()
907                    ),
908                    timestamp: SystemTime::now(),
909                    severity: self.map_severity(result.severity),
910                    title: format!("Performance Regression Detected: {}", result.metric),
911                    description: format!(
912                        "Performance regression detected for metric '{}'. Change: {:.2}%, Confidence: {:.2}%",
913                        result.metric,
914                        result.change_percentage,
915                        result.confidence * 100.0
916                    ),
917                    affected_metrics: vec![result.metric.clone()],
918                    regressionresults: vec![result.clone()],
919                    recommended_actions: result.recommendations.clone(),
920                    metadata: HashMap::new(),
921                };
922
923                self.alert_system.send_alert(alert)?;
924            }
925        }
926
927        Ok(())
928    }
929
930    /// Map numeric severity to alert severity
931    fn map_severity(&self, severity: f64) -> AlertSeverity {
932        match severity {
933            s if s >= 0.9 => AlertSeverity::Critical,
934            s if s >= 0.7 => AlertSeverity::High,
935            s if s >= 0.5 => AlertSeverity::Medium,
936            _ => AlertSeverity::Low,
937        }
938    }
939
940    /// Set baseline metrics
941    pub fn set_baseline(&mut self, baseline: BaselineMetrics) -> Result<()> {
942        self.baseline_metrics = Some(baseline);
943        Ok(())
944    }
945
946    /// Update baseline from recent measurements
947    pub fn update_baseline_from_recent(&mut self, commithash: String) -> Result<()> {
948        let recent_measurements = self
949            .historical_data
950            .get_latest_measurements(self.config.min_samples)?;
951
952        if recent_measurements.len() < self.config.min_samples {
953            return Err(OptimError::InvalidConfig(
954                "Insufficient measurements for baseline update".to_string(),
955            ));
956        }
957
958        let mut metrics = HashMap::new();
959        let mut confidence_intervals = HashMap::new();
960
961        for metrictype in &self.config.tracked_metrics {
962            let values: Vec<f64> = recent_measurements
963                .iter()
964                .filter_map(|m| m.metrics.get(metrictype))
965                .map(|mv| mv.value)
966                .collect();
967
968            if !values.is_empty() {
969                let mean = values.iter().sum::<f64>() / values.len() as f64;
970                let variance = self.calculate_variance(&values);
971                let std_dev = variance.sqrt();
972
973                metrics.insert(
974                    metrictype.clone(),
975                    MetricValue {
976                        value: mean,
977                        std_dev: Some(std_dev),
978                        sample_count: values.len(),
979                        min_value: values.iter().fold(f64::INFINITY, |acc, &x| acc.min(x)),
980                        max_value: values.iter().fold(f64::NEG_INFINITY, |acc, &x| acc.max(x)),
981                        percentiles: None,
982                    },
983                );
984
985                // 95% confidence interval
986                let margin = 1.96 * std_dev / (values.len() as f64).sqrt();
987                confidence_intervals.insert(
988                    metrictype.clone(),
989                    ConfidenceInterval {
990                        lower_bound: mean - margin,
991                        upper_bound: mean + margin,
992                        confidence_level: 0.95,
993                    },
994                );
995            }
996        }
997
998        let baseline = BaselineMetrics {
999            version: commithash,
1000            timestamp: SystemTime::now(),
1001            metrics,
1002            confidence_intervals,
1003            quality_score: self.calculate_baseline_quality(&recent_measurements),
1004        };
1005
1006        self.set_baseline(baseline)?;
1007        Ok(())
1008    }
1009
1010    /// Calculate baseline quality score
1011    fn calculate_baseline_quality(&self, measurements: &[PerformanceMeasurement]) -> f64 {
1012        if measurements.is_empty() {
1013            return 0.0;
1014        }
1015
1016        // Quality factors:
1017        // 1. Number of measurements (more is better)
1018        // 2. Consistency of measurements (lower variance is better)
1019        // 3. Completeness of metrics (more metrics is better)
1020
1021        let sample_score = (measurements.len() as f64 / 20.0).min(1.0); // Good if >= 20 samples
1022
1023        let mut consistency_scores = Vec::new();
1024        for metrictype in &self.config.tracked_metrics {
1025            let values: Vec<f64> = measurements
1026                .iter()
1027                .filter_map(|m| m.metrics.get(metrictype))
1028                .map(|mv| mv.value)
1029                .collect();
1030
1031            if !values.is_empty() {
1032                let cv = self.calculate_volatility(&values);
1033                consistency_scores.push((1.0 / (1.0 + cv)).clamp(0.0, 1.0));
1034            }
1035        }
1036
1037        let consistency_score = if consistency_scores.is_empty() {
1038            0.0
1039        } else {
1040            consistency_scores.iter().sum::<f64>() / consistency_scores.len() as f64
1041        };
1042
1043        let completeness_score = if self.config.tracked_metrics.is_empty() {
1044            1.0
1045        } else {
1046            let complete_metrics = measurements
1047                .iter()
1048                .map(|m| {
1049                    self.config
1050                        .tracked_metrics
1051                        .iter()
1052                        .filter(|metric| m.metrics.contains_key(metric))
1053                        .count()
1054                })
1055                .max()
1056                .unwrap_or(0);
1057
1058            complete_metrics as f64 / self.config.tracked_metrics.len() as f64
1059        };
1060
1061        (sample_score + consistency_score + completeness_score) / 3.0
1062    }
1063
1064    /// Export performance data for CI/CD reporting
1065    pub fn export_for_ci_cd(&self) -> Result<CiCdReport> {
1066        let latest_results = &self.regression_analyzer.current_results;
1067
1068        let status = if latest_results.iter().any(|r| r.severity >= 0.7) {
1069            CiCdStatus::Failed
1070        } else if latest_results.iter().any(|r| r.severity >= 0.5) {
1071            CiCdStatus::Warning
1072        } else {
1073            CiCdStatus::Passed
1074        };
1075
1076        let report = CiCdReport {
1077            status,
1078            timestamp: SystemTime::now(),
1079            regression_count: latest_results.len(),
1080            critical_regressions: latest_results.iter().filter(|r| r.severity >= 0.9).count(),
1081            high_severity_regressions: latest_results.iter().filter(|r| r.severity >= 0.7).count(),
1082            regressionresults: latest_results.clone(),
1083            performance_summary: self.generate_performance_summary()?,
1084            recommendations: self.generate_overall_recommendations(latest_results),
1085        };
1086
1087        Ok(report)
1088    }
1089
1090    /// Generate performance summary
1091    fn generate_performance_summary(&self) -> Result<PerformanceSummary> {
1092        let latest_measurements = self.historical_data.get_latest_measurements(5)?;
1093
1094        let mut metric_summaries = HashMap::new();
1095        for metrictype in &self.config.tracked_metrics {
1096            let values: Vec<f64> = latest_measurements
1097                .iter()
1098                .filter_map(|m| m.metrics.get(metrictype))
1099                .map(|mv| mv.value)
1100                .collect();
1101
1102            if !values.is_empty() {
1103                let trend = self.historical_data.trends.get(metrictype);
1104                metric_summaries.insert(
1105                    metrictype.clone(),
1106                    MetricSummary {
1107                        current_value: *values.last().expect("unwrap failed"),
1108                        trend_direction: trend
1109                            .map(|t| t.direction.clone())
1110                            .unwrap_or(TrendDirection::Uncertain),
1111                        trend_strength: trend.map(|t| t.strength).unwrap_or(0.0),
1112                        stability_score: trend.map(|t| 1.0 - t.volatility).unwrap_or(1.0),
1113                    },
1114                );
1115            }
1116        }
1117
1118        Ok(PerformanceSummary {
1119            overall_health_score: self.calculate_overall_health_score(&metric_summaries),
1120            metric_summaries,
1121            data_quality_score: self
1122                .baseline_metrics
1123                .as_ref()
1124                .map(|b| b.quality_score)
1125                .unwrap_or(0.0),
1126        })
1127    }
1128
1129    /// Calculate overall health score
1130    fn calculate_overall_health_score(
1131        &self,
1132        summaries: &HashMap<MetricType, MetricSummary>,
1133    ) -> f64 {
1134        if summaries.is_empty() {
1135            return 1.0;
1136        }
1137
1138        let scores: Vec<f64> = summaries
1139            .values()
1140            .map(|summary| {
1141                let trend_score = match summary.trend_direction {
1142                    TrendDirection::Improving => 1.0,
1143                    TrendDirection::Stable => 0.8,
1144                    TrendDirection::Degrading => 0.3,
1145                    TrendDirection::Uncertain => 0.6,
1146                };
1147
1148                (trend_score + summary.stability_score) / 2.0
1149            })
1150            .collect();
1151
1152        scores.iter().sum::<f64>() / scores.len() as f64
1153    }
1154
1155    /// Generate overall recommendations
1156    fn generate_overall_recommendations(&self, results: &[RegressionResult]) -> Vec<String> {
1157        let mut recommendations = Vec::new();
1158
1159        if results.is_empty() {
1160            recommendations
1161                .push("No performance regressions detected. Continue monitoring.".to_string());
1162            return recommendations;
1163        }
1164
1165        let critical_count = results.iter().filter(|r| r.severity >= 0.9).count();
1166        let high_count = results.iter().filter(|r| r.severity >= 0.7).count();
1167
1168        if critical_count > 0 {
1169            recommendations.push(format!(
1170                "CRITICAL: {} critical performance regressions detected. Immediate investigation required.",
1171                critical_count
1172            ));
1173        }
1174
1175        if high_count > 0 {
1176            recommendations.push(format!(
1177                "HIGH: {} high-severity performance regressions detected. Investigation recommended.",
1178                high_count
1179            ));
1180        }
1181
1182        // Group recommendations by regression type
1183        let regression_types: std::collections::HashSet<_> =
1184            results.iter().map(|r| &r.regression_type).collect();
1185
1186        for regression_type in regression_types {
1187            match regression_type {
1188                RegressionType::MemoryLeak => {
1189                    recommendations
1190                        .push("Memory leak detected. Run memory profiling tools.".to_string());
1191                }
1192                RegressionType::IncreasedLatency => {
1193                    recommendations.push(
1194                        "Performance degradation detected. Profile critical paths.".to_string(),
1195                    );
1196                }
1197                RegressionType::ReducedThroughput => {
1198                    recommendations.push(
1199                        "Throughput reduction detected. Check parallelization efficiency."
1200                            .to_string(),
1201                    );
1202                }
1203                _ => {}
1204            }
1205        }
1206
1207        recommendations
1208            .push("Review recent commits for performance-impacting changes.".to_string());
1209        recommendations
1210    }
1211}
1212
/// Statistical test result
///
/// Outcome of a single hypothesis test comparing recent samples against a
/// baseline (see `StatisticalAnalyzer::perform_regression_test`).
#[derive(Debug, Clone)]
pub struct StatisticalTestResult {
    /// True when `p_value` is below alpha AND the effect size clears the
    /// configured minimum.
    pub is_significant: bool,
    /// Probability of the observed data under the null hypothesis.
    pub p_value: f64,
    /// Computed as `1.0 - p_value`.
    pub confidence: f64,
    /// Standardized magnitude of the difference: |mean shift| / pooled std.
    pub effect_size: f64,
    /// Free-form notes describing the evidence (e.g. which test was run).
    pub evidence: Vec<String>,
}
1222
/// CI/CD report structure
///
/// Serializable summary of the latest regression analysis, suitable for
/// attaching to a pipeline run (produced by `export_for_ci_cd`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CiCdReport {
    /// Overall pass/warn/fail verdict derived from result severities.
    pub status: CiCdStatus,
    /// When the report was generated.
    pub timestamp: SystemTime,
    /// Total number of regression results analyzed (all severities).
    pub regression_count: usize,
    /// Results with severity >= 0.9.
    pub critical_regressions: usize,
    /// Results with severity >= 0.7 (includes the critical ones).
    pub high_severity_regressions: usize,
    /// Full per-metric regression results.
    pub regressionresults: Vec<RegressionResult>,
    /// Aggregated health/trend snapshot across tracked metrics.
    pub performance_summary: PerformanceSummary,
    /// Human-readable follow-up actions.
    pub recommendations: Vec<String>,
}
1235
/// CI/CD status
///
/// Pipeline verdict: `Failed` when any high-severity (>= 0.7) regression is
/// present, `Warning` for medium severity (>= 0.5), otherwise `Passed`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CiCdStatus {
    Passed,
    Warning,
    Failed,
}
1243
/// Performance summary
///
/// Aggregate view across all tracked metrics, embedded in `CiCdReport`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSummary {
    /// Mean per-metric health in [0.0, 1.0]; 1.0 when no metrics have data.
    pub overall_health_score: f64,
    /// One summary per tracked metric that had recent measurements.
    pub metric_summaries: HashMap<MetricType, MetricSummary>,
    /// Quality score of the active baseline (0.0 when no baseline is set).
    pub data_quality_score: f64,
}
1251
/// Individual metric summary
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricSummary {
    /// Representative value taken from the recent measurement window
    /// (see `generate_performance_summary`).
    pub current_value: f64,
    /// Direction of the recorded trend (`Uncertain` when none recorded).
    pub trend_direction: TrendDirection,
    /// Strength of the trend; 0.0 when no trend is recorded.
    pub trend_strength: f64,
    /// `1.0 - volatility` of the trend; 1.0 when no trend is recorded.
    pub stability_score: f64,
}
1260
1261// Trait implementations for serialization
1262
1263impl std::fmt::Display for MetricType {
1264    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1265        match self {
1266            MetricType::ExecutionTime => write!(f, "execution_time"),
1267            MetricType::MemoryUsage => write!(f, "memory_usage"),
1268            MetricType::Throughput => write!(f, "throughput"),
1269            MetricType::CpuUtilization => write!(f, "cpu_utilization"),
1270            MetricType::GpuUtilization => write!(f, "gpu_utilization"),
1271            MetricType::CacheHitRate => write!(f, "cache_hit_rate"),
1272            MetricType::Flops => write!(f, "flops"),
1273            MetricType::ConvergenceRate => write!(f, "convergence_rate"),
1274            MetricType::ErrorRate => write!(f, "error_rate"),
1275            MetricType::Custom(name) => write!(f, "custom_{}", name),
1276        }
1277    }
1278}
1279
1280// Implementation stubs for supporting types
1281
1282impl PerformanceDatabase {
1283    fn new(_maxsize: usize) -> Result<Self> {
1284        Ok(Self {
1285            measurements: VecDeque::with_capacity(_maxsize),
1286            trends: HashMap::new(),
1287            baselines: HashMap::new(),
1288            metadata: DatabaseMetadata {
1289                created_at: SystemTime::now(),
1290                updated_at: SystemTime::now(),
1291                version: "1.0.0".to_string(),
1292                retention_period: Duration::from_secs(30 * 24 * 60 * 60), // 30 days
1293            },
1294        })
1295    }
1296
1297    fn add_measurement(&mut self, measurement: PerformanceMeasurement) -> Result<()> {
1298        self.measurements.push_back(measurement);
1299        if self.measurements.len() > self.measurements.capacity() {
1300            self.measurements.pop_front();
1301        }
1302        self.metadata.updated_at = SystemTime::now();
1303        Ok(())
1304    }
1305
1306    fn get_latest_measurements(&self, count: usize) -> Result<Vec<PerformanceMeasurement>> {
1307        Ok(self
1308            .measurements
1309            .iter()
1310            .rev()
1311            .take(count)
1312            .cloned()
1313            .collect())
1314    }
1315
1316    fn get_recent_measurements_for_metric(
1317        &self,
1318        metrictype: &MetricType,
1319        count: usize,
1320    ) -> Result<Vec<f64>> {
1321        Ok(self
1322            .measurements
1323            .iter()
1324            .rev()
1325            .take(count)
1326            .filter_map(|m| m.metrics.get(metrictype))
1327            .map(|mv| mv.value)
1328            .collect())
1329    }
1330}
1331
1332impl StatisticalAnalyzer {
1333    fn new(config: StatisticalConfig) -> Self {
1334        Self { config }
1335    }
1336
1337    fn perform_regression_test(
1338        &self,
1339        values: &[f64],
1340        baseline: Option<&MetricValue>,
1341        test_type: &StatisticalTest,
1342    ) -> Result<StatisticalTestResult> {
1343        // Simplified implementation - would use proper statistical libraries
1344        let p_value = self.calculate_p_value(values, baseline, test_type);
1345        let effect_size = self.calculate_effect_size(values, baseline);
1346        let confidence = 1.0 - p_value;
1347        let is_significant =
1348            p_value < self.config.alpha && effect_size > self.config.min_effect_size;
1349
1350        Ok(StatisticalTestResult {
1351            is_significant,
1352            p_value,
1353            confidence,
1354            effect_size,
1355            evidence: vec![format!("Statistical test: {:?}", test_type)],
1356        })
1357    }
1358
1359    fn calculate_p_value(
1360        &self,
1361        values: &[f64],
1362        _baseline: Option<&MetricValue>,
1363        _test_type: &StatisticalTest,
1364    ) -> f64 {
1365        // Simplified - would implement actual statistical tests
1366        0.05
1367    }
1368
1369    fn calculate_effect_size(&self, values: &[f64], baseline: Option<&MetricValue>) -> f64 {
1370        if let Some(baseline) = baseline {
1371            let current_mean = values.iter().sum::<f64>() / values.len() as f64;
1372            let pooled_std =
1373                (baseline.std_dev.unwrap_or(1.0) + self.calculate_std_dev(values)) / 2.0;
1374
1375            if pooled_std > 0.0 {
1376                ((current_mean - baseline.value) / pooled_std).abs()
1377            } else {
1378                0.0
1379            }
1380        } else {
1381            0.0
1382        }
1383    }
1384
1385    fn calculate_std_dev(&self, values: &[f64]) -> f64 {
1386        if values.len() < 2 {
1387            return 0.0;
1388        }
1389
1390        let mean = values.iter().sum::<f64>() / values.len() as f64;
1391        let variance =
1392            values.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / (values.len() - 1) as f64;
1393
1394        variance.sqrt()
1395    }
1396}
1397
1398impl RegressionAnalyzer {
1399    fn new(config: RegressionAnalysisConfig) -> Self {
1400        Self {
1401            current_results: Vec::new(),
1402            config,
1403        }
1404    }
1405}
1406
1407impl AlertSystem {
1408    fn new(config: AlertConfig) -> Self {
1409        Self {
1410            config,
1411            alert_history: VecDeque::new(),
1412        }
1413    }
1414
1415    fn send_alert(&mut self, alert: Alert) -> Result<()> {
1416        if !self.config.enabled {
1417            return Ok(());
1418        }
1419
1420        // Add to history
1421        self.alert_history.push_back(alert.clone());
1422
1423        // Send via configured channels
1424        for channel in &self.config.channels {
1425            self.send_via_channel(&alert, channel)?;
1426        }
1427
1428        println!("🚨 PERFORMANCE ALERT: {}", alert.title);
1429        println!("   Description: {}", alert.description);
1430        println!("   Severity: {:?}", alert.severity);
1431
1432        Ok(())
1433    }
1434
1435    fn send_via_channel(&self, alert: &Alert, channel: &NotificationChannel) -> Result<()> {
1436        match channel {
1437            NotificationChannel::Console => {
1438                println!("ALERT: {}", alert.title);
1439            }
1440            NotificationChannel::File(path) => {
1441                let alert_json = serde_json::to_string_pretty(alert)?;
1442                std::fs::write(path, alert_json)?;
1443            }
1444            NotificationChannel::Email(config) => {
1445                // Would implement email sending
1446                println!("Email alert sent: {}", alert.title);
1447            }
1448            NotificationChannel::Slack(config) => {
1449                // Would implement Slack webhook
1450                println!("Slack alert sent: {}", alert.title);
1451            }
1452            NotificationChannel::Webhook(url) => {
1453                // Would implement webhook call
1454                println!("Webhook alert sent to {}: {}", url, alert.title);
1455            }
1456        }
1457        Ok(())
1458    }
1459}
1460
1461// Default implementations
1462
/// Conservative defaults: 95% confidence, a 5% degradation threshold, and
/// the three core metrics (time, memory, throughput) tracked.
impl Default for RegressionConfig {
    fn default() -> Self {
        Self {
            enable_detection: true,
            // Require 95% statistical confidence before flagging.
            confidence_threshold: 0.95,
            // Flag changes worse than 5%.
            degradation_threshold: 0.05,
            // Minimum samples before statistical analysis is meaningful.
            min_samples: 10,
            max_history_size: 1000,
            tracked_metrics: vec![
                MetricType::ExecutionTime,
                MetricType::MemoryUsage,
                MetricType::Throughput,
            ],
            // Mann-Whitney U: non-parametric, no normality assumption.
            statistical_test: StatisticalTest::MannWhitneyU,
            sensitivity: RegressionSensitivity::Medium,
            baseline_strategy: BaselineStrategy::AutoImprovement,
            alert_thresholds: AlertThresholds::default(),
            ci_cd_config: CiCdConfig::default(),
        }
    }
}
1484
impl Default for AlertThresholds {
    fn default() -> Self {
        // Per-metric degradation tolerances: memory is allowed to regress
        // further (20%) than execution time or throughput (10%).
        let mut degradation_thresholds = HashMap::new();
        degradation_thresholds.insert(MetricType::ExecutionTime, 0.1); // 10% slower
        degradation_thresholds.insert(MetricType::MemoryUsage, 0.2); // 20% more memory
        degradation_thresholds.insert(MetricType::Throughput, 0.1); // 10% less throughput

        Self {
            degradation_thresholds,
            // No absolute per-metric memory-increase limits by default.
            memory_increase_thresholds: HashMap::new(),
            failure_rate_threshold: 0.05, // 5% failure rate
            timeout_threshold: 300.0,     // 5 minutes
        }
    }
}
1500
impl Default for CiCdConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            // Report regressions without failing the build by default.
            fail_on_regression: false,
            generate_reports: true,
            report_format: ReportFormat::Json,
            report_path: PathBuf::from("performance_report.json"),
            // No remote notification targets configured by default.
            webhook_urls: Vec::new(),
            slack_config: None,
            email_config: None,
        }
    }
}
1515
impl Default for StatisticalConfig {
    fn default() -> Self {
        Self {
            // 95% confidence for interval estimates.
            confidence_level: 0.95,
            // Significance level; p-values below this count as significant.
            alpha: 0.05,
            // Minimum effect size required to call a change meaningful.
            min_effect_size: 0.2,
            // Bootstrap resample count (unused by the simplified analyzer
            // in this module).
            bootstrap_samples: 1000,
        }
    }
}
1526
impl Default for RegressionAnalysisConfig {
    fn default() -> Self {
        Self {
            // Run both change-point and trend-based detection by default.
            algorithms: vec![
                RegressionAlgorithm::ChangePoint,
                RegressionAlgorithm::TrendAnalysis,
            ],
            // No per-metric sensitivity overrides by default.
            metric_sensitivity: HashMap::new(),
            analysis_window: Duration::from_secs(7 * 24 * 60 * 60), // 7 days
        }
    }
}
1539
impl Default for AlertConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            // Intended floor for alert delivery (not yet enforced by
            // `AlertSystem::send_alert`).
            min_severity: AlertSeverity::Medium,
            // Intended rate limiting: at most 10 alerts per hour with a
            // 5-minute cooldown (also not yet enforced).
            rate_limit: RateLimit {
                max_alerts: 10,
                time_window: Duration::from_secs(60 * 60), // 1 hour
                cooldown: Duration::from_secs(5 * 60),     // 5 minutes
            },
            // Console output is always available; other channels are opt-in.
            channels: vec![NotificationChannel::Console],
        }
    }
}
1554
/// Placeholder environment description: generic values, NOT detected from
/// the running host.
impl Default for EnvironmentInfo {
    fn default() -> Self {
        Self {
            os: "linux".to_string(),
            cpu_model: "unknown".to_string(),
            cpu_cores: 4,
            total_memory_mb: 8192,
            gpu_info: None,
            compiler_version: "rustc 1.70".to_string(),
            rust_version: "1.70.0".to_string(),
            env_vars: HashMap::new(),
        }
    }
}
1569
/// Default benchmark shape: 1000-element dataset, 100 iterations, batches
/// of 32, double precision.
impl Default for TestConfiguration {
    fn default() -> Self {
        Self {
            test_name: "default_test".to_string(),
            parameters: HashMap::new(),
            dataset_size: Some(1000),
            iterations: Some(100),
            batch_size: Some(32),
            precision: "f64".to_string(),
        }
    }
}
1582
1583// Hash implementations for MetricType
1584impl std::hash::Hash for MetricType {
1585    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
1586        std::mem::discriminant(self).hash(state);
1587        match self {
1588            MetricType::Custom(name) => name.hash(state),
1589            // For other variants, discriminant is sufficient
1590            MetricType::ExecutionTime
1591            | MetricType::MemoryUsage
1592            | MetricType::Throughput
1593            | MetricType::CpuUtilization
1594            | MetricType::GpuUtilization
1595            | MetricType::CacheHitRate
1596            | MetricType::Flops
1597            | MetricType::ConvergenceRate
1598            | MetricType::ErrorRate => {}
1599        }
1600    }
1601}
1602
1603impl PartialEq for MetricType {
1604    fn eq(&self, other: &Self) -> bool {
1605        match (self, other) {
1606            (MetricType::Custom(a), MetricType::Custom(b)) => a == b,
1607            _ => std::mem::discriminant(self) == std::mem::discriminant(other),
1608        }
1609    }
1610}
1611
1612impl Eq for MetricType {}
1613
#[cfg(test)]
mod tests {
    use super::*;

    /// A fresh detector starts with no regression results recorded.
    #[test]
    fn test_regression_detector_creation() {
        let config = RegressionConfig::default();
        let detector = PerformanceRegressionDetector::new(config).expect("unwrap failed");
        assert!(detector.regression_analyzer.current_results.is_empty());
    }

    /// Trend classification: decreasing values improve, increasing values
    /// degrade, and small jitter around a level is stable.
    #[test]
    fn test_trend_direction_calculation() {
        let config = RegressionConfig::default();
        let detector = PerformanceRegressionDetector::new(config).expect("unwrap failed");

        // Improving trend (decreasing values for execution time)
        let improving_values = vec![10.0, 9.0, 8.0, 7.0, 6.0];
        let direction = detector.determine_trend_direction(&improving_values);
        assert_eq!(direction, TrendDirection::Improving);

        // Degrading trend (increasing values)
        let degrading_values = vec![6.0, 7.0, 8.0, 9.0, 10.0];
        let direction = detector.determine_trend_direction(&degrading_values);
        assert_eq!(direction, TrendDirection::Degrading);

        // Stable trend
        let stable_values = vec![8.0, 8.1, 7.9, 8.0, 8.1];
        let direction = detector.determine_trend_direction(&stable_values);
        assert_eq!(direction, TrendDirection::Stable);
    }

    /// A 20-sample, low-noise, fully-populated history should yield a valid
    /// quality score in (0, 1].
    #[test]
    fn test_baseline_quality_calculation() {
        let config = RegressionConfig::default();
        let detector = PerformanceRegressionDetector::new(config).expect("unwrap failed");

        // Create test measurements with slowly drifting execution times.
        let mut measurements = Vec::new();
        for i in 0..20 {
            let mut metrics = HashMap::new();
            metrics.insert(
                MetricType::ExecutionTime,
                MetricValue {
                    value: 10.0 + (i as f64 * 0.1),
                    std_dev: Some(0.5),
                    sample_count: 1,
                    min_value: 10.0,
                    max_value: 12.0,
                    percentiles: None,
                },
            );

            measurements.push(PerformanceMeasurement {
                timestamp: SystemTime::now(),
                commithash: format!("commit_{}", i),
                branch: "main".to_string(),
                build_config: "release".to_string(),
                environment: EnvironmentInfo::default(),
                metrics,
                test_config: TestConfiguration::default(),
                metadata: HashMap::new(),
            });
        }

        let quality = detector.calculate_baseline_quality(&measurements);
        assert!(quality > 0.0);
        assert!(quality <= 1.0);
    }

    /// Monotonic memory growth classifies as a leak; growing execution time
    /// classifies as increased latency.
    #[test]
    fn test_regression_type_classification() {
        let config = RegressionConfig::default();
        let detector = PerformanceRegressionDetector::new(config).expect("unwrap failed");

        // Memory leak pattern (monotonic increase)
        let memory_leak_values = vec![100.0, 105.0, 110.0, 115.0, 120.0];
        let regression_type =
            detector.classify_regression_type(&MetricType::MemoryUsage, &memory_leak_values);
        assert!(matches!(regression_type, RegressionType::MemoryLeak));

        // Execution time increase
        let latency_values = vec![10.0, 12.0, 15.0, 18.0, 20.0];
        let regression_type =
            detector.classify_regression_type(&MetricType::ExecutionTime, &latency_values);
        assert!(matches!(regression_type, RegressionType::IncreasedLatency));
    }

    /// With no analysis results, the CI/CD export reports a clean pass.
    #[test]
    fn test_ci_cd_report_generation() {
        let config = RegressionConfig::default();
        let detector = PerformanceRegressionDetector::new(config).expect("unwrap failed");

        let report = detector.export_for_ci_cd().expect("unwrap failed");
        assert!(matches!(report.status, CiCdStatus::Passed));
        assert_eq!(report.regression_count, 0);
    }
}