1use chrono::{DateTime, Utc};
7use scirs2_core::ndarray::{ArrayView1, ArrayView2};
8use serde::{Deserialize, Serialize};
9use sklears_core::{
10 error::{Result as SklResult, SklearsError},
11 traits::{Estimator, Fit},
12 types::Float,
13};
14use std::collections::HashMap;
15use std::fs::{File, OpenOptions};
16use std::io::{BufRead, BufReader, Write};
17use std::path::{Path, PathBuf};
18use std::time::{Duration, Instant};
19
/// Orchestrates performance benchmarking and regression detection:
/// runs benchmarks, persists their results, and analyzes historical trends.
pub struct PerformanceRegressionTester {
    /// Backend used to persist and load benchmark results.
    pub storage: BenchmarkStorage,
    /// Parameters controlling statistical trend/regression analysis.
    pub analysis_config: StatisticalAnalysisConfig,
    /// Controls warmup/measurement iteration counts and environment capture.
    pub environment_config: EnvironmentConfig,
    /// Thresholds above which a change is flagged as a regression.
    pub regression_thresholds: RegressionThresholds,
    /// Optional CPU/memory profiling settings.
    pub profiling_config: ProfilingConfig,
}

/// Where benchmark results are stored.
pub enum BenchmarkStorage {
    /// Append results as JSON lines to a file on disk.
    File { path: PathBuf },
    /// Keep results in process memory (lost on drop); the default for `new()`.
    Memory { results: Vec<BenchmarkResult> },
    /// Database-backed storage — declared but not implemented yet; all
    /// operations on this variant return `SklearsError::NotImplemented`.
    Database { connection_string: String },
}

/// Configuration for the statistical analysis of benchmark histories.
#[derive(Clone, Debug)]
pub struct StatisticalAnalysisConfig {
    /// Confidence level (e.g. 0.95) used when reporting intervals.
    pub confidence_level: f64,
    /// Minimum number of historical results before trend analysis runs.
    pub min_samples_for_trend: usize,
    /// Window size for rolling statistics.
    pub rolling_window_size: usize,
    /// Which statistical tests to apply.
    pub statistical_tests: Vec<StatisticalTest>,
    /// Outlier detection strategy applied before analysis.
    pub outlier_detection: OutlierDetection,
}

/// Statistical tests available for regression analysis.
#[derive(Clone, Debug)]
pub enum StatisticalTest {
    /// Student's t-test for mean differences.
    TTest,
    /// Mann-Whitney U rank test (non-parametric).
    MannWhitneyU,
    /// Kolmogorov-Smirnov distribution comparison.
    KolmogorovSmirnov,
    /// Linear regression over time for trend slope.
    LinearRegression,
    /// Change-point detection in the time series.
    ChangePointDetection,
}

/// Outlier detection strategies for benchmark samples.
#[derive(Clone, Debug)]
pub enum OutlierDetection {
    /// No outlier filtering.
    None,
    /// Drop samples whose z-score exceeds `threshold`.
    ZScore { threshold: f64 },
    /// Drop samples outside `multiplier` x interquartile range.
    IQR { multiplier: f64 },
    /// Median-based modified z-score with `threshold`.
    ModifiedZScore { threshold: f64 },
}
86
/// Controls how benchmarks are executed and what environment data is captured.
#[derive(Clone, Debug)]
pub struct EnvironmentConfig {
    /// Untimed runs executed before measurement (cache/JIT warmup).
    pub warmup_iterations: usize,
    /// Timed runs whose durations feed the statistics.
    pub measurement_iterations: usize,
    /// Optional CPU cores to pin the benchmark to — NOTE(review): not read by
    /// any code visible in this file; confirm it is enforced elsewhere.
    pub cpu_affinity: Option<Vec<usize>>,
    /// Optional memory cap in bytes — see note on `cpu_affinity`.
    pub memory_limit: Option<u64>,
    /// Environment variable names recorded into each result's metadata.
    pub capture_env_vars: Vec<String>,
    /// Whether to record system information (OS, CPU, memory) per result.
    pub collect_system_info: bool,
}

/// Thresholds that decide when a measured change counts as a regression.
#[derive(Clone, Debug)]
pub struct RegressionThresholds {
    /// Relative slowdown (e.g. 0.05 = 5%) that triggers detection.
    pub relative_threshold: f64,
    /// Absolute slowdown that triggers detection.
    pub absolute_threshold: Duration,
    /// Memory usage increase in bytes that triggers detection.
    pub memory_threshold: u64,
    /// Relative throughput drop that triggers detection.
    pub throughput_threshold: f64,
}

/// Optional profiling to run alongside benchmarks.
#[derive(Clone, Debug)]
pub struct ProfilingConfig {
    /// Enable CPU profiling.
    pub cpu_profiling: bool,
    /// Enable memory profiling.
    pub memory_profiling: bool,
    /// Sampling interval for the profiler.
    pub sampling_frequency: Duration,
    /// Where profiler output is written; `None` disables output.
    pub output_directory: Option<PathBuf>,
    /// Capture full call stacks (more detail, more overhead).
    pub detailed_call_stacks: bool,
}
131
/// One persisted benchmark run: metrics plus the context it was measured in.
/// Serialized as a single JSON line by the file storage backend.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchmarkResult {
    /// Unique id; built as `{test_case}_{unix_timestamp}` by the tester.
    pub benchmark_id: String,
    /// Logical test case name used to group historical results.
    pub test_case: String,
    /// When the benchmark ran (UTC).
    pub timestamp: DateTime<Utc>,
    /// Measured performance metrics.
    pub metrics: PerformanceMetrics,
    /// Host system description at run time.
    pub system_info: SystemInfo,
    /// Process environment captured at run time.
    pub environment: EnvironmentMetadata,
    /// Git commit the code was built from, when available.
    pub commit_hash: Option<String>,
    /// Free-form key/value metadata (copied from `BenchmarkContext::config`).
    pub metadata: HashMap<String, String>,
}

/// All metric families recorded for a single benchmark.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    /// Wall-clock timing statistics over the measurement iterations.
    pub execution_time: TimeStatistics,
    /// Memory usage statistics.
    pub memory_usage: MemoryStatistics,
    /// Derived throughput figures.
    pub throughput: ThroughputMetrics,
    /// CPU utilization statistics.
    pub cpu_utilization: CpuStatistics,
    /// Caller-defined additional metrics.
    pub custom_metrics: HashMap<String, f64>,
}

/// Summary statistics over a set of measured durations.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TimeStatistics {
    /// Arithmetic mean.
    pub mean: Duration,
    /// Median (lower-middle element for even sample counts).
    pub median: Duration,
    /// Population standard deviation.
    pub std_dev: Duration,
    /// Fastest sample.
    pub min: Duration,
    /// Slowest sample.
    pub max: Duration,
    /// 95th percentile (nearest-rank).
    pub p95: Duration,
    /// 99th percentile (nearest-rank).
    pub p99: Duration,
    /// Raw samples, in measurement order.
    pub samples: Vec<Duration>,
}

/// Memory usage figures for a benchmark run.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemoryStatistics {
    /// Peak resident usage in bytes.
    pub peak_usage: u64,
    /// Average usage in bytes.
    pub average_usage: u64,
    /// Number of allocations observed.
    pub allocations: u64,
    /// Number of deallocations observed.
    pub deallocations: u64,
    /// Heap fragmentation score — NOTE(review): scale/meaning not defined in
    /// this file; currently only a placeholder value is produced.
    pub fragmentation_score: f64,
}

/// Throughput derived from mean execution time and input dimensions.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ThroughputMetrics {
    /// Benchmark invocations per second (1 / mean time).
    pub ops_per_second: f64,
    /// Input rows processed per second.
    pub samples_per_second: f64,
    /// Input cells (rows x cols) processed per second.
    pub features_per_second: f64,
    /// Bytes per second, assuming 8 bytes per feature value.
    pub bytes_per_second: f64,
}

/// CPU utilization figures for a benchmark run.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CpuStatistics {
    /// Mean utilization in [0, 1].
    pub average_utilization: f64,
    /// Peak utilization in [0, 1].
    pub peak_utilization: f64,
    /// Time spent in user mode.
    pub user_time: Duration,
    /// Time spent in kernel mode.
    pub kernel_time: Duration,
}

/// Host system description captured with each result.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SystemInfo {
    /// Operating system name (from `std::env::consts::OS`).
    pub os: String,
    /// CPU model string.
    pub cpu_model: String,
    /// Logical CPU core count.
    pub cpu_cores: usize,
    /// Total system memory in bytes.
    pub total_memory: u64,
    /// Available memory in bytes at run time.
    pub available_memory: u64,
    /// Rust toolchain version used for the build.
    pub rust_version: String,
    /// Compiler flags used for the build.
    pub compiler_flags: Vec<String>,
}

/// Process environment captured with each result.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct EnvironmentMetadata {
    /// Captured environment variables (see `EnvironmentConfig::capture_env_vars`).
    pub env_vars: HashMap<String, String>,
    /// Working directory of the benchmarking process.
    pub working_directory: PathBuf,
    /// Command-line arguments of the benchmarking process.
    pub args: Vec<String>,
    /// System load averages (1/5/15 min) — currently placeholder values.
    pub load_average: Vec<f64>,
}
261
/// Outcome of analyzing a test case's benchmark history for regressions.
#[derive(Clone, Debug)]
pub struct RegressionAnalysis {
    /// Whether a regression was detected.
    pub regression_detected: bool,
    /// How severe the detected regression is.
    pub severity: RegressionSeverity,
    /// Names of the metrics that regressed.
    pub affected_metrics: Vec<String>,
    /// Statistical significance of the change.
    pub p_value: f64,
    /// Effect size of the change.
    pub effect_size: f64,
    /// Confidence interval for the effect.
    pub confidence_interval: (f64, f64),
    /// Human-readable analysis summary.
    pub detailed_analysis: String,
    /// Suggested follow-up actions, ordered by severity.
    pub recommendations: Vec<String>,
}

/// Regression severity levels, ordered from least to most severe.
/// The derive order gives `None < Minor < Moderate < Severe < Critical`.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum RegressionSeverity {
    /// No regression.
    None,
    /// < ~15% slowdown.
    Minor,
    /// < ~30% slowdown.
    Moderate,
    /// < ~50% slowdown.
    Severe,
    /// >= ~50% slowdown.
    Critical,
}

/// Per-run context describing the benchmarked workload.
pub struct BenchmarkContext {
    /// Input dimensions as (rows, columns); used for throughput math.
    pub data_size: (usize, usize),
    /// Number of iterations the caller intends to run.
    pub iterations: usize,
    /// Free-form configuration, copied into the result's metadata.
    pub config: HashMap<String, String>,
    /// Seed for reproducible data generation.
    pub random_seed: u64,
}
309
/// `Default` delegates to `new()`: in-memory storage, default configs.
impl Default for PerformanceRegressionTester {
    fn default() -> Self {
        Self::new()
    }
}
315
316impl PerformanceRegressionTester {
317 #[must_use]
319 pub fn new() -> Self {
320 Self {
321 storage: BenchmarkStorage::Memory {
322 results: Vec::new(),
323 },
324 analysis_config: StatisticalAnalysisConfig::default(),
325 environment_config: EnvironmentConfig::default(),
326 regression_thresholds: RegressionThresholds::default(),
327 profiling_config: ProfilingConfig::default(),
328 }
329 }
330
331 pub fn with_file_storage<P: AsRef<Path>>(path: P) -> Self {
333 Self {
334 storage: BenchmarkStorage::File {
335 path: path.as_ref().to_path_buf(),
336 },
337 ..Self::new()
338 }
339 }
340
341 pub fn benchmark_component<T, I, O>(
343 &mut self,
344 component: &T,
345 input: I,
346 context: &BenchmarkContext,
347 test_name: &str,
348 ) -> SklResult<BenchmarkResult>
349 where
350 T: Fn(I) -> O,
351 I: Clone,
352 {
353 let start_time = Instant::now();
354 let mut execution_times = Vec::new();
355
356 for _ in 0..self.environment_config.warmup_iterations {
358 let _ = component(input.clone());
359 }
360
361 for _ in 0..self.environment_config.measurement_iterations {
363 let measure_start = Instant::now();
364 let _ = component(input.clone());
365 execution_times.push(measure_start.elapsed());
366 }
367
368 let time_stats = self.calculate_time_statistics(&execution_times);
369 let memory_stats = self.collect_memory_statistics();
370 let cpu_stats = self.collect_cpu_statistics();
371 let throughput = self.calculate_throughput(&time_stats, context);
372
373 let result = BenchmarkResult {
374 benchmark_id: format!("{}_{}", test_name, Utc::now().timestamp()),
375 test_case: test_name.to_string(),
376 timestamp: Utc::now(),
377 metrics: PerformanceMetrics {
378 execution_time: time_stats,
379 memory_usage: memory_stats,
380 throughput,
381 cpu_utilization: cpu_stats,
382 custom_metrics: HashMap::new(),
383 },
384 system_info: self.collect_system_info(),
385 environment: self.collect_environment_metadata(),
386 commit_hash: self.get_git_commit_hash(),
387 metadata: context.config.clone(),
388 };
389
390 self.store_result(&result)?;
391 Ok(result)
392 }
393
    /// Benchmarks fitting a pipeline on `(x, y)` by wrapping the fit call in a
    /// closure and delegating to [`Self::benchmark_component`].
    ///
    /// Each iteration clones the pipeline so every fit starts from an unfitted
    /// state. When `y` is `None` the closure is a no-op fit — NOTE(review):
    /// unsupervised pipelines are therefore not actually fitted here; confirm
    /// whether that is intended.
    ///
    /// # Errors
    /// Propagates storage errors from `benchmark_component`; fit errors inside
    /// the closure are surfaced through the closure's `SklResult` return.
    pub fn benchmark_pipeline<'a, S>(
        &mut self,
        pipeline: &crate::Pipeline<S>,
        x: &ArrayView2<'a, Float>,
        y: Option<&'a ArrayView1<'a, Float>>,
        test_name: &str,
    ) -> SklResult<BenchmarkResult>
    where
        S: std::fmt::Debug + Clone,
        crate::Pipeline<S>: Clone + Fit<ArrayView2<'a, Float>, Option<&'a ArrayView1<'a, Float>>>,
    {
        // Context derived from the input shape; fixed seed for reproducibility.
        let context = BenchmarkContext {
            data_size: (x.nrows(), x.ncols()),
            iterations: self.environment_config.measurement_iterations,
            config: HashMap::new(),
            random_seed: 42,
        };

        let benchmark_fn = |(): ()| -> SklResult<()> {
            // Fresh clone per call so repeated fits don't reuse fitted state.
            let pipeline_clone = pipeline.clone();
            if let Some(y_vals) = y {
                let y_option = Some(y_vals);
                let _fitted = pipeline_clone.fit(x, &y_option)?;
            }
            Ok(())
        };

        self.benchmark_component(&benchmark_fn, (), &context, test_name)
    }
425
    /// Analyzes the stored history of `test_name` for performance regressions.
    ///
    /// With fewer than `analysis_config.min_samples_for_trend` results, a
    /// neutral "insufficient data" analysis is returned instead of an error.
    ///
    /// # Errors
    /// Propagates storage errors (file I/O, JSON parsing, or the
    /// unimplemented database backend).
    pub fn analyze_regressions(&self, test_name: &str) -> SklResult<RegressionAnalysis> {
        let results = self.get_historical_results(test_name)?;

        if results.len() < self.analysis_config.min_samples_for_trend {
            return Ok(RegressionAnalysis {
                regression_detected: false,
                severity: RegressionSeverity::None,
                affected_metrics: vec![],
                p_value: 1.0,
                effect_size: 0.0,
                confidence_interval: (0.0, 0.0),
                detailed_analysis: "Insufficient data for trend analysis".to_string(),
                recommendations: vec!["Collect more benchmark data".to_string()],
            });
        }

        // Detection and severity compare the latest run against the previous
        // one; the remaining figures come from (currently stubbed) helpers.
        let regression_detected = self.detect_performance_regression(&results)?;
        let severity = self.calculate_regression_severity(&results)?;
        let affected_metrics = self.identify_affected_metrics(&results)?;

        let p_value = self.calculate_statistical_significance(&results)?;
        let effect_size = self.calculate_effect_size(&results)?;
        let confidence_interval = self.calculate_confidence_interval(&results)?;

        let detailed_analysis = self.generate_detailed_analysis(&results)?;
        let recommendations = self.generate_recommendations(&results, &severity);

        Ok(RegressionAnalysis {
            regression_detected,
            severity,
            affected_metrics,
            p_value,
            effect_size,
            confidence_interval,
            detailed_analysis,
            recommendations,
        })
    }
467
468 pub fn generate_report(&self, test_pattern: Option<&str>) -> SklResult<PerformanceReport> {
470 let all_results = self.get_all_results()?;
471
472 let filtered_results = match test_pattern {
473 Some(pattern) => all_results
474 .into_iter()
475 .filter(|r| r.test_case.contains(pattern))
476 .collect(),
477 None => all_results,
478 };
479
480 let report = PerformanceReport::new(filtered_results, &self.analysis_config);
481 Ok(report)
482 }
483
484 fn calculate_time_statistics(&self, times: &[Duration]) -> TimeStatistics {
486 let mut sorted_times = times.to_vec();
487 sorted_times.sort();
488
489 let mean = Duration::from_nanos(
490 times.iter().map(|d| d.as_nanos() as u64).sum::<u64>() / times.len() as u64,
491 );
492
493 let median = sorted_times[times.len() / 2];
494
495 let variance = times
496 .iter()
497 .map(|d| {
498 let diff = d.as_nanos() as i64 - mean.as_nanos() as i64;
499 (diff * diff) as u64
500 })
501 .sum::<u64>()
502 / times.len() as u64;
503
504 let std_dev = Duration::from_nanos((variance as f64).sqrt() as u64);
505
506 let p95_idx = (times.len() as f64 * 0.95) as usize;
507 let p99_idx = (times.len() as f64 * 0.99) as usize;
508
509 TimeStatistics {
510 mean,
511 median,
512 std_dev,
513 min: sorted_times.first().copied().unwrap_or_default(),
514 max: sorted_times.last().copied().unwrap_or_default(),
515 p95: sorted_times[p95_idx.min(times.len() - 1)],
516 p99: sorted_times[p99_idx.min(times.len() - 1)],
517 samples: times.to_vec(),
518 }
519 }
520
521 fn collect_memory_statistics(&self) -> MemoryStatistics {
522 MemoryStatistics {
524 peak_usage: 1024 * 1024, average_usage: 512 * 1024,
526 allocations: 100,
527 deallocations: 95,
528 fragmentation_score: 0.1,
529 }
530 }
531
    /// Returns CPU statistics for the current run.
    ///
    /// NOTE(review): these are hard-coded placeholder values — no actual CPU
    /// sampling is performed; replace once a profiler backend exists.
    fn collect_cpu_statistics(&self) -> CpuStatistics {
        CpuStatistics {
            average_utilization: 0.75,
            peak_utilization: 0.95,
            user_time: Duration::from_millis(100),
            kernel_time: Duration::from_millis(10),
        }
    }
541
542 fn calculate_throughput(
543 &self,
544 time_stats: &TimeStatistics,
545 context: &BenchmarkContext,
546 ) -> ThroughputMetrics {
547 let ops_per_second = 1.0 / time_stats.mean.as_secs_f64();
548 let samples_per_second = context.data_size.0 as f64 / time_stats.mean.as_secs_f64();
549 let features_per_second =
550 (context.data_size.0 * context.data_size.1) as f64 / time_stats.mean.as_secs_f64();
551
552 ThroughputMetrics {
553 ops_per_second,
554 samples_per_second,
555 features_per_second,
556 bytes_per_second: features_per_second * 8.0, }
558 }
559
560 fn collect_system_info(&self) -> SystemInfo {
561 SystemInfo {
562 os: std::env::consts::OS.to_string(),
563 cpu_model: "Unknown".to_string(), cpu_cores: num_cpus::get(),
565 total_memory: 16 * 1024 * 1024 * 1024, available_memory: 8 * 1024 * 1024 * 1024, rust_version: "1.75.0".to_string(), compiler_flags: vec!["--release".to_string()],
569 }
570 }
571
572 fn collect_environment_metadata(&self) -> EnvironmentMetadata {
573 let mut env_vars = HashMap::new();
574 for var_name in &self.environment_config.capture_env_vars {
575 if let Ok(value) = std::env::var(var_name) {
576 env_vars.insert(var_name.clone(), value);
577 }
578 }
579
580 EnvironmentMetadata {
581 env_vars,
582 working_directory: std::env::current_dir().unwrap_or_default(),
583 args: std::env::args().collect(),
584 load_average: vec![0.5, 0.6, 0.7], }
586 }
587
    /// Returns the git commit hash of the current build.
    ///
    /// Stub: always `None` — no git lookup is implemented yet.
    fn get_git_commit_hash(&self) -> Option<String> {
        None
    }
592
    /// Persists one benchmark result to the configured storage backend.
    ///
    /// Memory storage appends a clone; file storage appends the result as a
    /// single JSON line (creating the file if needed); database storage is
    /// not implemented and returns an error.
    ///
    /// # Errors
    /// `SklearsError::InvalidInput` on file open/serialize/write failure,
    /// `SklearsError::NotImplemented` for the database backend.
    fn store_result(&mut self, result: &BenchmarkResult) -> SklResult<()> {
        match &mut self.storage {
            BenchmarkStorage::Memory { results } => {
                results.push(result.clone());
            }
            BenchmarkStorage::File { path } => {
                // Append-only: one JSON object per line (JSON Lines format).
                let mut file = OpenOptions::new()
                    .create(true)
                    .append(true)
                    .open(path)
                    .map_err(|e| SklearsError::InvalidInput(format!("Failed to open file: {e}")))?;

                let json_line = serde_json::to_string(result).map_err(|e| {
                    SklearsError::InvalidInput(format!("Failed to serialize result: {e}"))
                })?;

                writeln!(file, "{json_line}").map_err(|e| {
                    SklearsError::InvalidInput(format!("Failed to write result: {e}"))
                })?;
            }
            BenchmarkStorage::Database { .. } => {
                return Err(SklearsError::NotImplemented(
                    "Database storage not implemented".to_string(),
                ));
            }
        }
        Ok(())
    }
621
622 fn get_historical_results(&self, test_name: &str) -> SklResult<Vec<BenchmarkResult>> {
623 match &self.storage {
624 BenchmarkStorage::Memory { results } => Ok(results
625 .iter()
626 .filter(|r| r.test_case == test_name)
627 .cloned()
628 .collect()),
629 BenchmarkStorage::File { path } => {
630 let file = File::open(path)
631 .map_err(|e| SklearsError::InvalidInput(format!("Failed to open file: {e}")))?;
632
633 let reader = BufReader::new(file);
634 let mut results = Vec::new();
635
636 for line in reader.lines() {
637 let line = line.map_err(|e| {
638 SklearsError::InvalidInput(format!("Failed to read line: {e}"))
639 })?;
640 let result: BenchmarkResult = serde_json::from_str(&line).map_err(|e| {
641 SklearsError::InvalidInput(format!("Failed to parse result: {e}"))
642 })?;
643
644 if result.test_case == test_name {
645 results.push(result);
646 }
647 }
648
649 Ok(results)
650 }
651 BenchmarkStorage::Database { .. } => Err(SklearsError::NotImplemented(
652 "Database storage not implemented".to_string(),
653 )),
654 }
655 }
656
    /// Loads every stored benchmark result.
    ///
    /// Memory storage returns a clone of the in-memory vector; file storage
    /// parses the JSON-lines file one line per result; database storage is
    /// not implemented.
    ///
    /// # Errors
    /// `SklearsError::InvalidInput` if the file cannot be opened, a line
    /// cannot be read, or a line is not valid `BenchmarkResult` JSON;
    /// `SklearsError::NotImplemented` for the database backend.
    fn get_all_results(&self) -> SklResult<Vec<BenchmarkResult>> {
        match &self.storage {
            BenchmarkStorage::Memory { results } => Ok(results.clone()),
            BenchmarkStorage::File { path } => {
                let file = File::open(path)
                    .map_err(|e| SklearsError::InvalidInput(format!("Failed to open file: {e}")))?;

                let reader = BufReader::new(file);
                let mut results = Vec::new();

                // One JSON object per line; any malformed line aborts the load.
                for line in reader.lines() {
                    let line = line.map_err(|e| {
                        SklearsError::InvalidInput(format!("Failed to read line: {e}"))
                    })?;
                    let result: BenchmarkResult = serde_json::from_str(&line).map_err(|e| {
                        SklearsError::InvalidInput(format!("Failed to parse result: {e}"))
                    })?;
                    results.push(result);
                }

                Ok(results)
            }
            BenchmarkStorage::Database { .. } => Err(SklearsError::NotImplemented(
                "Database storage not implemented".to_string(),
            )),
        }
    }
684
685 fn detect_performance_regression(&self, results: &[BenchmarkResult]) -> SklResult<bool> {
687 if results.len() < 2 {
688 return Ok(false);
689 }
690
691 let recent = &results[results.len() - 1];
692 let baseline = &results[results.len() - 2];
693
694 let regression_ratio = recent.metrics.execution_time.mean.as_secs_f64()
695 / baseline.metrics.execution_time.mean.as_secs_f64();
696
697 Ok(regression_ratio > (1.0 + self.regression_thresholds.relative_threshold))
698 }
699
700 fn calculate_regression_severity(
701 &self,
702 results: &[BenchmarkResult],
703 ) -> SklResult<RegressionSeverity> {
704 if results.len() < 2 {
705 return Ok(RegressionSeverity::None);
706 }
707
708 let recent = &results[results.len() - 1];
709 let baseline = &results[results.len() - 2];
710
711 let regression_ratio = recent.metrics.execution_time.mean.as_secs_f64()
712 / baseline.metrics.execution_time.mean.as_secs_f64();
713
714 match regression_ratio {
715 r if r < 1.05 => Ok(RegressionSeverity::None),
716 r if r < 1.15 => Ok(RegressionSeverity::Minor),
717 r if r < 1.3 => Ok(RegressionSeverity::Moderate),
718 r if r < 1.5 => Ok(RegressionSeverity::Severe),
719 _ => Ok(RegressionSeverity::Critical),
720 }
721 }
722
    /// Lists which metrics regressed.
    /// Stub: always reports `execution_time`; real per-metric analysis TODO.
    fn identify_affected_metrics(&self, _results: &[BenchmarkResult]) -> SklResult<Vec<String>> {
        Ok(vec!["execution_time".to_string()])
    }

    /// Computes the p-value of the observed change.
    /// Stub: returns a fixed 0.05; no statistical test is actually run.
    fn calculate_statistical_significance(&self, _results: &[BenchmarkResult]) -> SklResult<f64> {
        Ok(0.05)
    }

    /// Computes the effect size of the observed change.
    /// Stub: returns a fixed 0.5.
    fn calculate_effect_size(&self, _results: &[BenchmarkResult]) -> SklResult<f64> {
        Ok(0.5)
    }

    /// Computes a confidence interval for the effect.
    /// Stub: returns a fixed (0.1, 0.3).
    fn calculate_confidence_interval(&self, _results: &[BenchmarkResult]) -> SklResult<(f64, f64)> {
        Ok((0.1, 0.3))
    }

    /// Produces a human-readable analysis summary.
    /// Stub: returns fixed text regardless of the input data.
    fn generate_detailed_analysis(&self, _results: &[BenchmarkResult]) -> SklResult<String> {
        Ok(
            "Performance analysis complete. Minor regression detected in execution time."
                .to_string(),
        )
    }
749
750 fn generate_recommendations(
751 &self,
752 _results: &[BenchmarkResult],
753 severity: &RegressionSeverity,
754 ) -> Vec<String> {
755 match severity {
756 RegressionSeverity::None => vec!["Performance is stable".to_string()],
757 RegressionSeverity::Minor => vec![
758 "Monitor performance in future releases".to_string(),
759 "Consider profiling to identify optimization opportunities".to_string(),
760 ],
761 RegressionSeverity::Moderate => vec![
762 "Investigate recent changes that may have caused regression".to_string(),
763 "Run detailed profiling to identify bottlenecks".to_string(),
764 "Consider reverting problematic changes".to_string(),
765 ],
766 RegressionSeverity::Severe | RegressionSeverity::Critical => vec![
767 "Immediate investigation required".to_string(),
768 "Consider blocking release until regression is fixed".to_string(),
769 "Run comprehensive profiling and analysis".to_string(),
770 "Review all recent changes".to_string(),
771 ],
772 }
773 }
774}
775
/// Aggregated report over a set of benchmark results.
pub struct PerformanceReport {
    /// The (possibly filtered) results the report was built from.
    pub results: Vec<BenchmarkResult>,
    /// High-level summary statistics.
    pub summary: ReportSummary,
    /// Trend analysis per test case and overall.
    pub trends: TrendAnalysis,
    /// Regression alerts found in the results.
    pub regressions: Vec<RegressionAlert>,
}

/// High-level summary of a result set.
#[derive(Clone, Debug)]
pub struct ReportSummary {
    /// Total number of benchmark runs.
    pub total_benchmarks: usize,
    /// Number of distinct test cases.
    pub test_cases: usize,
    /// Earliest and latest result timestamps.
    pub time_range: (DateTime<Utc>, DateTime<Utc>),
    /// Averaged metrics — NOTE(review): currently filled with placeholder
    /// values, not computed from the results.
    pub average_metrics: PerformanceMetrics,
}

/// Trend analysis over benchmark history.
#[derive(Clone, Debug)]
pub struct TrendAnalysis {
    /// Trend per test case.
    pub trends_by_test: HashMap<String, PerformanceTrend>,
    /// Trend over all test cases combined.
    pub overall_trend: PerformanceTrend,
}

/// Direction of a performance trend.
#[derive(Clone, Debug)]
pub enum PerformanceTrend {
    /// Getting faster at `rate` per unit time.
    Improving { rate: f64 },
    /// No meaningful change.
    Stable,
    /// Getting slower at `rate` per unit time.
    Degrading { rate: f64 },
    /// Not enough data to determine a trend.
    Insufficient,
}

/// A single detected regression, suitable for surfacing to users/CI.
#[derive(Clone, Debug)]
pub struct RegressionAlert {
    /// Test case the regression was detected in.
    pub test_case: String,
    /// How severe the regression is.
    pub severity: RegressionSeverity,
    /// Human-readable description.
    pub description: String,
    /// When the regression was detected.
    pub detected_at: DateTime<Utc>,
}
835
836impl PerformanceReport {
837 #[must_use]
838 pub fn new(results: Vec<BenchmarkResult>, _config: &StatisticalAnalysisConfig) -> Self {
839 let summary = ReportSummary::from_results(&results);
840 let trends = TrendAnalysis::from_results(&results);
841 let regressions = Self::detect_regressions(&results);
842
843 Self {
844 results,
845 summary,
846 trends,
847 regressions,
848 }
849 }
850
851 fn detect_regressions(results: &[BenchmarkResult]) -> Vec<RegressionAlert> {
852 vec![]
854 }
855}
856
impl ReportSummary {
    /// Summarizes a result set: run count, distinct test cases, and the
    /// covered time range.
    ///
    /// The `average_metrics` field is currently filled with hard-coded
    /// placeholder values rather than being computed from `results` —
    /// NOTE(review): implement real averaging before relying on it.
    fn from_results(results: &[BenchmarkResult]) -> Self {
        let total_benchmarks = results.len();
        // Distinct test case names via a throwaway HashSet.
        let test_cases = results
            .iter()
            .map(|r| r.test_case.clone())
            .collect::<std::collections::HashSet<_>>()
            .len();

        // Empty input degenerates to a zero-width range at "now".
        let (start_time, end_time) = if results.is_empty() {
            (Utc::now(), Utc::now())
        } else {
            let start = results
                .iter()
                .map(|r| r.timestamp)
                .min()
                .unwrap_or_default();
            let end = results
                .iter()
                .map(|r| r.timestamp)
                .max()
                .unwrap_or_default();
            (start, end)
        };

        // Placeholder averages (see NOTE above).
        let average_metrics = PerformanceMetrics {
            execution_time: TimeStatistics {
                mean: Duration::from_millis(100),
                median: Duration::from_millis(95),
                std_dev: Duration::from_millis(10),
                min: Duration::from_millis(80),
                max: Duration::from_millis(150),
                p95: Duration::from_millis(130),
                p99: Duration::from_millis(145),
                samples: vec![],
            },
            memory_usage: MemoryStatistics {
                peak_usage: 1024 * 1024,
                average_usage: 512 * 1024,
                allocations: 100,
                deallocations: 95,
                fragmentation_score: 0.1,
            },
            throughput: ThroughputMetrics {
                ops_per_second: 100.0,
                samples_per_second: 1000.0,
                features_per_second: 10000.0,
                bytes_per_second: 80000.0,
            },
            cpu_utilization: CpuStatistics {
                average_utilization: 0.75,
                peak_utilization: 0.95,
                user_time: Duration::from_millis(100),
                kernel_time: Duration::from_millis(10),
            },
            custom_metrics: HashMap::new(),
        };

        Self {
            total_benchmarks,
            test_cases,
            time_range: (start_time, end_time),
            average_metrics,
        }
    }
}
924
impl TrendAnalysis {
    /// Builds trend analysis from a result set.
    /// Stub: reports no per-test trends and an overall `Stable` trend; the
    /// results are not yet examined.
    fn from_results(_results: &[BenchmarkResult]) -> Self {
        Self {
            trends_by_test: HashMap::new(),
            overall_trend: PerformanceTrend::Stable,
        }
    }
}
933
impl Default for StatisticalAnalysisConfig {
    /// 95% confidence, 5-sample minimum for trends, 10-sample rolling window,
    /// t-test + linear regression, and 1.5x-IQR outlier filtering.
    fn default() -> Self {
        Self {
            confidence_level: 0.95,
            min_samples_for_trend: 5,
            rolling_window_size: 10,
            statistical_tests: vec![StatisticalTest::TTest, StatisticalTest::LinearRegression],
            outlier_detection: OutlierDetection::IQR { multiplier: 1.5 },
        }
    }
}
946
impl Default for EnvironmentConfig {
    /// 5 warmup + 10 measured iterations, no CPU/memory constraints, capture
    /// the Rust/Cargo version variables, and collect system info.
    fn default() -> Self {
        Self {
            warmup_iterations: 5,
            measurement_iterations: 10,
            cpu_affinity: None,
            memory_limit: None,
            capture_env_vars: vec!["RUST_VERSION".to_string(), "CARGO_PKG_VERSION".to_string()],
            collect_system_info: true,
        }
    }
}
959
960impl Default for RegressionThresholds {
961 fn default() -> Self {
962 Self {
963 relative_threshold: 0.05, absolute_threshold: Duration::from_millis(10),
965 memory_threshold: 1024 * 1024, throughput_threshold: 0.05, }
968 }
969}
970
impl Default for ProfilingConfig {
    /// Profiling disabled by default; 1 ms sampling interval when enabled,
    /// no output directory, no detailed call stacks.
    fn default() -> Self {
        Self {
            cpu_profiling: false,
            memory_profiling: false,
            sampling_frequency: Duration::from_millis(1),
            output_directory: None,
            detailed_call_stacks: false,
        }
    }
}
982
#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_performance_tester_creation() {
        let tester = PerformanceRegressionTester::new();
        assert!(matches!(tester.storage, BenchmarkStorage::Memory { .. }));
    }

    #[test]
    fn test_file_storage_creation() {
        let tester = PerformanceRegressionTester::with_file_storage("/tmp/benchmarks.jsonl");
        assert!(matches!(tester.storage, BenchmarkStorage::File { .. }));
    }

    #[test]
    fn test_time_statistics_calculation() {
        let tester = PerformanceRegressionTester::new();
        let times = vec![
            Duration::from_millis(100),
            Duration::from_millis(110),
            Duration::from_millis(95),
            Duration::from_millis(105),
            Duration::from_millis(120),
        ];

        // Fixed: the argument had been corrupted to the mojibake `×`
        // (an HTML `&times;` entity) — it must be `&times`.
        let stats = tester.calculate_time_statistics(&times);
        assert_eq!(stats.min, Duration::from_millis(95));
        assert_eq!(stats.max, Duration::from_millis(120));
        assert_eq!(stats.samples.len(), 5);
    }

    #[test]
    fn test_benchmark_component() {
        let mut tester = PerformanceRegressionTester::new();

        let test_function = |x: i32| x * 2;
        let context = BenchmarkContext {
            data_size: (1000, 10),
            iterations: 5,
            config: HashMap::new(),
            random_seed: 42,
        };

        let result = tester.benchmark_component(&test_function, 42, &context, "test_multiply");
        assert!(result.is_ok());

        let benchmark_result = result.expect("operation should succeed");
        assert_eq!(benchmark_result.test_case, "test_multiply");
        assert!(!benchmark_result.metrics.execution_time.samples.is_empty());
    }

    #[test]
    fn test_regression_severity_ordering() {
        assert!(RegressionSeverity::Critical > RegressionSeverity::Severe);
        assert!(RegressionSeverity::Severe > RegressionSeverity::Moderate);
        assert!(RegressionSeverity::Moderate > RegressionSeverity::Minor);
        assert!(RegressionSeverity::Minor > RegressionSeverity::None);
    }

    #[test]
    fn test_throughput_calculation() {
        let tester = PerformanceRegressionTester::new();
        let time_stats = TimeStatistics {
            mean: Duration::from_millis(100),
            median: Duration::from_millis(100),
            std_dev: Duration::from_millis(5),
            min: Duration::from_millis(90),
            max: Duration::from_millis(110),
            p95: Duration::from_millis(108),
            p99: Duration::from_millis(110),
            samples: vec![],
        };

        let context = BenchmarkContext {
            data_size: (1000, 10),
            iterations: 10,
            config: HashMap::new(),
            random_seed: 42,
        };

        // 100 ms mean => 10 ops/s; 1000 rows / 0.1 s => 10_000 samples/s.
        let throughput = tester.calculate_throughput(&time_stats, &context);
        assert_eq!(throughput.ops_per_second, 10.0);
        assert_eq!(throughput.samples_per_second, 10000.0);
    }
}