use chrono::{DateTime, Utc};
use scirs2_core::ndarray::{ArrayView1, ArrayView2};
use serde::{Deserialize, Serialize};
use sklears_core::{
    error::{Result as SklResult, SklearsError},
    traits::Fit,
    types::Float,
};
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{BufRead, BufReader, Write};
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant};

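/// End-to-end performance regression testing: benchmarks components,
/// persists the results, and analyzes the history for slowdowns.
///
/// A minimal usage sketch (the data sizes and closure below are
/// illustrative, not part of the API):
///
/// ```ignore
/// let mut tester = PerformanceRegressionTester::new();
/// let context = BenchmarkContext {
///     data_size: (1_000, 16),
///     iterations: 10,
///     config: std::collections::HashMap::new(),
///     random_seed: 42,
/// };
/// let result = tester.benchmark_component(&|x: u64| x * 2, 21u64, &context, "double")?;
/// let analysis = tester.analyze_regressions("double")?;
/// ```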
pub struct PerformanceRegressionTester {
    /// Backend used to persist benchmark results.
    pub storage: BenchmarkStorage,
    /// Settings for statistical analysis of the benchmark history.
    pub analysis_config: StatisticalAnalysisConfig,
    /// Controls for the measurement environment (warmup, iterations, ...).
    pub environment_config: EnvironmentConfig,
    /// Thresholds that decide when a slowdown counts as a regression.
    pub regression_thresholds: RegressionThresholds,
    /// Optional CPU/memory profiling settings.
    pub profiling_config: ProfilingConfig,
}

/// Storage backend for benchmark results.
pub enum BenchmarkStorage {
    /// Append results as JSON lines to a file.
    File { path: PathBuf },
    /// Keep results in memory (useful for tests).
    Memory { results: Vec<BenchmarkResult> },
    /// Store results in an external database (not yet implemented).
    Database { connection_string: String },
}

#[derive(Clone, Debug)]
pub struct StatisticalAnalysisConfig {
    pub confidence_level: f64,
    pub min_samples_for_trend: usize,
    pub rolling_window_size: usize,
    pub statistical_tests: Vec<StatisticalTest>,
    pub outlier_detection: OutlierDetection,
}

#[derive(Clone, Debug)]
pub enum StatisticalTest {
    TTest,
    MannWhitneyU,
    KolmogorovSmirnov,
    LinearRegression,
    ChangePointDetection,
}

#[derive(Clone, Debug)]
pub enum OutlierDetection {
    None,
    ZScore { threshold: f64 },
    IQR { multiplier: f64 },
    ModifiedZScore { threshold: f64 },
}

#[derive(Clone, Debug)]
pub struct EnvironmentConfig {
    pub warmup_iterations: usize,
    pub measurement_iterations: usize,
    pub cpu_affinity: Option<Vec<usize>>,
    pub memory_limit: Option<u64>,
    pub capture_env_vars: Vec<String>,
    pub collect_system_info: bool,
}

#[derive(Clone, Debug)]
pub struct RegressionThresholds {
    pub relative_threshold: f64,
    pub absolute_threshold: Duration,
    pub memory_threshold: u64,
    pub throughput_threshold: f64,
}

#[derive(Clone, Debug)]
pub struct ProfilingConfig {
    pub cpu_profiling: bool,
    pub memory_profiling: bool,
    pub sampling_frequency: Duration,
    pub output_directory: Option<PathBuf>,
    pub detailed_call_stacks: bool,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchmarkResult {
    pub benchmark_id: String,
    pub test_case: String,
    pub timestamp: DateTime<Utc>,
    pub metrics: PerformanceMetrics,
    pub system_info: SystemInfo,
    pub environment: EnvironmentMetadata,
    pub commit_hash: Option<String>,
    pub metadata: HashMap<String, String>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    pub execution_time: TimeStatistics,
    pub memory_usage: MemoryStatistics,
    pub throughput: ThroughputMetrics,
    pub cpu_utilization: CpuStatistics,
    pub custom_metrics: HashMap<String, f64>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TimeStatistics {
    pub mean: Duration,
    pub median: Duration,
    pub std_dev: Duration,
    pub min: Duration,
    pub max: Duration,
    pub p95: Duration,
    pub p99: Duration,
    pub samples: Vec<Duration>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemoryStatistics {
    pub peak_usage: u64,
    pub average_usage: u64,
    pub allocations: u64,
    pub deallocations: u64,
    pub fragmentation_score: f64,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ThroughputMetrics {
    pub ops_per_second: f64,
    pub samples_per_second: f64,
    pub features_per_second: f64,
    pub bytes_per_second: f64,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CpuStatistics {
    pub average_utilization: f64,
    pub peak_utilization: f64,
    pub user_time: Duration,
    pub kernel_time: Duration,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SystemInfo {
    pub os: String,
    pub cpu_model: String,
    pub cpu_cores: usize,
    pub total_memory: u64,
    pub available_memory: u64,
    pub rust_version: String,
    pub compiler_flags: Vec<String>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct EnvironmentMetadata {
    pub env_vars: HashMap<String, String>,
    pub working_directory: PathBuf,
    pub args: Vec<String>,
    pub load_average: Vec<f64>,
}

#[derive(Clone, Debug)]
pub struct RegressionAnalysis {
    pub regression_detected: bool,
    pub severity: RegressionSeverity,
    pub affected_metrics: Vec<String>,
    pub p_value: f64,
    pub effect_size: f64,
    pub confidence_interval: (f64, f64),
    pub detailed_analysis: String,
    pub recommendations: Vec<String>,
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum RegressionSeverity {
    None,
    Minor,
    Moderate,
    Severe,
    Critical,
}

pub struct BenchmarkContext {
    pub data_size: (usize, usize),
    pub iterations: usize,
    pub config: HashMap<String, String>,
    pub random_seed: u64,
}

impl Default for PerformanceRegressionTester {
    fn default() -> Self {
        Self::new()
    }
}

impl PerformanceRegressionTester {
    /// Creates a tester with in-memory storage and default configuration.
    #[must_use]
    pub fn new() -> Self {
        Self {
            storage: BenchmarkStorage::Memory {
                results: Vec::new(),
            },
            analysis_config: StatisticalAnalysisConfig::default(),
            environment_config: EnvironmentConfig::default(),
            regression_thresholds: RegressionThresholds::default(),
            profiling_config: ProfilingConfig::default(),
        }
    }

    /// Creates a tester that appends results as JSON lines to `path`.
    pub fn with_file_storage<P: AsRef<Path>>(path: P) -> Self {
        Self {
            storage: BenchmarkStorage::File {
                path: path.as_ref().to_path_buf(),
            },
            ..Self::new()
        }
    }
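
    /// Benchmarks a single callable: runs `warmup_iterations` untimed passes,
    /// then times `measurement_iterations` passes and aggregates the samples.
    ///
    /// A minimal call-site sketch (the closure and test name are illustrative):
    ///
    /// ```ignore
    /// let mut tester = PerformanceRegressionTester::new();
    /// let context = BenchmarkContext {
    ///     data_size: (1_000, 1),
    ///     iterations: 10,
    ///     config: std::collections::HashMap::new(),
    ///     random_seed: 42,
    /// };
    /// let sum = |v: Vec<f64>| v.iter().sum::<f64>();
    /// let result = tester.benchmark_component(&sum, vec![1.0; 1_000], &context, "sum")?;
    /// assert_eq!(result.test_case, "sum");
    /// ```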
    pub fn benchmark_component<T, I, O>(
        &mut self,
        component: &T,
        input: I,
        context: &BenchmarkContext,
        test_name: &str,
    ) -> SklResult<BenchmarkResult>
    where
        T: Fn(I) -> O,
        I: Clone,
    {
        let mut execution_times = Vec::new();

        // Warmup passes are run but not timed.
        for _ in 0..self.environment_config.warmup_iterations {
            let _ = component(input.clone());
        }

        // Timed measurement passes.
        for _ in 0..self.environment_config.measurement_iterations {
            let measure_start = Instant::now();
            let _ = component(input.clone());
            execution_times.push(measure_start.elapsed());
        }

        let time_stats = self.calculate_time_statistics(&execution_times);
        let memory_stats = self.collect_memory_statistics();
        let cpu_stats = self.collect_cpu_statistics();
        let throughput = self.calculate_throughput(&time_stats, context);

        let result = BenchmarkResult {
            benchmark_id: format!("{}_{}", test_name, Utc::now().timestamp()),
            test_case: test_name.to_string(),
            timestamp: Utc::now(),
            metrics: PerformanceMetrics {
                execution_time: time_stats,
                memory_usage: memory_stats,
                throughput,
                cpu_utilization: cpu_stats,
                custom_metrics: HashMap::new(),
            },
            system_info: self.collect_system_info(),
            environment: self.collect_environment_metadata(),
            commit_hash: self.get_git_commit_hash(),
            metadata: context.config.clone(),
        };

        self.store_result(&result)?;
        Ok(result)
    }

    pub fn benchmark_pipeline<'a, S>(
        &mut self,
        pipeline: &crate::Pipeline<S>,
        x: &ArrayView2<'a, Float>,
        y: Option<&'a ArrayView1<'a, Float>>,
        test_name: &str,
    ) -> SklResult<BenchmarkResult>
    where
        S: std::fmt::Debug + Clone,
        crate::Pipeline<S>: Clone + Fit<ArrayView2<'a, Float>, Option<&'a ArrayView1<'a, Float>>>,
    {
        let context = BenchmarkContext {
            data_size: (x.nrows(), x.ncols()),
            iterations: self.environment_config.measurement_iterations,
            config: HashMap::new(),
            random_seed: 42,
        };

        // Each measured iteration clones the pipeline so every fit starts
        // from the same unfitted state.
        let benchmark_fn = |(): ()| -> SklResult<()> {
            let pipeline_clone = pipeline.clone();
            if let Some(y_vals) = y {
                let y_option = Some(y_vals);
                let _fitted = pipeline_clone.fit(x, &y_option)?;
            }
            Ok(())
        };

        self.benchmark_component(&benchmark_fn, (), &context, test_name)
    }

    pub fn analyze_regressions(&self, test_name: &str) -> SklResult<RegressionAnalysis> {
        let results = self.get_historical_results(test_name)?;

        if results.len() < self.analysis_config.min_samples_for_trend {
            return Ok(RegressionAnalysis {
                regression_detected: false,
                severity: RegressionSeverity::None,
                affected_metrics: vec![],
                p_value: 1.0,
                effect_size: 0.0,
                confidence_interval: (0.0, 0.0),
                detailed_analysis: "Insufficient data for trend analysis".to_string(),
                recommendations: vec!["Collect more benchmark data".to_string()],
            });
        }

        let regression_detected = self.detect_performance_regression(&results)?;
        let severity = self.calculate_regression_severity(&results)?;
        let affected_metrics = self.identify_affected_metrics(&results)?;

        let p_value = self.calculate_statistical_significance(&results)?;
        let effect_size = self.calculate_effect_size(&results)?;
        let confidence_interval = self.calculate_confidence_interval(&results)?;

        let detailed_analysis = self.generate_detailed_analysis(&results)?;
        let recommendations = self.generate_recommendations(&results, &severity);

        Ok(RegressionAnalysis {
            regression_detected,
            severity,
            affected_metrics,
            p_value,
            effect_size,
            confidence_interval,
            detailed_analysis,
            recommendations,
        })
    }

    pub fn generate_report(&self, test_pattern: Option<&str>) -> SklResult<PerformanceReport> {
        let all_results = self.get_all_results()?;

        let filtered_results = match test_pattern {
            Some(pattern) => all_results
                .into_iter()
                .filter(|r| r.test_case.contains(pattern))
                .collect(),
            None => all_results,
        };

        Ok(PerformanceReport::new(filtered_results, &self.analysis_config))
    }

    fn calculate_time_statistics(&self, times: &[Duration]) -> TimeStatistics {
        assert!(!times.is_empty(), "cannot compute statistics of zero samples");

        let mut sorted_times = times.to_vec();
        sorted_times.sort();

        let mean_nanos =
            times.iter().map(|d| d.as_nanos() as f64).sum::<f64>() / times.len() as f64;
        let mean = Duration::from_nanos(mean_nanos as u64);

        let median = sorted_times[times.len() / 2];

        // Population variance, computed in f64 to avoid integer overflow
        // when samples run into the seconds range.
        let variance = times
            .iter()
            .map(|d| {
                let diff = d.as_nanos() as f64 - mean_nanos;
                diff * diff
            })
            .sum::<f64>()
            / times.len() as f64;

        let std_dev = Duration::from_nanos(variance.sqrt() as u64);

        let p95_idx = (times.len() as f64 * 0.95) as usize;
        let p99_idx = (times.len() as f64 * 0.99) as usize;

        TimeStatistics {
            mean,
            median,
            std_dev,
            min: *sorted_times.first().unwrap(),
            max: *sorted_times.last().unwrap(),
            p95: sorted_times[p95_idx.min(times.len() - 1)],
            p99: sorted_times[p99_idx.min(times.len() - 1)],
            samples: times.to_vec(),
        }
    }

    fn collect_memory_statistics(&self) -> MemoryStatistics {
        // Placeholder values; real memory profiling is not yet wired in.
        MemoryStatistics {
            peak_usage: 1024 * 1024,
            average_usage: 512 * 1024,
            allocations: 100,
            deallocations: 95,
            fragmentation_score: 0.1,
        }
    }

    fn collect_cpu_statistics(&self) -> CpuStatistics {
        // Placeholder values; real CPU sampling is not yet wired in.
        CpuStatistics {
            average_utilization: 0.75,
            peak_utilization: 0.95,
            user_time: Duration::from_millis(100),
            kernel_time: Duration::from_millis(10),
        }
    }

    fn calculate_throughput(
        &self,
        time_stats: &TimeStatistics,
        context: &BenchmarkContext,
    ) -> ThroughputMetrics {
        let ops_per_second = 1.0 / time_stats.mean.as_secs_f64();
        let samples_per_second = context.data_size.0 as f64 / time_stats.mean.as_secs_f64();
        let features_per_second =
            (context.data_size.0 * context.data_size.1) as f64 / time_stats.mean.as_secs_f64();

        ThroughputMetrics {
            ops_per_second,
            samples_per_second,
            features_per_second,
            // Assumes 8 bytes per feature (f64).
            bytes_per_second: features_per_second * 8.0,
        }
    }

    fn collect_system_info(&self) -> SystemInfo {
        // CPU model, memory sizes, Rust version, and compiler flags are
        // placeholders pending real system introspection.
        SystemInfo {
            os: std::env::consts::OS.to_string(),
            cpu_model: "Unknown".to_string(),
            cpu_cores: num_cpus::get(),
            total_memory: 16 * 1024 * 1024 * 1024,
            available_memory: 8 * 1024 * 1024 * 1024,
            rust_version: "1.75.0".to_string(),
            compiler_flags: vec!["--release".to_string()],
        }
    }

    fn collect_environment_metadata(&self) -> EnvironmentMetadata {
        let mut env_vars = HashMap::new();
        for var_name in &self.environment_config.capture_env_vars {
            if let Ok(value) = std::env::var(var_name) {
                env_vars.insert(var_name.clone(), value);
            }
        }

        EnvironmentMetadata {
            env_vars,
            working_directory: std::env::current_dir().unwrap_or_default(),
            args: std::env::args().collect(),
            // Placeholder load averages; not read from the OS yet.
            load_average: vec![0.5, 0.6, 0.7],
        }
    }

    fn get_git_commit_hash(&self) -> Option<String> {
        // Git integration is not implemented; results carry no commit hash.
        None
    }

    fn store_result(&mut self, result: &BenchmarkResult) -> SklResult<()> {
        match &mut self.storage {
            BenchmarkStorage::Memory { results } => {
                results.push(result.clone());
            }
            BenchmarkStorage::File { path } => {
                let mut file = OpenOptions::new()
                    .create(true)
                    .append(true)
                    .open(path)
                    .map_err(|e| SklearsError::InvalidInput(format!("Failed to open file: {e}")))?;

                let json_line = serde_json::to_string(result).map_err(|e| {
                    SklearsError::InvalidInput(format!("Failed to serialize result: {e}"))
                })?;

                writeln!(file, "{json_line}").map_err(|e| {
                    SklearsError::InvalidInput(format!("Failed to write result: {e}"))
                })?;
            }
            BenchmarkStorage::Database { .. } => {
                return Err(SklearsError::NotImplemented(
                    "Database storage not implemented".to_string(),
                ));
            }
        }
        Ok(())
    }

    fn get_historical_results(&self, test_name: &str) -> SklResult<Vec<BenchmarkResult>> {
        match &self.storage {
            BenchmarkStorage::Memory { results } => Ok(results
                .iter()
                .filter(|r| r.test_case == test_name)
                .cloned()
                .collect()),
            BenchmarkStorage::File { path } => {
                let file = File::open(path)
                    .map_err(|e| SklearsError::InvalidInput(format!("Failed to open file: {e}")))?;

                let reader = BufReader::new(file);
                let mut results = Vec::new();

                for line in reader.lines() {
                    let line = line.map_err(|e| {
                        SklearsError::InvalidInput(format!("Failed to read line: {e}"))
                    })?;
                    let result: BenchmarkResult = serde_json::from_str(&line).map_err(|e| {
                        SklearsError::InvalidInput(format!("Failed to parse result: {e}"))
                    })?;

                    if result.test_case == test_name {
                        results.push(result);
                    }
                }

                Ok(results)
            }
            BenchmarkStorage::Database { .. } => Err(SklearsError::NotImplemented(
                "Database storage not implemented".to_string(),
            )),
        }
    }

    fn get_all_results(&self) -> SklResult<Vec<BenchmarkResult>> {
        match &self.storage {
            BenchmarkStorage::Memory { results } => Ok(results.clone()),
            BenchmarkStorage::File { path } => {
                let file = File::open(path)
                    .map_err(|e| SklearsError::InvalidInput(format!("Failed to open file: {e}")))?;

                let reader = BufReader::new(file);
                let mut results = Vec::new();

                for line in reader.lines() {
                    let line = line.map_err(|e| {
                        SklearsError::InvalidInput(format!("Failed to read line: {e}"))
                    })?;
                    let result: BenchmarkResult = serde_json::from_str(&line).map_err(|e| {
                        SklearsError::InvalidInput(format!("Failed to parse result: {e}"))
                    })?;
                    results.push(result);
                }

                Ok(results)
            }
            BenchmarkStorage::Database { .. } => Err(SklearsError::NotImplemented(
                "Database storage not implemented".to_string(),
            )),
        }
    }

    fn detect_performance_regression(&self, results: &[BenchmarkResult]) -> SklResult<bool> {
        if results.len() < 2 {
            return Ok(false);
        }

        // Compare the most recent run against the immediately preceding one.
        let recent = &results[results.len() - 1];
        let baseline = &results[results.len() - 2];

        let regression_ratio = recent.metrics.execution_time.mean.as_secs_f64()
            / baseline.metrics.execution_time.mean.as_secs_f64();

        Ok(regression_ratio > (1.0 + self.regression_thresholds.relative_threshold))
    }

    fn calculate_regression_severity(
        &self,
        results: &[BenchmarkResult],
    ) -> SklResult<RegressionSeverity> {
        if results.len() < 2 {
            return Ok(RegressionSeverity::None);
        }

        let recent = &results[results.len() - 1];
        let baseline = &results[results.len() - 2];

        let regression_ratio = recent.metrics.execution_time.mean.as_secs_f64()
            / baseline.metrics.execution_time.mean.as_secs_f64();

        // Severity buckets by relative slowdown: <5% none, <15% minor,
        // <30% moderate, <50% severe, otherwise critical.
        match regression_ratio {
            r if r < 1.05 => Ok(RegressionSeverity::None),
            r if r < 1.15 => Ok(RegressionSeverity::Minor),
            r if r < 1.3 => Ok(RegressionSeverity::Moderate),
            r if r < 1.5 => Ok(RegressionSeverity::Severe),
            _ => Ok(RegressionSeverity::Critical),
        }
    }

    fn identify_affected_metrics(&self, _results: &[BenchmarkResult]) -> SklResult<Vec<String>> {
        // Stub: only execution time is currently tracked for regressions.
        Ok(vec!["execution_time".to_string()])
    }

    fn calculate_statistical_significance(&self, _results: &[BenchmarkResult]) -> SklResult<f64> {
        // Stub p-value pending a real statistical test implementation.
        Ok(0.05)
    }

    fn calculate_effect_size(&self, _results: &[BenchmarkResult]) -> SklResult<f64> {
        // Stub effect size pending a real implementation.
        Ok(0.5)
    }

    fn calculate_confidence_interval(&self, _results: &[BenchmarkResult]) -> SklResult<(f64, f64)> {
        // Stub confidence interval pending a real implementation.
        Ok((0.1, 0.3))
    }

    fn generate_detailed_analysis(&self, _results: &[BenchmarkResult]) -> SklResult<String> {
        // Stub narrative pending a real analysis implementation.
        Ok(
            "Performance analysis complete. Minor regression detected in execution time."
                .to_string(),
        )
    }

    fn generate_recommendations(
        &self,
        _results: &[BenchmarkResult],
        severity: &RegressionSeverity,
    ) -> Vec<String> {
        match severity {
            RegressionSeverity::None => vec!["Performance is stable".to_string()],
            RegressionSeverity::Minor => vec![
                "Monitor performance in future releases".to_string(),
                "Consider profiling to identify optimization opportunities".to_string(),
            ],
            RegressionSeverity::Moderate => vec![
                "Investigate recent changes that may have caused regression".to_string(),
                "Run detailed profiling to identify bottlenecks".to_string(),
                "Consider reverting problematic changes".to_string(),
            ],
            RegressionSeverity::Severe | RegressionSeverity::Critical => vec![
                "Immediate investigation required".to_string(),
                "Consider blocking release until regression is fixed".to_string(),
                "Run comprehensive profiling and analysis".to_string(),
                "Review all recent changes".to_string(),
            ],
        }
    }
}

pub struct PerformanceReport {
    pub results: Vec<BenchmarkResult>,
    pub summary: ReportSummary,
    pub trends: TrendAnalysis,
    pub regressions: Vec<RegressionAlert>,
}

#[derive(Clone, Debug)]
pub struct ReportSummary {
    pub total_benchmarks: usize,
    pub test_cases: usize,
    pub time_range: (DateTime<Utc>, DateTime<Utc>),
    pub average_metrics: PerformanceMetrics,
}

#[derive(Clone, Debug)]
pub struct TrendAnalysis {
    pub trends_by_test: HashMap<String, PerformanceTrend>,
    pub overall_trend: PerformanceTrend,
}

#[derive(Clone, Debug)]
pub enum PerformanceTrend {
    Improving { rate: f64 },
    Stable,
    Degrading { rate: f64 },
    Insufficient,
}

#[derive(Clone, Debug)]
pub struct RegressionAlert {
    pub test_case: String,
    pub severity: RegressionSeverity,
    pub description: String,
    pub detected_at: DateTime<Utc>,
}

impl PerformanceReport {
    #[must_use]
    pub fn new(results: Vec<BenchmarkResult>, _config: &StatisticalAnalysisConfig) -> Self {
        let summary = ReportSummary::from_results(&results);
        let trends = TrendAnalysis::from_results(&results);
        let regressions = Self::detect_regressions(&results);

        Self {
            results,
            summary,
            trends,
            regressions,
        }
    }

    fn detect_regressions(_results: &[BenchmarkResult]) -> Vec<RegressionAlert> {
        // Stub: report-level regression detection is not implemented yet.
        vec![]
    }
}

impl ReportSummary {
    fn from_results(results: &[BenchmarkResult]) -> Self {
        let total_benchmarks = results.len();
        let test_cases = results
            .iter()
            .map(|r| r.test_case.clone())
            .collect::<std::collections::HashSet<_>>()
            .len();

        let (start_time, end_time) = if results.is_empty() {
            (Utc::now(), Utc::now())
        } else {
            let start = results.iter().map(|r| r.timestamp).min().unwrap();
            let end = results.iter().map(|r| r.timestamp).max().unwrap();
            (start, end)
        };

        // Placeholder aggregate metrics; averaging over `results` is not
        // implemented yet.
        let average_metrics = PerformanceMetrics {
            execution_time: TimeStatistics {
                mean: Duration::from_millis(100),
                median: Duration::from_millis(95),
                std_dev: Duration::from_millis(10),
                min: Duration::from_millis(80),
                max: Duration::from_millis(150),
                p95: Duration::from_millis(130),
                p99: Duration::from_millis(145),
                samples: vec![],
            },
            memory_usage: MemoryStatistics {
                peak_usage: 1024 * 1024,
                average_usage: 512 * 1024,
                allocations: 100,
                deallocations: 95,
                fragmentation_score: 0.1,
            },
            throughput: ThroughputMetrics {
                ops_per_second: 100.0,
                samples_per_second: 1000.0,
                features_per_second: 10000.0,
                bytes_per_second: 80000.0,
            },
            cpu_utilization: CpuStatistics {
                average_utilization: 0.75,
                peak_utilization: 0.95,
                user_time: Duration::from_millis(100),
                kernel_time: Duration::from_millis(10),
            },
            custom_metrics: HashMap::new(),
        };

        Self {
            total_benchmarks,
            test_cases,
            time_range: (start_time, end_time),
            average_metrics,
        }
    }
}

impl TrendAnalysis {
    fn from_results(_results: &[BenchmarkResult]) -> Self {
        // Stub: trend fitting is not implemented; everything reports Stable.
        Self {
            trends_by_test: HashMap::new(),
            overall_trend: PerformanceTrend::Stable,
        }
    }
}

impl Default for StatisticalAnalysisConfig {
    fn default() -> Self {
        Self {
            confidence_level: 0.95,
            min_samples_for_trend: 5,
            rolling_window_size: 10,
            statistical_tests: vec![StatisticalTest::TTest, StatisticalTest::LinearRegression],
            outlier_detection: OutlierDetection::IQR { multiplier: 1.5 },
        }
    }
}

impl Default for EnvironmentConfig {
    fn default() -> Self {
        Self {
            warmup_iterations: 5,
            measurement_iterations: 10,
            cpu_affinity: None,
            memory_limit: None,
            capture_env_vars: vec!["RUST_VERSION".to_string(), "CARGO_PKG_VERSION".to_string()],
            collect_system_info: true,
        }
    }
}

impl Default for RegressionThresholds {
    fn default() -> Self {
        Self {
            relative_threshold: 0.05,
            absolute_threshold: Duration::from_millis(10),
            memory_threshold: 1024 * 1024,
            throughput_threshold: 0.05,
        }
    }
}

impl Default for ProfilingConfig {
    fn default() -> Self {
        Self {
            cpu_profiling: false,
            memory_profiling: false,
            sampling_frequency: Duration::from_millis(1),
            output_directory: None,
            detailed_call_stacks: false,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_performance_tester_creation() {
        let tester = PerformanceRegressionTester::new();
        assert!(matches!(tester.storage, BenchmarkStorage::Memory { .. }));
    }

    #[test]
    fn test_file_storage_creation() {
        let tester = PerformanceRegressionTester::with_file_storage("/tmp/benchmarks.jsonl");
        assert!(matches!(tester.storage, BenchmarkStorage::File { .. }));
    }

    #[test]
    fn test_time_statistics_calculation() {
        let tester = PerformanceRegressionTester::new();
        let times = vec![
            Duration::from_millis(100),
            Duration::from_millis(110),
            Duration::from_millis(95),
            Duration::from_millis(105),
            Duration::from_millis(120),
        ];

        let stats = tester.calculate_time_statistics(&times);
        assert_eq!(stats.min, Duration::from_millis(95));
        assert_eq!(stats.max, Duration::from_millis(120));
        assert_eq!(stats.samples.len(), 5);
    }

    #[test]
    fn test_benchmark_component() {
        let mut tester = PerformanceRegressionTester::new();

        let test_function = |x: i32| x * 2;
        let context = BenchmarkContext {
            data_size: (1000, 10),
            iterations: 5,
            config: HashMap::new(),
            random_seed: 42,
        };

        let result = tester.benchmark_component(&test_function, 42, &context, "test_multiply");
        assert!(result.is_ok());

        let benchmark_result = result.unwrap();
        assert_eq!(benchmark_result.test_case, "test_multiply");
        assert!(!benchmark_result.metrics.execution_time.samples.is_empty());
    }
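
    // A round-trip sketch (the test name "roundtrip" is illustrative):
    // results benchmarked with in-memory storage should be readable back
    // through `get_historical_results`.
    #[test]
    fn test_memory_storage_roundtrip() {
        let mut tester = PerformanceRegressionTester::new();
        let context = BenchmarkContext {
            data_size: (10, 2),
            iterations: 3,
            config: HashMap::new(),
            random_seed: 42,
        };

        for _ in 0..2 {
            tester
                .benchmark_component(&|x: i32| x + 1, 1, &context, "roundtrip")
                .unwrap();
        }

        let history = tester.get_historical_results("roundtrip").unwrap();
        assert_eq!(history.len(), 2);
    }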

    #[test]
    fn test_regression_severity_ordering() {
        assert!(RegressionSeverity::Critical > RegressionSeverity::Severe);
        assert!(RegressionSeverity::Severe > RegressionSeverity::Moderate);
        assert!(RegressionSeverity::Moderate > RegressionSeverity::Minor);
        assert!(RegressionSeverity::Minor > RegressionSeverity::None);
    }
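
    // A sketch checking that recommendation text tracks severity; the
    // expected strings mirror `generate_recommendations` above.
    #[test]
    fn test_recommendations_by_severity() {
        let tester = PerformanceRegressionTester::new();

        let stable = tester.generate_recommendations(&[], &RegressionSeverity::None);
        assert_eq!(stable, vec!["Performance is stable".to_string()]);

        let critical = tester.generate_recommendations(&[], &RegressionSeverity::Critical);
        assert!(critical.contains(&"Immediate investigation required".to_string()));
    }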

    #[test]
    fn test_throughput_calculation() {
        let tester = PerformanceRegressionTester::new();
        let time_stats = TimeStatistics {
            mean: Duration::from_millis(100),
            median: Duration::from_millis(100),
            std_dev: Duration::from_millis(5),
            min: Duration::from_millis(90),
            max: Duration::from_millis(110),
            p95: Duration::from_millis(108),
            p99: Duration::from_millis(110),
            samples: vec![],
        };

        let context = BenchmarkContext {
            data_size: (1000, 10),
            iterations: 10,
            config: HashMap::new(),
            random_seed: 42,
        };

        let throughput = tester.calculate_throughput(&time_stats, &context);
        assert_eq!(throughput.ops_per_second, 10.0);
        assert_eq!(throughput.samples_per_second, 10000.0);
    }
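
    // A sketch of the insufficient-data path in `analyze_regressions`: with
    // fewer stored samples than `min_samples_for_trend`, no regression is
    // reported (the test name queried here is illustrative).
    #[test]
    fn test_analyze_regressions_insufficient_data() {
        let tester = PerformanceRegressionTester::new();
        let analysis = tester.analyze_regressions("unseen_test").unwrap();
        assert!(!analysis.regression_detected);
        assert_eq!(analysis.severity, RegressionSeverity::None);
        assert_eq!(analysis.p_value, 1.0);
    }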
}