use crate::error::{StatsError, StatsResult};
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::parallel_ops::*;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::Instant;

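/// Configuration for a benchmark run.
///
/// A minimal usage sketch (assumes this module is reachable from the crate
/// root; struct-update syntax fills the remaining fields from `Default`):
///
/// ```ignore
/// let config = BenchmarkConfig {
///     datasizes: vec![1_000, 100_000],
///     iterations: 25,
///     ..Default::default()
/// };
/// let mut suite = BenchmarkSuite::with_config(config);
/// ```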
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkConfig {
    /// Data sizes to benchmark.
    pub datasizes: Vec<usize>,
    /// Number of timed iterations per benchmark.
    pub iterations: usize,
    /// Whether to track memory usage.
    pub track_memory: bool,
    /// Whether to compare against stored baselines.
    pub comparebaseline: bool,
    /// Whether to benchmark SIMD variants.
    pub test_simd: bool,
    /// Whether to benchmark parallel variants.
    pub test_parallel: bool,
    /// Warm-up iterations run before timing starts.
    pub warmup_iterations: usize,
    /// Confidence level reported alongside regressions.
    pub confidence_level: f64,
    /// Regression threshold in percent.
    pub regression_threshold: f64,
}

impl Default for BenchmarkConfig {
    fn default() -> Self {
        Self {
            datasizes: vec![100, 1000, 10000, 100000, 1000000],
            iterations: 100,
            track_memory: true,
            comparebaseline: true,
            test_simd: true,
            test_parallel: true,
            warmup_iterations: 10,
            confidence_level: 0.95,
            regression_threshold: 5.0, // percent slowdown that counts as a regression
        }
    }
}

/// Collected metrics for one benchmarked function at one data size.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkMetrics {
    pub function_name: String,
    pub datasize: usize,
    pub timing: TimingStats,
    pub memory: Option<MemoryStats>,
    pub algorithm_config: AlgorithmConfig,
    /// Elements processed per second, derived from the mean time.
    pub throughput: f64,
    /// Ratio of baseline time to current time (> 1.0 means faster than baseline).
    pub baseline_comparison: Option<f64>,
}

/// Timing summary in nanoseconds.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimingStats {
    pub mean_ns: f64,
    pub std_dev_ns: f64,
    pub min_ns: f64,
    pub max_ns: f64,
    pub median_ns: f64,
    pub p95_ns: f64,
    pub p99_ns: f64,
}

/// Memory usage summary for a benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryStats {
    pub peak_bytes: usize,
    pub allocations: usize,
    pub deallocations: usize,
    pub avg_allocationsize: f64,
    /// Heuristic fragmentation score in [0, 1]; higher means more fragmented.
    pub fragmentation_score: f64,
}

/// Which optimizations were (detected as) active for a benchmarked function.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlgorithmConfig {
    pub simd_enabled: bool,
    pub parallel_enabled: bool,
    pub thread_count: Option<usize>,
    pub simd_width: Option<usize>,
    pub algorithm_variant: String,
}

/// Full report produced by a benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkReport {
    pub timestamp: String,
    pub config: BenchmarkConfig,
    pub metrics: Vec<BenchmarkMetrics>,
    pub analysis: PerformanceAnalysis,
    pub system_info: SystemInfo,
    pub recommendations: Vec<OptimizationRecommendation>,
}

/// Aggregate analysis over all collected metrics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceAnalysis {
    pub overall_score: f64,
    /// Speedup of each SIMD variant over its scalar counterpart.
    pub simd_effectiveness: HashMap<String, f64>,
    /// Speedup of each parallel variant over its serial counterpart.
    pub parallel_effectiveness: HashMap<String, f64>,
    pub memory_efficiency: f64,
    pub regressions: Vec<PerformanceRegression>,
    pub scaling_analysis: ScalingAnalysis,
}

/// A detected slowdown relative to the stored baseline.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceRegression {
    pub function_name: String,
    pub datasize: usize,
    pub regression_percent: f64,
    pub confidence: f64,
    pub suspected_cause: String,
}

/// How timing and memory scale with input size, per function.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScalingAnalysis {
    pub complexity_analysis: HashMap<String, ComplexityClass>,
    pub threshold_recommendations: HashMap<String, Vec<ThresholdRecommendation>>,
    pub memory_scaling: HashMap<String, MemoryScaling>,
}

/// Asymptotic growth class inferred from timing ratios.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ComplexityClass {
    Constant,
    Logarithmic,
    Linear,
    LinearLogarithmic,
    Quadratic,
    Cubic,
    Exponential,
    Unknown,
}

/// Suggested data-size threshold at which to switch algorithm variants.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ThresholdRecommendation {
    pub threshold: usize,
    pub recommendation: String,
    pub improvement_factor: f64,
}

/// How peak memory grows with input size.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryScaling {
    pub scaling_factor: f64,
    pub base_overhead: usize,
    pub efficiency_trend: MemoryTrend,
}

/// Direction of memory efficiency across increasing data sizes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MemoryTrend {
    Improving,
    Degrading,
    Stable,
}

/// Host system details recorded alongside the results.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SystemInfo {
    pub cpu_info: String,
    pub total_memory: usize,
    pub cpu_cores: usize,
    pub simd_capabilities: Vec<String>,
    pub os_info: String,
    pub rust_version: String,
}

/// A prioritized optimization suggestion derived from the analysis.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationRecommendation {
    /// Priority from 1 (low) to 5 (high).
    pub priority: u8,
    pub target: String,
    pub strategy: String,
    pub expected_impact: String,
    pub complexity: String,
}

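/// Benchmark suite driving the timing runs and report generation.
///
/// A minimal sketch of an end-to-end run (hypothetical caller code; the `?`
/// assumes a `StatsResult` context):
///
/// ```ignore
/// let mut suite = BenchmarkSuite::new();
/// let report = suite.benchmark_descriptive_stats()?;
/// println!("overall score: {:.1}", report.analysis.overall_score);
/// for rec in &report.recommendations {
///     println!("[p{}] {}: {}", rec.priority, rec.target, rec.strategy);
/// }
/// ```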
pub struct BenchmarkSuite {
    config: BenchmarkConfig,
    memory_tracker: Option<Arc<Mutex<MemoryTracker>>>,
    #[allow(dead_code)]
    baseline_cache: HashMap<String, f64>,
}

/// Allocation-counting state. No allocator hook is installed in this module,
/// so the counters only move if wired up to a tracking allocator elsewhere.
struct MemoryTracker {
    #[allow(dead_code)]
    initial_memory: usize,
    peak_memory: usize,
    allocations: usize,
    deallocations: usize,
    allocationsizes: Vec<usize>,
}

impl BenchmarkSuite {
    /// Creates a suite with the default configuration.
    pub fn new() -> Self {
        Self::with_config(BenchmarkConfig::default())
    }

    /// Creates a suite with a custom configuration.
    pub fn with_config(config: BenchmarkConfig) -> Self {
        let memory_tracker = if config.track_memory {
            Some(Arc::new(Mutex::new(MemoryTracker::new())))
        } else {
            None
        };

        Self {
            config,
            memory_tracker,
            baseline_cache: HashMap::new(),
        }
    }

    /// Benchmarks descriptive statistics (mean, variance, standard deviation)
    /// across all configured data sizes, including SIMD and parallel variants
    /// when enabled.
    pub fn benchmark_descriptive_stats(&mut self) -> StatsResult<BenchmarkReport> {
        let mut metrics = Vec::new();

        for &size in &self.config.datasizes {
            let data = self.generate_testdata(size)?;

            metrics.push(
                self.benchmark_function("mean", size, || crate::descriptive::mean(&data.view()))?,
            );

            metrics.push(self.benchmark_function("variance", size, || {
                crate::descriptive::var(&data.view(), 1, None)
            })?);

            metrics.push(self.benchmark_function("std_dev", size, || {
                crate::descriptive::std(&data.view(), 1, None)
            })?);

            if self.config.test_simd {
                metrics.push(self.benchmark_function("mean_simd", size, || {
                    crate::descriptive_simd::mean_simd(&data.view())
                })?);

                metrics.push(self.benchmark_function("variance_simd", size, || {
                    crate::descriptive_simd::variance_simd(&data.view(), 1)
                })?);

                metrics.push(self.benchmark_function("std_simd", size, || {
                    crate::descriptive_simd::std_simd(&data.view(), 1)
                })?);
            }

            // Parallel variants only pay off above a size threshold.
            if self.config.test_parallel && size > 10000 {
                metrics.push(self.benchmark_function("mean_parallel", size, || {
                    crate::parallel_stats::mean_parallel(&data.view())
                })?);

                metrics.push(self.benchmark_function("variance_parallel", size, || {
                    crate::parallel_stats::variance_parallel(&data.view(), 1)
                })?);
            }
        }

        let analysis = self.analyze_performance(&metrics)?;
        let system_info = self.collect_system_info();
        let recommendations = self.generate_recommendations(&metrics, &analysis);

        Ok(BenchmarkReport {
            timestamp: chrono::Utc::now().to_rfc3339(),
            config: self.config.clone(),
            metrics,
            analysis,
            system_info,
            recommendations,
        })
    }

    /// Benchmarks correlation functions on pairs of correlated series.
    pub fn benchmark_correlation(&mut self) -> StatsResult<BenchmarkReport> {
        let mut metrics = Vec::new();

        for &size in &self.config.datasizes {
            let data_x = self.generate_testdata(size)?;
            let data_y = self.generate_correlateddata(&data_x, 0.7)?;

            metrics.push(self.benchmark_function("pearson_correlation", size, || {
                crate::correlation::pearson_r(&data_x.view(), &data_y.view())
            })?);

            metrics.push(self.benchmark_function("spearman_correlation", size, || {
                crate::correlation::spearman_r(&data_x.view(), &data_y.view())
            })?);

            if self.config.test_simd {
                metrics.push(
                    self.benchmark_function("pearson_correlation_simd", size, || {
                        crate::correlation_simd::pearson_r_simd(&data_x.view(), &data_y.view())
                    })?,
                );
            }

            // Keep matrix benchmarks bounded to avoid excessive runtimes.
            if size <= 100000 {
                let matrixdata = self.generate_matrixdata(size, 5)?;

                metrics.push(self.benchmark_function("correlation_matrix", size, || {
                    crate::correlation::corrcoef(&matrixdata.view(), "pearson")
                })?);
            }
        }

        let analysis = self.analyze_performance(&metrics)?;
        let system_info = self.collect_system_info();
        let recommendations = self.generate_recommendations(&metrics, &analysis);

        Ok(BenchmarkReport {
            timestamp: chrono::Utc::now().to_rfc3339(),
            config: self.config.clone(),
            metrics,
            analysis,
            system_info,
            recommendations,
        })
    }

    /// Benchmarks distribution PDF/CDF evaluation and random sampling.
    /// Single-evaluation benchmarks use a nominal data size of 1.
    pub fn benchmark_distributions(&mut self) -> StatsResult<BenchmarkReport> {
        let mut metrics = Vec::new();

        for &size in &self.config.datasizes {
            let normal = crate::distributions::norm(0.0f64, 1.0)?;

            metrics.push(self.benchmark_function("normal_pdf_single", 1, || Ok(normal.pdf(0.5)))?);

            metrics.push(self.benchmark_function("normal_cdf_single", 1, || Ok(normal.cdf(1.96)))?);

            metrics.push(self.benchmark_function("normal_rvs", size, || normal.rvs(size))?);

            if size <= 100000 {
                let gamma = crate::distributions::gamma(2.0f64, 1.0, 0.0)?;
                metrics.push(self.benchmark_function("gamma_rvs", size, || gamma.rvs(size))?);

                let beta = crate::distributions::beta(2.0f64, 3.0, 0.0, 1.0)?;
                metrics.push(self.benchmark_function("beta_rvs", size, || beta.rvs(size))?);
            }
        }

        let analysis = self.analyze_performance(&metrics)?;
        let system_info = self.collect_system_info();
        let recommendations = self.generate_recommendations(&metrics, &analysis);

        Ok(BenchmarkReport {
            timestamp: chrono::Utc::now().to_rfc3339(),
            config: self.config.clone(),
            metrics,
            analysis,
            system_info,
            recommendations,
        })
    }

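    /// Core timing harness: runs `warmup_iterations` untimed calls, then
    /// `iterations` timed calls, and summarizes the sorted timings into
    /// mean/median/p95/p99 statistics.
    ///
    /// A hedged sketch of direct use (normally only called internally):
    ///
    /// ```ignore
    /// let metrics = suite.benchmark_function("noop", 1, || Ok(0u64))?;
    /// assert_eq!(metrics.function_name, "noop");
    /// ```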
    fn benchmark_function<F, R>(
        &self,
        function_name: &str,
        datasize: usize,
        mut func: F,
    ) -> StatsResult<BenchmarkMetrics>
    where
        F: FnMut() -> StatsResult<R>,
    {
        // Warm up caches and branch predictors before timing.
        for _ in 0..self.config.warmup_iterations {
            let _ = func();
        }

        let mut timings = Vec::with_capacity(self.config.iterations);
        let mut memory_stats = None;

        if let Some(ref tracker) = self.memory_tracker {
            let mut tracker_guard = tracker.lock().unwrap();
            tracker_guard.reset();
        }

        for _ in 0..self.config.iterations {
            let start = Instant::now();
            let _ = func()?;
            let duration = start.elapsed();
            timings.push(duration.as_nanos() as f64);
        }

        if let Some(ref tracker) = self.memory_tracker {
            let tracker_guard = tracker.lock().unwrap();
            memory_stats = Some(tracker_guard.get_stats());
        }

        timings.sort_by(|a, b| a.partial_cmp(b).unwrap());

        let timing_stats = TimingStats {
            mean_ns: timings.iter().sum::<f64>() / timings.len() as f64,
            std_dev_ns: self.calculate_std_dev(&timings),
            min_ns: timings[0],
            max_ns: timings[timings.len() - 1],
            median_ns: timings[timings.len() / 2],
            // Clamp percentile indices so small iteration counts cannot panic.
            p95_ns: timings[((timings.len() as f64 * 0.95) as usize).min(timings.len() - 1)],
            p99_ns: timings[((timings.len() as f64 * 0.99) as usize).min(timings.len() - 1)],
        };

        let algorithm_config = self.detect_algorithm_config(function_name, datasize);

        // Throughput in elements per second: datasize / (mean_ns / 1e9).
        let throughput = if timing_stats.mean_ns > 0.0 {
            1_000_000_000.0 / timing_stats.mean_ns * datasize as f64
        } else {
            0.0
        };

        let baseline_comparison = if self.config.comparebaseline {
            self.getbaseline_comparison(function_name, datasize, timing_stats.mean_ns)
        } else {
            None
        };

        Ok(BenchmarkMetrics {
            function_name: function_name.to_string(),
            datasize,
            timing: timing_stats,
            memory: memory_stats,
            algorithm_config,
            throughput,
            baseline_comparison,
        })
    }

    /// Generates standard-normal test data of the given size.
    fn generate_testdata(&self, size: usize) -> StatsResult<Array1<f64>> {
        use scirs2_core::random::{Distribution, Normal};

        let mut rng = scirs2_core::random::thread_rng();
        let normal = Normal::new(0.0, 1.0).map_err(|e| {
            StatsError::ComputationError(format!("Failed to create normal distribution: {}", e))
        })?;

        let data: Vec<f64> = (0..size).map(|_| normal.sample(&mut rng)).collect();

        Ok(Array1::from_vec(data))
    }

    /// Generates data with approximately the requested correlation to
    /// `basedata` by mixing it with independent Gaussian noise.
    fn generate_correlateddata(
        &self,
        basedata: &Array1<f64>,
        correlation: f64,
    ) -> StatsResult<Array1<f64>> {
        use scirs2_core::random::{Distribution, Normal};

        let mut rng = scirs2_core::random::thread_rng();
        let normal = Normal::new(0.0, 1.0).map_err(|e| {
            StatsError::ComputationError(format!("Failed to create normal distribution: {}", e))
        })?;

        // y = rho * x + sqrt(1 - rho^2) * noise preserves unit variance.
        let noise_factor = (1.0 - correlation * correlation).sqrt();

        let correlateddata: Vec<f64> = basedata
            .iter()
            .map(|&x| correlation * x + noise_factor * normal.sample(&mut rng))
            .collect();

        Ok(Array1::from_vec(correlateddata))
    }

    /// Generates a rows-by-cols matrix of standard-normal data.
    fn generate_matrixdata(&self, rows: usize, cols: usize) -> StatsResult<Array2<f64>> {
        use scirs2_core::random::{Distribution, Normal};

        let mut rng = scirs2_core::random::thread_rng();
        let normal = Normal::new(0.0, 1.0).map_err(|e| {
            StatsError::ComputationError(format!("Failed to create normal distribution: {}", e))
        })?;

        let data: Vec<f64> = (0..rows * cols).map(|_| normal.sample(&mut rng)).collect();

        Array2::from_shape_vec((rows, cols), data)
            .map_err(|e| StatsError::ComputationError(format!("Failed to create matrix: {}", e)))
    }

    /// Population standard deviation of the timing samples.
    fn calculate_std_dev(&self, values: &[f64]) -> f64 {
        let mean = values.iter().sum::<f64>() / values.len() as f64;
        let variance = values.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / values.len() as f64;
        variance.sqrt()
    }

    /// Infers which optimizations a given function/size combination uses,
    /// based on its name and the detected platform capabilities.
    fn detect_algorithm_config(&self, function_name: &str, datasize: usize) -> AlgorithmConfig {
        let simd_enabled = function_name.contains("simd")
            || (datasize > 64
                && scirs2_core::simd_ops::PlatformCapabilities::detect().simd_available);
        let parallel_enabled =
            function_name.contains("parallel") || (datasize > 10000 && num_threads() > 1);

        AlgorithmConfig {
            simd_enabled,
            parallel_enabled,
            thread_count: if parallel_enabled {
                Some(num_threads())
            } else {
                None
            },
            // Assumed 8-lane SIMD width; the actual width depends on the target ISA.
            simd_width: if simd_enabled { Some(8) } else { None },
            algorithm_variant: function_name.to_string(),
        }
    }

    /// Returns the ratio of baseline time to current time (> 1.0 means the
    /// current run is faster than the baseline).
    fn getbaseline_comparison(
        &self,
        _function_name: &str,
        _datasize: usize,
        current_time_ns: f64,
    ) -> Option<f64> {
        // Placeholder: a real implementation would load a stored baseline.
        // The simulated baseline is 20% slower, so the ratio is always 1.2.
        let simulatedbaseline = current_time_ns * 1.2;
        Some(simulatedbaseline / current_time_ns)
    }

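    /// Derives SIMD/parallel speedups by pairing each `*_simd` / `*_parallel`
    /// metric with its scalar counterpart at the same size: the improvement is
    /// `base_mean_ns / variant_mean_ns`, so 2.0 means the variant runs twice
    /// as fast.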
    fn analyze_performance(
        &self,
        metrics: &[BenchmarkMetrics],
    ) -> StatsResult<PerformanceAnalysis> {
        let mut simd_effectiveness = HashMap::new();
        let mut parallel_effectiveness = HashMap::new();
        let mut regressions = Vec::new();

        // Pair each SIMD variant with its scalar counterpart at the same size.
        for metric in metrics {
            if metric.algorithm_config.simd_enabled {
                let base_name = metric.function_name.replace("_simd", "");
                if let Some(base_metric) = metrics.iter().find(|m| {
                    m.function_name == base_name
                        && m.datasize == metric.datasize
                        && !m.algorithm_config.simd_enabled
                }) {
                    let improvement = base_metric.timing.mean_ns / metric.timing.mean_ns;
                    simd_effectiveness
                        .insert(format!("{}_{}", base_name, metric.datasize), improvement);
                }
            }
        }

        // Same pairing for parallel variants.
        for metric in metrics {
            if metric.algorithm_config.parallel_enabled {
                let base_name = metric.function_name.replace("_parallel", "");
                if let Some(base_metric) = metrics.iter().find(|m| {
                    m.function_name == base_name
                        && m.datasize == metric.datasize
                        && !m.algorithm_config.parallel_enabled
                }) {
                    let improvement = base_metric.timing.mean_ns / metric.timing.mean_ns;
                    parallel_effectiveness
                        .insert(format!("{}_{}", base_name, metric.datasize), improvement);
                }
            }
        }

        // Flag any metric whose baseline ratio falls below the threshold.
        for metric in metrics {
            if let Some(baseline_ratio) = metric.baseline_comparison {
                if baseline_ratio < (1.0 - self.config.regression_threshold / 100.0) {
                    let regression_percent = (1.0 - baseline_ratio) * 100.0;
                    regressions.push(PerformanceRegression {
                        function_name: metric.function_name.clone(),
                        datasize: metric.datasize,
                        regression_percent,
                        confidence: self.config.confidence_level,
                        suspected_cause: "Algorithm or system change".to_string(),
                    });
                }
            }
        }

        let mean_throughput =
            metrics.iter().map(|m| m.throughput).sum::<f64>() / metrics.len() as f64;
        // Normalize to a 0-100 score; 1M elements/s maps to one point.
        let overall_score = (mean_throughput / 1_000_000.0).min(100.0);

        // Average only over metrics that actually carry memory stats; without
        // any, report full efficiency rather than diluting the score to zero.
        let memory_scores: Vec<f64> = metrics
            .iter()
            .filter_map(|m| m.memory.as_ref())
            .map(|mem| 100.0 * (1.0 - mem.fragmentation_score))
            .collect();
        let memory_efficiency = if memory_scores.is_empty() {
            100.0
        } else {
            memory_scores.iter().sum::<f64>() / memory_scores.len() as f64
        };

        let scaling_analysis = self.analyze_scaling(metrics)?;

        Ok(PerformanceAnalysis {
            overall_score,
            simd_effectiveness,
            parallel_effectiveness,
            memory_efficiency,
            regressions,
            scaling_analysis,
        })
    }

    fn analyze_scaling(&self, metrics: &[BenchmarkMetrics]) -> StatsResult<ScalingAnalysis> {
        let mut complexity_analysis = HashMap::new();
        let mut threshold_recommendations = HashMap::new();
        let mut memory_scaling = HashMap::new();

        // Group metrics by function so each can be analyzed across sizes.
        let mut function_groups: HashMap<String, Vec<&BenchmarkMetrics>> = HashMap::new();
        for metric in metrics {
            function_groups
                .entry(metric.function_name.clone())
                .or_default()
                .push(metric);
        }

        for (function_name, function_metrics) in function_groups {
            if function_metrics.len() < 3 {
                continue; // Need at least three sizes to fit a trend.
            }

            let mut sorted_metrics = function_metrics;
            sorted_metrics.sort_by_key(|m| m.datasize);

            let complexity = self.classify_complexity(&sorted_metrics);
            complexity_analysis.insert(function_name.clone(), complexity);

            let thresholds = self.generate_thresholds(&sorted_metrics);
            if !thresholds.is_empty() {
                threshold_recommendations.insert(function_name.clone(), thresholds);
            }

            if let Some(scaling) = self.analyze_memory_scaling(&sorted_metrics) {
                memory_scaling.insert(function_name, scaling);
            }
        }

        Ok(ScalingAnalysis {
            complexity_analysis,
            threshold_recommendations,
            memory_scaling,
        })
    }

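    /// Classifies growth by comparing consecutive time ratios to size ratios.
    /// Worked example: if each 10x size increase costs roughly 10x the time,
    /// then `avg_time_ratio / avgsize_ratio ≈ 1.0 < 1.5`, which classifies as
    /// `ComplexityClass::Linear`; a 100x time increase per 10x size would fall
    /// through to the quadratic check (`100 / (10 * 10) = 1.0 < 2.0`).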
    fn classify_complexity(&self, metrics: &[&BenchmarkMetrics]) -> ComplexityClass {
        if metrics.len() < 3 {
            return ComplexityClass::Unknown;
        }

        let sizes: Vec<f64> = metrics.iter().map(|m| m.datasize as f64).collect();
        let times: Vec<f64> = metrics.iter().map(|m| m.timing.mean_ns).collect();

        // Ratios between consecutive measurements.
        let size_ratios: Vec<f64> = sizes.windows(2).map(|w| w[1] / w[0]).collect();
        let time_ratios: Vec<f64> = times.windows(2).map(|w| w[1] / w[0]).collect();

        if time_ratios.is_empty() {
            return ComplexityClass::Unknown;
        }

        let avg_time_ratio = time_ratios.iter().sum::<f64>() / time_ratios.len() as f64;
        let avgsize_ratio = size_ratios.iter().sum::<f64>() / size_ratios.len() as f64;

        if avg_time_ratio < 1.1 {
            ComplexityClass::Constant
        } else if avg_time_ratio / avgsize_ratio < 1.5 {
            ComplexityClass::Linear
        } else if avg_time_ratio / (avgsize_ratio * avgsize_ratio.log2()) < 2.0 {
            ComplexityClass::LinearLogarithmic
        } else if avg_time_ratio / (avgsize_ratio * avgsize_ratio) < 2.0 {
            ComplexityClass::Quadratic
        } else {
            ComplexityClass::Unknown
        }
    }

    fn generate_thresholds(&self, _metrics: &[&BenchmarkMetrics]) -> Vec<ThresholdRecommendation> {
        // Placeholder: crossover analysis (e.g., the size at which SIMD or
        // parallel variants start to win) is not implemented yet.
        Vec::new()
    }

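    /// Estimates memory scaling as `(mem_last / mem_first) / (size_last / size_first)`:
    /// a factor near 1.0 means peak memory grows linearly with input size,
    /// above 1.0 superlinearly. Returns `None` without at least two samples
    /// that carry memory stats.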
    fn analyze_memory_scaling(&self, metrics: &[&BenchmarkMetrics]) -> Option<MemoryScaling> {
        let memory_data: Vec<_> = metrics
            .iter()
            .filter_map(|m| m.memory.as_ref().map(|mem| (m.datasize, mem.peak_bytes)))
            .collect();

        if memory_data.len() < 2 {
            return None;
        }

        let (sizes, memories): (Vec<f64>, Vec<f64>) = memory_data
            .iter()
            .map(|(size, mem)| (*size as f64, *mem as f64))
            .unzip();

        // The early return above guarantees at least two samples.
        let size_growth = sizes[sizes.len() - 1] / sizes[0];
        let memory_growth = memories[memories.len() - 1] / memories[0];
        let scaling_factor = memory_growth / size_growth;

        Some(MemoryScaling {
            scaling_factor,
            base_overhead: memory_data[0].1,
            efficiency_trend: MemoryTrend::Stable, // Placeholder until trend detection exists.
        })
    }

    fn collect_system_info(&self) -> SystemInfo {
        SystemInfo {
            cpu_info: "Generic CPU".to_string(), // Placeholder; not queried from the OS.
            total_memory: 8 * 1024 * 1024 * 1024, // Placeholder: assume 8 GiB.
            cpu_cores: num_threads(),
            simd_capabilities: vec!["SSE2".to_string(), "AVX2".to_string()], // Placeholder list.
            os_info: std::env::consts::OS.to_string(),
            rust_version: std::env::var("RUSTC_VERSION").unwrap_or_else(|_| "unknown".to_string()),
        }
    }

    fn generate_recommendations(
        &self,
        _metrics: &[BenchmarkMetrics],
        analysis: &PerformanceAnalysis,
    ) -> Vec<OptimizationRecommendation> {
        let mut recommendations = Vec::new();

        // Flag SIMD variants that deliver less than a 1.5x speedup.
        for (function, effectiveness) in &analysis.simd_effectiveness {
            if *effectiveness < 1.5 {
                recommendations.push(OptimizationRecommendation {
                    priority: 4,
                    target: function.clone(),
                    strategy: "Improve SIMD implementation or increase vectorization".to_string(),
                    expected_impact: format!("Potential {:.1}x speedup", 2.0 - effectiveness),
                    complexity: "Medium".to_string(),
                });
            }
        }

        if analysis.memory_efficiency < 80.0 {
            recommendations.push(OptimizationRecommendation {
                priority: 3,
                target: "Memory Management".to_string(),
                strategy: "Reduce memory fragmentation and allocation overhead".to_string(),
                expected_impact: "Improved cache performance and reduced allocator pressure"
                    .to_string(),
                complexity: "High".to_string(),
            });
        }

        for regression in &analysis.regressions {
            recommendations.push(OptimizationRecommendation {
                priority: 5,
                target: regression.function_name.clone(),
                strategy: format!(
                    "Investigate {:.1}% performance regression",
                    regression.regression_percent
                ),
                expected_impact: "Restore baseline performance".to_string(),
                complexity: "Variable".to_string(),
            });
        }

        recommendations
    }
}

impl MemoryTracker {
    fn new() -> Self {
        Self {
            initial_memory: 0,
            peak_memory: 0,
            allocations: 0,
            deallocations: 0,
            allocationsizes: Vec::new(),
        }
    }

    fn reset(&mut self) {
        self.initial_memory = 0;
        self.peak_memory = 0;
        self.allocations = 0;
        self.deallocations = 0;
        self.allocationsizes.clear();
    }

    fn get_stats(&self) -> MemoryStats {
        let avg_allocationsize = if self.allocations > 0 {
            self.allocationsizes.iter().sum::<usize>() as f64 / self.allocations as f64
        } else {
            0.0
        };

        // Heuristic score in [0, 1]: more allocations relative to peak memory
        // reads as less fragmentation.
        let fragmentation_score = if self.peak_memory > 0 {
            1.0 - (self.allocations as f64 / self.peak_memory as f64)
        } else {
            0.0
        };

        MemoryStats {
            peak_bytes: self.peak_memory,
            allocations: self.allocations,
            deallocations: self.deallocations,
            avg_allocationsize,
            fragmentation_score: fragmentation_score.clamp(0.0, 1.0),
        }
    }
}

impl Default for BenchmarkSuite {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_benchmark_suite_creation() {
        let suite = BenchmarkSuite::new();
        assert_eq!(suite.config.datasizes.len(), 5);
        assert_eq!(suite.config.iterations, 100);
    }
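
    #[test]
    fn test_benchmark_function_smoke() {
        // Added smoke test (a minimal sketch): drive a trivial closure through
        // the private timing harness with a tiny iteration count and check the
        // summary it produces.
        let config = BenchmarkConfig {
            iterations: 3,
            warmup_iterations: 1,
            track_memory: false,
            ..Default::default()
        };
        let suite = BenchmarkSuite::with_config(config);
        let metrics = suite.benchmark_function("noop", 1, || Ok(0u64)).unwrap();
        assert_eq!(metrics.function_name, "noop");
        assert!(metrics.timing.min_ns <= metrics.timing.max_ns);
    }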

    #[test]
    fn test_testdata_generation() {
        let suite = BenchmarkSuite::new();
        let data = suite.generate_testdata(1000).unwrap();
        assert_eq!(data.len(), 1000);
    }

    #[test]
    fn test_correlateddata_generation() {
        let suite = BenchmarkSuite::new();
        let basedata = suite.generate_testdata(100).unwrap();
        let correlateddata = suite.generate_correlateddata(&basedata, 0.8).unwrap();
        assert_eq!(correlateddata.len(), 100);
    }

    #[test]
    fn test_complexity_classification() {
        let suite = BenchmarkSuite::new();

        // Helper: build a metric whose timing stats scale with the mean; only
        // `datasize` and `mean_ns` matter for the classification.
        fn make_metric(datasize: usize, mean_ns: f64) -> BenchmarkMetrics {
            BenchmarkMetrics {
                function_name: "test".to_string(),
                datasize,
                timing: TimingStats {
                    mean_ns,
                    std_dev_ns: 0.1 * mean_ns,
                    min_ns: 0.9 * mean_ns,
                    max_ns: 1.1 * mean_ns,
                    median_ns: mean_ns,
                    p95_ns: 1.05 * mean_ns,
                    p99_ns: 1.08 * mean_ns,
                },
                memory: None,
                algorithm_config: AlgorithmConfig {
                    simd_enabled: false,
                    parallel_enabled: false,
                    thread_count: None,
                    simd_width: None,
                    algorithm_variant: "test".to_string(),
                },
                throughput: 100000.0,
                baseline_comparison: None,
            }
        }

        // Time grows 10x for every 10x size increase: linear scaling.
        let metric1 = make_metric(100, 1000.0);
        let metric2 = make_metric(1000, 10000.0);
        let metric3 = make_metric(10000, 100000.0);

        let metrics = vec![&metric1, &metric2, &metric3];

        let complexity = suite.classify_complexity(&metrics);
        assert!(matches!(complexity, ComplexityClass::Linear));
    }
}