pub mod cross_module;
pub mod performance;
pub mod regression;

pub use cross_module::{
    create_default_benchmark_suite, run_quick_benchmarks,
    BenchmarkSuiteResult as CrossModuleBenchmarkSuiteResult, CrossModuleBenchConfig,
    CrossModuleBenchmarkRunner, PerformanceMeasurement as CrossModulePerformanceMeasurement,
};

use crate::error::{CoreError, CoreResult, ErrorContext};
use crate::performance_optimization::OptimizationStrategy;
use std::collections::{HashMap, HashSet};
use std::time::{Duration, Instant};

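/// Configuration for a benchmark run.
///
/// Built fluently from defaults; a minimal sketch (illustrative):
///
/// ```ignore
/// let config = BenchmarkConfig::new()
///     .with_warmup_iterations(5)
///     .with_measurement_iterations(50)
///     .with_tag("example".to_string());
/// ```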
#[derive(Debug, Clone)]
pub struct BenchmarkConfig {
    /// Optimization strategies to benchmark.
    pub strategies: HashSet<OptimizationStrategy>,
    /// Input sizes to benchmark against.
    pub sample_sizes: Vec<usize>,
    /// Number of warmup iterations before measurement begins.
    pub warmup_iterations: usize,
    /// Maximum number of measured iterations.
    pub measurement_iterations: usize,
    /// Wall-clock budget for the measurement phase.
    pub measurement_time: Duration,
    /// Minimum acceptable duration for a single iteration.
    pub min_duration: Duration,
    /// Maximum acceptable duration for a single iteration.
    pub max_duration: Duration,
    /// Confidence level for statistical intervals (e.g. 0.95).
    pub confidence_level: f64,
    /// Maximum acceptable coefficient of variation.
    pub max_cv: f64,
    /// Whether to enable profiling during benchmarks.
    pub enable_profiling: bool,
    /// Whether to track memory usage during benchmarks.
    pub enable_memory_tracking: bool,
    /// Free-form tags attached to results.
    pub tags: Vec<String>,
}

impl Default for BenchmarkConfig {
    fn default() -> Self {
        let mut strategies = HashSet::new();
        strategies.insert(OptimizationStrategy::Scalar);

        Self {
            strategies,
            sample_sizes: vec![1000, 10000, 100000],
            warmup_iterations: 10,
            measurement_iterations: 100,
            measurement_time: Duration::from_secs(5),
            min_duration: Duration::from_millis(1),
            max_duration: Duration::from_secs(30),
            confidence_level: 0.95,
            max_cv: 0.1,
            enable_profiling: false,
            enable_memory_tracking: true,
            tags: Vec::new(),
        }
    }
}

impl BenchmarkConfig {
    /// Creates a configuration with default settings.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the number of warmup iterations.
    pub fn with_warmup_iterations(mut self, iterations: usize) -> Self {
        self.warmup_iterations = iterations;
        self
    }

    /// Sets the maximum number of measured iterations.
    pub fn with_measurement_iterations(mut self, iterations: usize) -> Self {
        self.measurement_iterations = iterations;
        self
    }

    /// Sets the wall-clock budget for the measurement phase.
    pub fn with_measurement_time(mut self, time: Duration) -> Self {
        self.measurement_time = time;
        self
    }

    /// Sets the minimum acceptable iteration duration.
    pub fn with_min_duration(mut self, duration: Duration) -> Self {
        self.min_duration = duration;
        self
    }

    /// Sets the maximum acceptable iteration duration.
    pub fn with_max_duration(mut self, duration: Duration) -> Self {
        self.max_duration = duration;
        self
    }

    /// Sets the confidence level for statistical intervals.
    pub fn with_confidence_level(mut self, level: f64) -> Self {
        self.confidence_level = level;
        self
    }

    /// Sets the maximum acceptable coefficient of variation.
    pub fn with_max_cv(mut self, cv: f64) -> Self {
        self.max_cv = cv;
        self
    }

    /// Enables or disables profiling.
    pub fn with_profiling(mut self, enabled: bool) -> Self {
        self.enable_profiling = enabled;
        self
    }

    /// Enables or disables memory tracking.
    pub fn with_memory_tracking(mut self, enabled: bool) -> Self {
        self.enable_memory_tracking = enabled;
        self
    }

    /// Replaces the tag list.
    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
        self.tags = tags;
        self
    }

    /// Appends a single tag.
    pub fn with_tag(mut self, tag: String) -> Self {
        self.tags.push(tag);
        self
    }

    /// Replaces the set of optimization strategies.
    pub fn with_strategies(mut self, strategies: HashSet<OptimizationStrategy>) -> Self {
        self.strategies = strategies;
        self
    }

    /// Adds a single optimization strategy.
    pub fn with_strategy(mut self, strategy: OptimizationStrategy) -> Self {
        self.strategies.insert(strategy);
        self
    }

    /// Replaces the list of input sizes.
    pub fn with_sample_sizes(mut self, sample_sizes: Vec<usize>) -> Self {
        self.sample_sizes = sample_sizes;
        self
    }
}

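/// A single benchmark measurement.
///
/// Built fluently; a minimal sketch (illustrative):
///
/// ```ignore
/// let m = BenchmarkMeasurement::new(Duration::from_millis(5))
///     .with_memory_usage(4096)
///     .with_custom_metric("ops".to_string(), 1_000.0);
/// ```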
#[derive(Debug, Clone)]
pub struct BenchmarkMeasurement {
    /// Measured execution time.
    pub execution_time: Duration,
    /// Same value as `execution_time`, kept as a convenience alias.
    pub duration: Duration,
    /// Optimization strategy used for this measurement.
    pub strategy: OptimizationStrategy,
    /// Size of the input processed.
    pub input_size: usize,
    /// Throughput in elements per second.
    pub throughput: f64,
    /// Memory usage in bytes.
    pub memory_usage: usize,
    /// Additional user-defined metrics.
    pub custom_metrics: HashMap<String, f64>,
    /// When the measurement was taken.
    pub timestamp: std::time::SystemTime,
}

impl BenchmarkMeasurement {
    /// Creates a measurement from an execution time.
    pub fn new(execution_time: Duration) -> Self {
        Self {
            execution_time,
            duration: execution_time,
            strategy: OptimizationStrategy::Scalar,
            input_size: 0,
            throughput: 0.0,
            memory_usage: 0,
            custom_metrics: HashMap::new(),
            timestamp: std::time::SystemTime::now(),
        }
    }

    /// Sets the memory usage in bytes.
    pub fn with_memory_usage(mut self, memory: usize) -> Self {
        self.memory_usage = memory;
        self
    }

    /// Adds a named custom metric.
    pub fn with_custom_metric(mut self, name: String, value: f64) -> Self {
        self.custom_metrics.insert(name, value);
        self
    }

    /// Sets the optimization strategy.
    pub fn with_strategy(mut self, strategy: OptimizationStrategy) -> Self {
        self.strategy = strategy;
        self
    }

    /// Sets the input size.
    pub fn with_input_size(mut self, input_size: usize) -> Self {
        self.input_size = input_size;
        self
    }

    /// Sets the throughput in elements per second.
    pub fn with_throughput(mut self, throughput: f64) -> Self {
        self.throughput = throughput;
        self
    }

    /// Execution time in nanoseconds.
    pub fn execution_time_nanos(&self) -> u64 {
        self.execution_time.as_nanos() as u64
    }

    /// Execution time in microseconds.
    pub fn execution_time_micros(&self) -> u64 {
        self.execution_time.as_micros() as u64
    }

    /// Execution time in milliseconds.
    pub fn execution_time_millis(&self) -> u64 {
        self.execution_time.as_millis() as u64
    }
}

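/// The outcome of a benchmark run: the raw measurements, the statistics
/// derived from them, and any quality warnings.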
#[derive(Debug, Clone)]
pub struct BenchmarkResult {
    /// Benchmark name.
    pub name: String,
    /// All collected measurements.
    pub measurements: Vec<BenchmarkMeasurement>,
    /// Statistics derived from the measurements.
    pub statistics: BenchmarkStatistics,
    /// Configuration used for the run.
    pub config: BenchmarkConfig,
    /// Total wall-clock time, including warmup.
    pub total_time: Duration,
    /// Whether the configured quality criteria were met.
    pub quality_criteria_met: bool,
    /// Warnings produced while finalizing the result.
    pub warnings: Vec<String>,
}

impl BenchmarkResult {
    /// Creates an empty result for a named benchmark.
    pub fn new(name: String, config: BenchmarkConfig) -> Self {
        Self {
            name,
            measurements: Vec::new(),
            statistics: BenchmarkStatistics::default(),
            config,
            total_time: Duration::from_secs(0),
            quality_criteria_met: false,
            warnings: Vec::new(),
        }
    }

    /// Records a single measurement.
    pub fn add_measurement(&mut self, measurement: BenchmarkMeasurement) {
        self.measurements.push(measurement);
    }

    /// Computes statistics and evaluates quality criteria.
    pub fn finalize(&mut self) -> CoreResult<()> {
        if self.measurements.is_empty() {
            return Err(CoreError::ValidationError(ErrorContext::new(
                "No measurements collected",
            )));
        }

        self.statistics = BenchmarkStatistics::from_measurements(&self.measurements)?;
        self.quality_criteria_met = self.statistics.coefficient_of_variation <= self.config.max_cv;

        if !self.quality_criteria_met {
            self.warnings.push(format!(
                "High coefficient of variation: {:.3} > {:.3}",
                self.statistics.coefficient_of_variation, self.config.max_cv
            ));
        }

        Ok(())
    }

    /// Returns throughput in operations per second, given the number of
    /// operations performed per iteration.
    pub fn get_throughput(&self, operations_per_iteration: u64) -> f64 {
        let avg_time_seconds = self.statistics.mean_execution_time.as_secs_f64();
        operations_per_iteration as f64 / avg_time_seconds
    }

    /// Returns operations per megabyte of mean memory usage.
    pub fn get_memory_efficiency(&self, operations_per_iteration: u64) -> f64 {
        if self.statistics.mean_memory_usage == 0 {
            return f64::INFINITY;
        }
        let memory_mb = self.statistics.mean_memory_usage as f64 / (1024.0 * 1024.0);
        operations_per_iteration as f64 / memory_mb
    }
}

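/// Summary statistics computed from a set of measurements.
///
/// The coefficient of variation is the standard deviation divided by the
/// mean execution time; smaller values indicate more stable timings.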
#[derive(Debug, Clone, Default)]
pub struct BenchmarkStatistics {
    /// Mean execution time.
    pub mean_execution_time: Duration,
    /// Median execution time.
    pub median_execution_time: Duration,
    /// Standard deviation of execution time.
    pub std_dev_execution_time: Duration,
    /// Minimum execution time.
    pub min_execution_time: Duration,
    /// Maximum execution time.
    pub max_execution_time: Duration,
    /// Standard deviation divided by the mean execution time.
    pub coefficient_of_variation: f64,
    /// Confidence interval around the mean execution time.
    pub confidence_interval: (Duration, Duration),
    /// Mean memory usage in bytes.
    pub mean_memory_usage: usize,
    /// Standard deviation of memory usage in bytes.
    pub std_dev_memory_usage: usize,
    /// Number of measurements.
    pub sample_count: usize,
}

impl BenchmarkStatistics {
    /// Computes statistics from a set of measurements.
    pub fn from_measurements(measurements: &[BenchmarkMeasurement]) -> CoreResult<Self> {
        if measurements.is_empty() {
            return Err(CoreError::ValidationError(ErrorContext::new(
                "Cannot compute statistics from empty measurements",
            )));
        }

        let mut execution_times: Vec<Duration> =
            measurements.iter().map(|m| m.execution_time).collect();
        execution_times.sort();

        let mean_nanos = execution_times
            .iter()
            .map(|d| d.as_nanos() as f64)
            .sum::<f64>()
            / execution_times.len() as f64;
        let mean_execution_time = Duration::from_nanos(mean_nanos as u64);

        let median_execution_time = if execution_times.len() % 2 == 0 {
            let mid = execution_times.len() / 2;
            Duration::from_nanos(
                ((execution_times[mid - 1].as_nanos() + execution_times[mid].as_nanos()) / 2)
                    as u64,
            )
        } else {
            execution_times[execution_times.len() / 2]
        };

        let variance = execution_times
            .iter()
            .map(|d| {
                let diff = d.as_nanos() as f64 - mean_nanos;
                diff * diff
            })
            .sum::<f64>()
            / execution_times.len() as f64;
        let std_dev_execution_time = Duration::from_nanos(variance.sqrt() as u64);

        let min_execution_time = execution_times[0];
        let max_execution_time = execution_times[execution_times.len() - 1];

        let coefficient_of_variation = if mean_nanos > 0.0 {
            variance.sqrt() / mean_nanos
        } else {
            0.0
        };

        // z-value for a two-sided 95% confidence interval (normal
        // approximation); the configured confidence level is not consulted.
        let t_value = 1.96;
        let standard_error = variance.sqrt() / (execution_times.len() as f64).sqrt();
        let margin_of_error = t_value * standard_error;
        let confidence_interval = (
            Duration::from_nanos((mean_nanos - margin_of_error).max(0.0) as u64),
            Duration::from_nanos((mean_nanos + margin_of_error) as u64),
        );

        let mean_memory = measurements
            .iter()
            .map(|m| m.memory_usage as f64)
            .sum::<f64>()
            / measurements.len() as f64;
        let memory_variance = measurements
            .iter()
            .map(|m| {
                let diff = m.memory_usage as f64 - mean_memory;
                diff * diff
            })
            .sum::<f64>()
            / measurements.len() as f64;

        Ok(BenchmarkStatistics {
            mean_execution_time,
            median_execution_time,
            std_dev_execution_time,
            min_execution_time,
            max_execution_time,
            coefficient_of_variation,
            confidence_interval,
            mean_memory_usage: mean_memory as usize,
            std_dev_memory_usage: memory_variance.sqrt() as usize,
            sample_count: measurements.len(),
        })
    }

    /// Returns `true` if the coefficient of variation is within `max_cv`
    /// and at least 10 samples were collected.
    pub fn is_reliable(&self, max_cv: f64) -> bool {
        self.coefficient_of_variation <= max_cv && self.sample_count >= 10
    }

    /// Returns the execution time at the given percentile (0-100).
    pub fn execution_time_percentile(
        &self,
        measurements: &[BenchmarkMeasurement],
        percentile: f64,
    ) -> Duration {
        if measurements.is_empty() {
            return Duration::from_secs(0);
        }

        let mut times: Vec<Duration> = measurements.iter().map(|m| m.execution_time).collect();
        times.sort();

        let index = (percentile / 100.0 * (times.len() - 1) as f64).round() as usize;
        times[index.min(times.len() - 1)]
    }
}

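/// Executes benchmark functions according to a [`BenchmarkConfig`].
///
/// A minimal usage sketch (illustrative):
///
/// ```ignore
/// let runner = BenchmarkRunner::new(BenchmarkConfig::new());
/// let result = runner.run("noop", || Ok(()))?;
/// println!("mean: {:?}", result.statistics.mean_execution_time);
/// ```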
pub struct BenchmarkRunner {
    config: BenchmarkConfig,
}

impl BenchmarkRunner {
    /// Creates a runner with the given configuration.
    pub fn new(config: BenchmarkConfig) -> Self {
        Self { config }
    }

    /// Runs a benchmark function, returning measurements and statistics.
    pub fn run<F, T>(&self, name: &str, mut benchmark_fn: F) -> CoreResult<BenchmarkResult>
    where
        F: FnMut() -> CoreResult<T>,
    {
        let total_start = Instant::now();
        let mut result = BenchmarkResult::new(name.to_string(), self.config.clone());

        for _ in 0..self.config.warmup_iterations {
            benchmark_fn()?;
        }

        let measurement_start = Instant::now();
        let mut iteration_count = 0;

        while iteration_count < self.config.measurement_iterations
            && measurement_start.elapsed() < self.config.measurement_time
        {
            let memory_before = if self.config.enable_memory_tracking {
                self.get_memory_usage().unwrap_or(0)
            } else {
                0
            };

            let start = Instant::now();
            benchmark_fn()?;
            let execution_time = start.elapsed();

            let memory_after = if self.config.enable_memory_tracking {
                self.get_memory_usage().unwrap_or(0)
            } else {
                0
            };

            let memory_usage = memory_after.saturating_sub(memory_before);

            result.add_measurement(
                BenchmarkMeasurement::new(execution_time).with_memory_usage(memory_usage),
            );

            iteration_count += 1;
        }

        result.total_time = total_start.elapsed();
        result.finalize()?;

        Ok(result)
    }

    /// Runs a benchmark with per-iteration setup and teardown; only the
    /// benchmark function itself is timed.
    pub fn run_with_setup<F, G, H, T, S>(
        &self,
        name: &str,
        mut setup: F,
        mut benchmark_fn: G,
        mut teardown: H,
    ) -> CoreResult<BenchmarkResult>
    where
        F: FnMut() -> CoreResult<S>,
        G: FnMut(&mut S) -> CoreResult<T>,
        H: FnMut(S) -> CoreResult<()>,
    {
        let total_start = Instant::now();
        let mut result = BenchmarkResult::new(name.to_string(), self.config.clone());

        for _ in 0..self.config.warmup_iterations {
            let mut state = setup()?;
            benchmark_fn(&mut state)?;
            teardown(state)?;
        }

        let measurement_start = Instant::now();
        let mut iteration_count = 0;

        while iteration_count < self.config.measurement_iterations
            && measurement_start.elapsed() < self.config.measurement_time
        {
            let mut state = setup()?;

            let memory_before = if self.config.enable_memory_tracking {
                self.get_memory_usage().unwrap_or(0)
            } else {
                0
            };

            let start = Instant::now();
            benchmark_fn(&mut state)?;
            let execution_time = start.elapsed();

            let memory_after = if self.config.enable_memory_tracking {
                self.get_memory_usage().unwrap_or(0)
            } else {
                0
            };

            teardown(state)?;

            let memory_usage = memory_after.saturating_sub(memory_before);

            result.add_measurement(
                BenchmarkMeasurement::new(execution_time).with_memory_usage(memory_usage),
            );

            iteration_count += 1;
        }

        result.total_time = total_start.elapsed();
        result.finalize()?;

        Ok(result)
    }

    /// Runs the same benchmark once per parameter, labeling each result
    /// with the parameter's `Debug` representation.
    pub fn run_parameterized<F, T, P>(
        &self,
        name: &str,
        parameters: Vec<P>,
        mut benchmark_fn: F,
    ) -> CoreResult<Vec<(P, BenchmarkResult)>>
    where
        F: FnMut(&P) -> CoreResult<T>,
        P: Clone + std::fmt::Debug,
    {
        let mut results = Vec::new();

        for param in parameters {
            let param_name = format!("{name}({param:?})");
            let param_clone = param.clone();

            let result = self.run(&param_name, || benchmark_fn(&param_clone))?;
            results.push((param, result));
        }

        Ok(results)
    }

    /// Runs a single timed invocation of `operation` for each configured
    /// strategy against a fixed 1000-byte input.
    #[allow(dead_code)]
    pub fn benchmark_operation<F, T>(
        &self,
        _name: &str,
        mut operation: F,
    ) -> CoreResult<Vec<BenchmarkMeasurement>>
    where
        F: FnMut(&[u8], OptimizationStrategy) -> CoreResult<T>,
    {
        let mut measurements = Vec::new();

        let data = vec![0u8; 1000];

        for strategy in &self.config.strategies {
            let start = Instant::now();
            let _ = operation(&data, *strategy)?;
            let elapsed = start.elapsed();

            let measurement = BenchmarkMeasurement::new(elapsed)
                .with_strategy(*strategy)
                .with_input_size(data.len())
                .with_throughput(data.len() as f64 / elapsed.as_secs_f64());

            measurements.push(measurement);
        }

        Ok(measurements)
    }

    /// Returns the current resident set size in bytes. Only implemented for
    /// Linux (via `/proc/self/status`); returns 0 on other platforms.
    fn get_memory_usage(&self) -> CoreResult<usize> {
        #[cfg(target_os = "linux")]
        {
            use std::fs;
            let status = fs::read_to_string("/proc/self/status").map_err(|e| {
                CoreError::IoError(ErrorContext::new(format!(
                    "Failed to read memory status: {e}"
                )))
            })?;

            for line in status.lines() {
                if line.starts_with("VmRSS:") {
                    let parts: Vec<&str> = line.split_whitespace().collect();
                    if parts.len() >= 2 {
                        let kb: usize = parts[1].parse().map_err(|e| {
                            CoreError::ValidationError(ErrorContext::new(format!(
                                "Failed to parse memory: {e}"
                            )))
                        })?;
                        return Ok(kb * 1024);
                    }
                }
            }
        }

        Ok(0)
    }
}

/// Boxed benchmark function stored by [`BenchmarkSuite`].
type BenchmarkFn = Box<dyn Fn(&BenchmarkRunner) -> CoreResult<BenchmarkResult> + Send + Sync>;

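/// A named collection of benchmarks sharing one configuration.
///
/// A minimal sketch (illustrative):
///
/// ```ignore
/// let mut suite = BenchmarkSuite::new("math", BenchmarkConfig::new());
/// suite.add_benchmark(|runner| runner.run("add", || Ok(1 + 1)));
/// let results = suite.run()?;
/// ```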
pub struct BenchmarkSuite {
    name: String,
    benchmarks: Vec<BenchmarkFn>,
    config: BenchmarkConfig,
}

impl BenchmarkSuite {
    /// Creates an empty suite.
    pub fn new(name: &str, config: BenchmarkConfig) -> Self {
        Self {
            name: name.to_string(),
            benchmarks: Vec::new(),
            config,
        }
    }

    /// Adds a benchmark to the suite.
    pub fn add_benchmark<F>(&mut self, benchmark_fn: F)
    where
        F: Fn(&BenchmarkRunner) -> CoreResult<BenchmarkResult> + Send + Sync + 'static,
    {
        self.benchmarks.push(Box::new(benchmark_fn));
    }

    /// Runs every benchmark in the suite, printing progress and a summary.
    pub fn run(&self) -> CoreResult<Vec<BenchmarkResult>> {
        let runner = BenchmarkRunner::new(self.config.clone());
        let mut results = Vec::new();

        println!("Running benchmark suite: {}", self.name);

        for (i, benchmark) in self.benchmarks.iter().enumerate() {
            println!("Running benchmark {}/{}", i + 1, self.benchmarks.len());

            match benchmark(&runner) {
                Ok(result) => {
                    println!(
                        "  {} completed: {:.3}ms ± {:.3}ms",
                        result.name,
                        result.statistics.mean_execution_time.as_secs_f64() * 1000.0,
                        result.statistics.std_dev_execution_time.as_secs_f64() * 1000.0
                    );
                    results.push(result);
                }
                Err(e) => {
                    println!("  Benchmark failed: {e:?}");
                    return Err(e);
                }
            }
        }

        self.print_summary(&results);

        Ok(results)
    }

    fn print_summary(&self, results: &[BenchmarkResult]) {
        println!("\nBenchmark Suite '{}' Summary:", self.name);
        println!("----------------------------------------");

        for result in results {
            let quality_indicator = if result.quality_criteria_met {
                "✓"
            } else {
                "⚠"
            };
            println!(
                "{} {}: {:.3}ms (CV: {:.2}%)",
                quality_indicator,
                result.name,
                result.statistics.mean_execution_time.as_secs_f64() * 1000.0,
                result.statistics.coefficient_of_variation * 100.0
            );

            for warning in &result.warnings {
                println!("  Warning: {warning}");
            }
        }

        let reliable_count = results.iter().filter(|r| r.quality_criteria_met).count();
        println!(
            "\nReliable benchmarks: {}/{}",
            reliable_count,
            results.len()
        );
    }
}

#[derive(Debug, Clone)]
pub struct StrategyPerformance {
    /// Strategy being summarized.
    pub strategy: OptimizationStrategy,
    /// Throughput in elements per second.
    pub throughput: f64,
    /// Latency per operation.
    pub latency: Duration,
    /// Memory efficiency score.
    pub memory_efficiency: f64,
    /// Cache hit rate (0.0-1.0).
    pub cache_hit_rate: f64,
    /// Average throughput across runs.
    pub avg_throughput: f64,
    /// Standard deviation of throughput.
    pub throughput_stddev: f64,
    /// Average memory usage in bytes.
    pub avg_memory_usage: f64,
    /// Input size at which the strategy performed best.
    pub optimal_size: usize,
    /// Overall efficiency score.
    pub efficiency_score: f64,
}

impl StrategyPerformance {
    /// Creates a zeroed summary for a strategy.
    #[allow(dead_code)]
    pub fn new(strategy: OptimizationStrategy) -> Self {
        Self {
            strategy,
            throughput: 0.0,
            latency: Duration::from_secs(0),
            memory_efficiency: 0.0,
            cache_hit_rate: 0.0,
            avg_throughput: 0.0,
            throughput_stddev: 0.0,
            avg_memory_usage: 0.0,
            optimal_size: 0,
            efficiency_score: 0.0,
        }
    }
}

#[derive(Debug, Clone)]
pub struct MemoryScaling {
    /// Linear scaling factor.
    pub linear_factor: f64,
    /// Logarithmic scaling factor.
    pub logarithmic_factor: f64,
    /// Constant memory overhead in bytes.
    pub constant_overhead: usize,
    /// Fitted linear coefficient.
    pub linear_coefficient: f64,
    /// Fitted constant coefficient.
    pub constant_coefficient: f64,
    /// Goodness of fit for the scaling model.
    pub r_squared: f64,
}

impl Default for MemoryScaling {
    fn default() -> Self {
        Self::new()
    }
}

impl MemoryScaling {
    /// Creates a scaling model with identity defaults.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self {
            linear_factor: 1.0,
            logarithmic_factor: 0.0,
            constant_overhead: 0,
            linear_coefficient: 1.0,
            constant_coefficient: 0.0,
            r_squared: 1.0,
        }
    }
}

/// Categories of performance bottleneck a benchmark can reveal.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BottleneckType {
    CpuBound,
    MemoryBandwidth,
    CacheMisses,
    BranchMisprediction,
    IoWait,
    AlgorithmicComplexity,
    CacheLatency,
    ComputeBound,
    SynchronizationOverhead,
}

#[derive(Debug, Clone)]
pub struct PerformanceBottleneck {
    /// Kind of bottleneck detected.
    pub bottleneck_type: BottleneckType,
    /// Severity score (higher is worse).
    pub severity: f64,
    /// Human-readable description.
    pub description: String,
    /// Strategy recommended to mitigate the bottleneck.
    pub mitigation_strategy: OptimizationStrategy,
    /// Input-size range over which the bottleneck was observed.
    pub size_range: (usize, usize),
    /// Estimated performance impact.
    pub impact: f64,
    /// Human-readable mitigation advice.
    pub mitigation: String,
}

impl PerformanceBottleneck {
    /// Creates an empty bottleneck record of the given type.
    #[allow(dead_code)]
    pub fn new(bottleneck_type: BottleneckType) -> Self {
        Self {
            bottleneck_type,
            severity: 0.0,
            description: String::new(),
            mitigation_strategy: OptimizationStrategy::Scalar,
            size_range: (0, 0),
            impact: 0.0,
            mitigation: String::new(),
        }
    }
}

#[derive(Debug, Clone)]
pub struct ScalabilityAnalysis {
    /// Parallel efficiency measurements.
    pub parallel_efficiency: HashMap<usize, f64>,
    /// Memory scaling model.
    pub memory_scaling: MemoryScaling,
    /// Detected performance bottlenecks.
    pub bottlenecks: Vec<PerformanceBottleneck>,
}

impl Default for ScalabilityAnalysis {
    fn default() -> Self {
        Self::new()
    }
}

impl ScalabilityAnalysis {
    /// Creates an empty analysis.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self {
            parallel_efficiency: HashMap::new(),
            memory_scaling: MemoryScaling::new(),
            bottlenecks: Vec::new(),
        }
    }
}

#[derive(Debug, Clone)]
pub struct BenchmarkResults {
    /// Name of the benchmarked operation.
    pub operation_name: String,
    /// All collected measurements.
    pub measurements: Vec<BenchmarkMeasurement>,
    /// Per-strategy performance summaries.
    pub strategy_summary: HashMap<OptimizationStrategy, StrategyPerformance>,
    /// Scalability analysis across input sizes.
    pub scalability_analysis: ScalabilityAnalysis,
    /// Optimization recommendations.
    pub recommendations: Vec<String>,
    /// Total duration of the benchmark run.
    pub total_duration: Duration,
}

impl BenchmarkResults {
    /// Creates an empty result set for an operation.
    #[allow(dead_code)]
    pub fn new(operation_name: String) -> Self {
        Self {
            operation_name,
            measurements: Vec::new(),
            strategy_summary: HashMap::new(),
            scalability_analysis: ScalabilityAnalysis::new(),
            recommendations: Vec::new(),
            total_duration: Duration::from_secs(0),
        }
    }
}

/// Preset configurations for common benchmarking scenarios.
pub mod presets {
    use super::*;

    /// Comprehensive configuration: every optimization strategy and a wide
    /// range of input sizes, intended for thorough offline runs.
    #[allow(dead_code)]
    pub fn advanced_comprehensive() -> BenchmarkConfig {
        let mut strategies = HashSet::new();
        strategies.insert(OptimizationStrategy::Scalar);
        strategies.insert(OptimizationStrategy::Simd);
        strategies.insert(OptimizationStrategy::Parallel);
        strategies.insert(OptimizationStrategy::Gpu);
        strategies.insert(OptimizationStrategy::Hybrid);
        strategies.insert(OptimizationStrategy::CacheOptimized);
        strategies.insert(OptimizationStrategy::MemoryBound);
        strategies.insert(OptimizationStrategy::ComputeBound);
        strategies.insert(OptimizationStrategy::ModernArchOptimized);
        strategies.insert(OptimizationStrategy::VectorOptimized);
        strategies.insert(OptimizationStrategy::EnergyEfficient);
        strategies.insert(OptimizationStrategy::HighThroughput);

        let sample_sizes = vec![
            100, 500, 1_000, 5_000, 10_000, 50_000, 100_000, 500_000, 1_000_000, 5_000_000,
            10_000_000,
        ];

        BenchmarkConfig {
            strategies,
            sample_sizes,
            warmup_iterations: 15,
            measurement_iterations: 50,
            measurement_time: Duration::from_secs(10),
            min_duration: Duration::from_millis(10),
            max_duration: Duration::from_secs(60),
            confidence_level: 0.95,
            max_cv: 0.1,
            enable_profiling: true,
            enable_memory_tracking: true,
            tags: vec!["Advanced".to_string(), "comprehensive".to_string()],
        }
    }

    /// Configuration focused on strategies tuned for modern CPU
    /// architectures.
    #[allow(dead_code)]
    pub fn modern_architectures() -> BenchmarkConfig {
        let mut strategies = HashSet::new();
        strategies.insert(OptimizationStrategy::ModernArchOptimized);
        strategies.insert(OptimizationStrategy::VectorOptimized);
        strategies.insert(OptimizationStrategy::EnergyEfficient);
        strategies.insert(OptimizationStrategy::HighThroughput);

        let sample_sizes = vec![1_000, 10_000, 100_000, 1_000_000, 10_000_000];

        BenchmarkConfig {
            strategies,
            sample_sizes,
            warmup_iterations: 10,
            measurement_iterations: 30,
            measurement_time: Duration::from_secs(8),
            min_duration: Duration::from_millis(5),
            max_duration: Duration::from_secs(30),
            confidence_level: 0.95,
            max_cv: 0.1,
            enable_profiling: true,
            enable_memory_tracking: true,
            tags: vec!["modern".to_string(), "architecture".to_string()],
        }
    }

    /// Configuration for element-wise array operations.
    #[allow(dead_code)]
    pub fn array_operations() -> BenchmarkConfig {
        let mut strategies = HashSet::new();
        strategies.insert(OptimizationStrategy::Scalar);
        strategies.insert(OptimizationStrategy::Simd);
        strategies.insert(OptimizationStrategy::VectorOptimized);
        strategies.insert(OptimizationStrategy::CacheOptimized);

        let sample_sizes = vec![100, 1_000, 10_000, 100_000];

        BenchmarkConfig {
            strategies,
            sample_sizes,
            warmup_iterations: 10,
            measurement_iterations: 25,
            measurement_time: Duration::from_secs(5),
            min_duration: Duration::from_millis(1),
            max_duration: Duration::from_secs(15),
            confidence_level: 0.95,
            max_cv: 0.15,
            enable_profiling: false,
            enable_memory_tracking: true,
            tags: vec!["array".to_string(), "operations".to_string()],
        }
    }

    /// Configuration for matrix operations.
    #[allow(dead_code)]
    pub fn matrix_operations() -> BenchmarkConfig {
        let mut strategies = HashSet::new();
        strategies.insert(OptimizationStrategy::Scalar);
        strategies.insert(OptimizationStrategy::Parallel);
        strategies.insert(OptimizationStrategy::CacheOptimized);
        strategies.insert(OptimizationStrategy::ModernArchOptimized);

        let sample_sizes = vec![100, 500, 1_000, 5_000];

        BenchmarkConfig {
            strategies,
            sample_sizes,
            warmup_iterations: 5,
            measurement_iterations: 20,
            measurement_time: Duration::from_secs(8),
            min_duration: Duration::from_millis(2),
            max_duration: Duration::from_secs(20),
            confidence_level: 0.95,
            max_cv: 0.12,
            enable_profiling: true,
            enable_memory_tracking: true,
            tags: vec!["matrix".to_string(), "operations".to_string()],
        }
    }

    /// Configuration for memory-intensive workloads; uses a relaxed CV
    /// threshold since memory-bound timings tend to be noisier.
    #[allow(dead_code)]
    pub fn memory_intensive() -> BenchmarkConfig {
        let mut strategies = HashSet::new();
        strategies.insert(OptimizationStrategy::MemoryBound);
        strategies.insert(OptimizationStrategy::CacheOptimized);
        strategies.insert(OptimizationStrategy::HighThroughput);

        let sample_sizes = vec![10_000, 100_000, 1_000_000];

        BenchmarkConfig {
            strategies,
            sample_sizes,
            warmup_iterations: 3,
            measurement_iterations: 15,
            measurement_time: Duration::from_secs(12),
            min_duration: Duration::from_millis(5),
            max_duration: Duration::from_secs(45),
            confidence_level: 0.95,
            max_cv: 0.2,
            enable_profiling: true,
            enable_memory_tracking: true,
            tags: vec!["memory".to_string(), "intensive".to_string()],
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_benchmark_config() {
        let config = BenchmarkConfig::new()
            .with_warmup_iterations(5)
            .with_measurement_iterations(50)
            .with_confidence_level(0.99)
            .with_tag("test".to_string());

        assert_eq!(config.warmup_iterations, 5);
        assert_eq!(config.measurement_iterations, 50);
        assert_eq!(config.confidence_level, 0.99);
        assert_eq!(config.tags, vec!["test"]);
    }
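
    // Added check (illustrative): the duration setters should store exactly
    // the values they are given.
    #[test]
    fn test_duration_setters() {
        let config = BenchmarkConfig::new()
            .with_min_duration(Duration::from_millis(2))
            .with_max_duration(Duration::from_secs(10));

        assert_eq!(config.min_duration, Duration::from_millis(2));
        assert_eq!(config.max_duration, Duration::from_secs(10));
    }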

    #[test]
    fn test_benchmark_measurement() {
        let measurement = BenchmarkMeasurement::new(Duration::from_millis(100))
            .with_memory_usage(1024)
            .with_custom_metric("ops".to_string(), 1000.0);

        assert_eq!(measurement.execution_time, Duration::from_millis(100));
        assert_eq!(measurement.memory_usage, 1024);
        assert_eq!(measurement.custom_metrics["ops"], 1000.0);
    }

    #[test]
    fn test_benchmark_statistics() {
        let measurements = vec![
            BenchmarkMeasurement::new(Duration::from_millis(100)),
            BenchmarkMeasurement::new(Duration::from_millis(110)),
            BenchmarkMeasurement::new(Duration::from_millis(90)),
            BenchmarkMeasurement::new(Duration::from_millis(105)),
        ];

        let stats =
            BenchmarkStatistics::from_measurements(&measurements).expect("Operation failed");

        assert_eq!(stats.sample_count, 4);
        assert!(stats.mean_execution_time > Duration::from_millis(95));
        assert!(stats.mean_execution_time < Duration::from_millis(110));
        assert!(stats.coefficient_of_variation > 0.0);
    }
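
    // Added check (illustrative): percentiles are read from the sorted
    // measurement times, and reliability requires both a low CV and at
    // least 10 samples.
    #[test]
    fn test_statistics_percentile_and_reliability() {
        let measurements: Vec<BenchmarkMeasurement> = (1..=10u64)
            .map(|i| BenchmarkMeasurement::new(Duration::from_millis(i * 10)))
            .collect();

        let stats =
            BenchmarkStatistics::from_measurements(&measurements).expect("Operation failed");

        let p50 = stats.execution_time_percentile(&measurements, 50.0);
        assert!(p50 >= Duration::from_millis(10));
        assert!(p50 <= Duration::from_millis(100));
        // The CV of this spread is large, so a strict threshold rejects it.
        assert!(!stats.is_reliable(0.01));
    }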

    #[test]
    fn test_benchmark_runner() {
        let config = BenchmarkConfig::new()
            .with_warmup_iterations(1)
            .with_measurement_iterations(5);
        let runner = BenchmarkRunner::new(config);

        let result = runner
            .run("test_benchmark", || {
                std::thread::sleep(Duration::from_micros(100));
                Ok(())
            })
            .expect("Operation failed");

        assert_eq!(result.name, "test_benchmark");
        assert_eq!(result.measurements.len(), 5);
        assert!(result.statistics.mean_execution_time > Duration::from_micros(50));
    }
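
    // Added check (illustrative): parameterized runs produce one result per
    // parameter, labeled with the parameter's Debug representation.
    #[test]
    fn test_run_parameterized() {
        let config = BenchmarkConfig::new()
            .with_warmup_iterations(1)
            .with_measurement_iterations(3);
        let runner = BenchmarkRunner::new(config);

        let results = runner
            .run_parameterized("sum", vec![10usize, 100], |&n| {
                Ok((0..n).sum::<usize>())
            })
            .expect("Operation failed");

        assert_eq!(results.len(), 2);
        assert_eq!(results[0].1.name, "sum(10)");
    }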
}