1use crate::builder::Circuit;
7use crate::noise_models::{NoiseAnalysisResult, NoiseModel};
8use crate::simulator_interface::{ExecutionResult, SimulatorBackend};
9use quantrs2_core::{
10 error::{QuantRS2Error, QuantRS2Result},
11 gate::GateOp,
12};
13use serde::{Deserialize, Serialize};
14use std::collections::HashMap;
15use std::time::{Duration, Instant};
16
/// Probability distributions available for fitting benchmark timing data.
#[derive(Debug, Clone, PartialEq)]
pub enum Distribution {
    /// Gaussian distribution parameterized by mean and standard deviation.
    Normal { mean: f64, std_dev: f64 },
    /// Uniform distribution on the interval [min, max].
    Uniform { min: f64, max: f64 },
    /// Exponential distribution with the given rate (lambda).
    Exponential { rate: f64 },
    /// Beta distribution with shape parameters alpha and beta.
    Beta { alpha: f64, beta: f64 },
    /// Gamma distribution with shape and scale parameters.
    Gamma { shape: f64, scale: f64 },
    /// Poisson distribution with mean/variance lambda.
    Poisson { lambda: f64 },
    /// Chi-squared distribution with the given degrees of freedom.
    ChiSquared { degrees_of_freedom: usize },
    /// Student's t distribution with the given degrees of freedom.
    StudentT { degrees_of_freedom: usize },
}
40
/// Statistical hypothesis tests the benchmarking layer can request.
///
/// NOTE(review): only the Mann-Whitney test has (placeholder) plumbing in
/// `StatisticalAnalyzer`; the others appear to enumerate planned tests —
/// confirm before relying on them.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum StatisticalTest {
    /// Kolmogorov-Smirnov goodness-of-fit / two-sample test.
    KolmogorovSmirnov,
    /// Anderson-Darling goodness-of-fit test.
    AndersonDarling,
    /// Shapiro-Wilk normality test.
    ShapiroWilk,
    /// Mann-Whitney U test for two independent samples.
    MannWhitney,
    /// Wilcoxon signed-rank test for paired samples.
    Wilcoxon,
    /// Chi-squared goodness-of-fit test.
    ChiSquaredGoodnessOfFit,
    /// One-way analysis of variance.
    ANOVA,
    /// Kruskal-Wallis rank-based one-way ANOVA.
    KruskalWallis,
}
61
/// Outcome of a statistical hypothesis test.
#[derive(Debug, Clone)]
pub struct HypothesisTestResult {
    /// Value of the test statistic.
    pub test_statistic: f64,
    /// Probability of a statistic at least this extreme under the null hypothesis.
    pub p_value: f64,
    /// Critical value at the chosen significance level.
    pub critical_value: f64,
    /// Whether the null hypothesis was rejected at `significance_level`.
    pub reject_null: bool,
    /// Significance level (alpha) the test was run at.
    pub significance_level: f64,
    /// Optional standardized effect size, if computed.
    pub effect_size: Option<f64>,
    /// Optional confidence interval for the estimated effect.
    pub confidence_interval: Option<(f64, f64)>,
}
80
/// Summary statistics for a sample of `f64` values.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DescriptiveStats {
    /// Number of samples.
    pub count: usize,
    /// Arithmetic mean.
    pub mean: f64,
    /// Population standard deviation (1/n estimator).
    pub std_dev: f64,
    /// Population variance (1/n estimator).
    pub variance: f64,
    /// Smallest sample value.
    pub min: f64,
    /// Largest sample value.
    pub max: f64,
    /// Sample median.
    pub median: f64,
    /// First quartile (25th percentile).
    pub q1: f64,
    /// Third quartile (75th percentile).
    pub q3: f64,
    /// Interquartile range (`q3 - q1`).
    pub iqr: f64,
    /// Population skewness.
    pub skewness: f64,
    /// Excess kurtosis (normal distribution = 0).
    pub kurtosis: f64,
    /// Most frequent value, when computed (currently always `None`).
    pub mode: Option<f64>,
}
111
/// Configuration for a circuit benchmarking session.
#[derive(Debug, Clone)]
pub struct BenchmarkConfig {
    /// Number of measured benchmark runs.
    pub num_runs: usize,
    /// Number of warm-up runs executed before measurement (results discarded).
    pub warmup_runs: usize,
    /// Maximum time budget for the session.
    /// NOTE(review): not enforced anywhere in this file — confirm intent.
    pub timeout: Duration,
    /// Significance level (alpha) used for statistical tests.
    pub significance_level: f64,
    /// Whether to record execution timing.
    pub collect_timing: bool,
    /// Whether to sample memory usage before/after each run.
    pub collect_memory: bool,
    /// Whether to record error information for failed runs.
    /// NOTE(review): failed runs are currently recorded unconditionally.
    pub collect_errors: bool,
    /// Optional RNG seed for reproducibility.
    pub seed: Option<u64>,
}
132
133impl Default for BenchmarkConfig {
134 fn default() -> Self {
135 Self {
136 num_runs: 100,
137 warmup_runs: 10,
138 timeout: Duration::from_secs(60),
139 significance_level: 0.05,
140 collect_timing: true,
141 collect_memory: false,
142 collect_errors: true,
143 seed: None,
144 }
145 }
146}
147
/// Benchmark harness that repeatedly executes a circuit and performs
/// statistical analysis over the collected runs.
pub struct CircuitBenchmark {
    /// Benchmark session configuration.
    config: BenchmarkConfig,
    /// Measured (non-warm-up) runs collected in the current session.
    benchmark_data: Vec<BenchmarkRun>,
    /// Stateless helper performing the statistical computations.
    stats_analyzer: StatisticalAnalyzer,
}
157
/// Data captured for a single benchmark run.
#[derive(Debug, Clone)]
pub struct BenchmarkRun {
    /// Sequential run identifier (warm-up runs share the same counter).
    pub run_id: usize,
    /// Wall-clock execution time of the run.
    pub execution_time: Duration,
    /// Memory delta observed during the run, in bytes, if collected.
    pub memory_usage: Option<usize>,
    /// Whether the run completed successfully.
    pub success: bool,
    /// Error description for failed runs.
    pub error_message: Option<String>,
    /// Static structural metrics of the benchmarked circuit.
    pub circuit_metrics: CircuitMetrics,
    /// Raw simulator output, when available.
    pub execution_results: Option<ExecutionResult>,
    /// Noise analysis output, when a noise model was applied.
    pub noise_analysis: Option<NoiseAnalysisResult>,
    /// Arbitrary user-defined metrics keyed by name.
    pub custom_metrics: HashMap<String, f64>,
}
180
/// Static structural metrics of a quantum circuit.
#[derive(Debug, Clone)]
pub struct CircuitMetrics {
    /// Circuit depth.
    /// NOTE(review): currently approximated by the total gate count in
    /// `calculate_circuit_metrics` — confirm before using for scheduling.
    pub depth: usize,
    /// Total number of gates.
    pub gate_count: usize,
    /// Per-gate-name occurrence counts.
    pub gate_counts: HashMap<String, usize>,
    /// Number of gates acting on exactly two qubits.
    pub two_qubit_gates: usize,
    /// Estimated circuit fidelity, if known.
    pub fidelity: Option<f64>,
    /// Estimated error rate, if known.
    pub error_rate: Option<f64>,
}
197
/// Full statistical report produced by a benchmark session.
#[derive(Debug, Clone)]
pub struct BenchmarkReport {
    /// Configuration the session ran with.
    pub config: BenchmarkConfig,
    /// Number of measured (non-warm-up) runs recorded.
    pub completed_runs: usize,
    /// Fraction of completed runs that succeeded, in [0, 1].
    pub success_rate: f64,
    /// Descriptive statistics over successful-run execution times (seconds).
    pub timing_stats: DescriptiveStats,
    /// Memory-usage statistics, when memory collection was enabled.
    pub memory_stats: Option<DescriptiveStats>,
    /// Linear trend of execution time against run index, if computed.
    pub regression_analysis: Option<RegressionAnalysis>,
    /// Best-fitting probability distribution for the timing data, if computed.
    pub distribution_fit: Option<DistributionFit>,
    /// Outlier detection results for the timing data.
    pub outlier_analysis: OutlierAnalysis,
    /// Comparison against a baseline report, when one was supplied.
    pub baseline_comparison: Option<BaselineComparison>,
    /// Results of any hypothesis tests that were run.
    pub statistical_tests: Vec<HypothesisTestResult>,
    /// Human-readable findings derived from the statistics.
    pub insights: Vec<PerformanceInsight>,
}
224
/// Ordinary least-squares trend of execution time over run index.
#[derive(Debug, Clone)]
pub struct RegressionAnalysis {
    /// Fitted slope (seconds of change per run).
    pub slope: f64,
    /// Fitted intercept (predicted seconds at run 0).
    pub intercept: f64,
    /// Coefficient of determination of the fit.
    pub r_squared: f64,
    /// P-value for the hypothesis that the slope is zero.
    pub slope_p_value: f64,
    /// Whether the trend is statistically significant.
    pub significant_trend: bool,
    /// Performance change per run; equal to `slope` (positive = slowing down).
    pub degradation_per_run: f64,
}
241
/// Result of fitting probability distributions to the timing data.
#[derive(Debug, Clone)]
pub struct DistributionFit {
    /// Distribution that fit the data best.
    pub best_distribution: Distribution,
    /// Goodness-of-fit score of the best distribution.
    pub goodness_of_fit: f64,
    /// P-value of the goodness-of-fit test.
    pub fit_p_value: f64,
    /// Other candidate distributions with their fit scores.
    pub alternative_fits: Vec<(Distribution, f64)>,
}
254
/// Result of outlier detection over the timing data.
#[derive(Debug, Clone)]
pub struct OutlierAnalysis {
    /// Number of outliers detected.
    pub num_outliers: usize,
    /// Indices (into the analyzed sample) of the detected outliers.
    pub outlier_indices: Vec<usize>,
    /// Method that was used for detection.
    pub detection_method: OutlierDetectionMethod,
    /// Threshold parameter the method was run with.
    pub threshold: f64,
    /// How much removing the outliers shifts the summary statistics.
    pub outlier_impact: OutlierImpact,
}
269
/// Supported outlier detection strategies.
#[derive(Debug, Clone, PartialEq)]
pub enum OutlierDetectionMethod {
    /// Tukey fences: flag points beyond `multiplier * IQR` outside Q1/Q3.
    IQR { multiplier: f64 },
    /// Flag points whose |z-score| exceeds `threshold`.
    ZScore { threshold: f64 },
    /// Robust (modified) z-score with cutoff `threshold`.
    ModifiedZScore { threshold: f64 },
    /// Isolation-forest based detection (not implemented in this file).
    IsolationForest,
    /// Local-outlier-factor based detection (not implemented in this file).
    LocalOutlierFactor,
}
284
/// How much removing the detected outliers shifts the summary statistics.
#[derive(Debug, Clone)]
pub struct OutlierImpact {
    /// Absolute change in mean after removing outliers.
    pub mean_change: f64,
    /// Absolute change in standard deviation after removing outliers.
    pub std_dev_change: f64,
    /// Absolute change in median after removing outliers.
    pub median_change: f64,
    /// Mean change relative to the original mean, in percent.
    pub relative_impact: f64,
}
297
/// Comparison of the current benchmark results against a baseline report.
#[derive(Debug, Clone)]
pub struct BaselineComparison {
    /// Label identifying the baseline.
    pub baseline_name: String,
    /// Ratio of current mean time to baseline mean time (> 1.0 = slower).
    pub performance_factor: f64,
    /// Hypothesis-test result for the difference between the two samples.
    pub significance: HypothesisTestResult,
    /// Confidence interval for the mean difference.
    pub difference_ci: (f64, f64),
    /// Standardized effect size of the difference.
    pub effect_size: f64,
    /// Qualitative magnitude of the difference.
    pub practical_significance: PracticalSignificance,
}
314
/// Qualitative magnitude bands for a standardized effect size
/// (thresholds 0.2 / 0.5 / 0.8 / 1.2, applied in `compare_with_baseline`).
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PracticalSignificance {
    /// |effect| < 0.2 — no practical difference.
    Negligible,
    /// 0.2 <= |effect| < 0.5.
    Small,
    /// 0.5 <= |effect| < 0.8.
    Medium,
    /// 0.8 <= |effect| < 1.2.
    Large,
    /// |effect| >= 1.2.
    VeryLarge,
}
329
/// A human-readable finding derived from the benchmark statistics.
#[derive(Debug, Clone)]
pub struct PerformanceInsight {
    /// Kind of finding (degradation, variability, outliers, ...).
    pub category: InsightCategory,
    /// Summary of the finding.
    pub message: String,
    /// Confidence in the finding, in [0, 1].
    pub confidence: f64,
    /// Supporting evidence strings (statistics backing the claim).
    pub evidence: Vec<String>,
    /// Suggested follow-up actions.
    pub recommendations: Vec<String>,
}
344
/// Categories of performance insight the report generator can emit.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum InsightCategory {
    /// Execution time is trending upward across runs.
    PerformanceDegradation,
    /// Execution time is trending downward across runs.
    PerformanceImprovement,
    /// Run-to-run timing variance is unusually large.
    HighVariability,
    /// Outlier runs were detected in the timing data.
    OutliersDetected,
    /// Memory-usage related finding.
    MemoryUsage,
    /// Elevated failure rate among runs.
    ErrorRate,
    /// A concrete optimization opportunity was identified.
    OptimizationOpportunity,
}
363
364impl CircuitBenchmark {
365 #[must_use]
367 pub const fn new(config: BenchmarkConfig) -> Self {
368 Self {
369 config,
370 benchmark_data: Vec::new(),
371 stats_analyzer: StatisticalAnalyzer::new(),
372 }
373 }
374
375 pub fn run_benchmark<const N: usize>(
377 &mut self,
378 circuit: &Circuit<N>,
379 simulator: &dyn SimulatorExecutor,
380 noise_model: Option<&NoiseModel>,
381 ) -> QuantRS2Result<BenchmarkReport> {
382 self.benchmark_data.clear();
383
384 let total_runs = self.config.num_runs + self.config.warmup_runs;
385
386 for run_id in 0..total_runs {
387 let is_warmup = run_id < self.config.warmup_runs;
388
389 match self.run_single_benchmark(circuit, simulator, noise_model, run_id) {
390 Ok(run_data) => {
391 if !is_warmup {
392 self.benchmark_data.push(run_data);
393 }
394 }
395 Err(e) => {
396 if !is_warmup {
397 let failed_run = BenchmarkRun {
399 run_id,
400 execution_time: Duration::from_millis(0),
401 memory_usage: None,
402 success: false,
403 error_message: Some(e.to_string()),
404 circuit_metrics: self.calculate_circuit_metrics(circuit),
405 execution_results: None,
406 noise_analysis: None,
407 custom_metrics: HashMap::new(),
408 };
409 self.benchmark_data.push(failed_run);
410 }
411 }
412 }
413 }
414
415 self.generate_benchmark_report()
416 }
417
418 fn run_single_benchmark<const N: usize>(
420 &self,
421 circuit: &Circuit<N>,
422 simulator: &dyn SimulatorExecutor,
423 noise_model: Option<&NoiseModel>,
424 run_id: usize,
425 ) -> QuantRS2Result<BenchmarkRun> {
426 let start_time = Instant::now();
427 let start_memory = if self.config.collect_memory {
428 Some(self.get_memory_usage())
429 } else {
430 None
431 };
432
433 let execution_results = None; let end_time = Instant::now();
437 let end_memory = if self.config.collect_memory {
438 Some(self.get_memory_usage())
439 } else {
440 None
441 };
442
443 let execution_time = end_time - start_time;
444 let memory_usage = match (start_memory, end_memory) {
445 (Some(start), Some(end)) => Some(end.saturating_sub(start)),
446 _ => None,
447 };
448
449 let noise_analysis = if let Some(noise) = noise_model {
451 None
453 } else {
454 None
455 };
456
457 Ok(BenchmarkRun {
458 run_id,
459 execution_time,
460 memory_usage,
461 success: true,
462 error_message: None,
463 circuit_metrics: self.calculate_circuit_metrics(circuit),
464 execution_results,
465 noise_analysis,
466 custom_metrics: HashMap::new(),
467 })
468 }
469
470 fn calculate_circuit_metrics<const N: usize>(&self, circuit: &Circuit<N>) -> CircuitMetrics {
472 let gate_count = circuit.gates().len();
473 let mut gate_counts = HashMap::new();
474 let mut two_qubit_gates = 0;
475
476 for gate in circuit.gates() {
477 let gate_name = gate.name();
478 *gate_counts.entry(gate_name.to_string()).or_insert(0) += 1;
479
480 if gate.qubits().len() == 2 {
481 two_qubit_gates += 1;
482 }
483 }
484
485 CircuitMetrics {
486 depth: gate_count, gate_count,
488 gate_counts,
489 two_qubit_gates,
490 fidelity: None,
491 error_rate: None,
492 }
493 }
494
495 const fn get_memory_usage(&self) -> usize {
497 0
499 }
500
501 fn generate_benchmark_report(&self) -> QuantRS2Result<BenchmarkReport> {
503 let completed_runs = self.benchmark_data.len();
504 let successful_runs: Vec<_> = self
505 .benchmark_data
506 .iter()
507 .filter(|run| run.success)
508 .collect();
509
510 let success_rate = successful_runs.len() as f64 / completed_runs as f64;
511
512 let timing_data: Vec<f64> = successful_runs
514 .iter()
515 .map(|run| run.execution_time.as_secs_f64())
516 .collect();
517
518 let timing_stats = self
519 .stats_analyzer
520 .calculate_descriptive_stats(&timing_data)?;
521
522 let memory_stats = if self.config.collect_memory {
524 let memory_data: Vec<f64> = successful_runs
525 .iter()
526 .filter_map(|run| run.memory_usage.map(|m| m as f64))
527 .collect();
528
529 if memory_data.is_empty() {
530 None
531 } else {
532 Some(
533 self.stats_analyzer
534 .calculate_descriptive_stats(&memory_data)?,
535 )
536 }
537 } else {
538 None
539 };
540
541 let regression_analysis = self
543 .stats_analyzer
544 .perform_regression_analysis(&timing_data)?;
545
546 let distribution_fit = self.stats_analyzer.fit_distributions(&timing_data)?;
548
549 let outlier_analysis = self.stats_analyzer.detect_outliers(
551 &timing_data,
552 OutlierDetectionMethod::IQR { multiplier: 1.5 },
553 )?;
554
555 let insights = self.generate_performance_insights(
557 &timing_stats,
558 ®ression_analysis,
559 &outlier_analysis,
560 success_rate,
561 );
562
563 Ok(BenchmarkReport {
564 config: self.config.clone(),
565 completed_runs,
566 success_rate,
567 timing_stats,
568 memory_stats,
569 regression_analysis: Some(regression_analysis),
570 distribution_fit: Some(distribution_fit),
571 outlier_analysis,
572 baseline_comparison: None,
573 statistical_tests: Vec::new(),
574 insights,
575 })
576 }
577
578 fn generate_performance_insights(
580 &self,
581 timing_stats: &DescriptiveStats,
582 regression: &RegressionAnalysis,
583 outliers: &OutlierAnalysis,
584 success_rate: f64,
585 ) -> Vec<PerformanceInsight> {
586 let mut insights = Vec::new();
587
588 if regression.significant_trend && regression.slope > 0.0 {
590 insights.push(PerformanceInsight {
591 category: InsightCategory::PerformanceDegradation,
592 message: format!(
593 "Significant performance degradation detected: {:.4} seconds per run increase",
594 regression.degradation_per_run
595 ),
596 confidence: 1.0 - regression.slope_p_value,
597 evidence: vec![
598 format!("Linear trend slope: {:.6}", regression.slope),
599 format!("R-squared: {:.4}", regression.r_squared),
600 format!("P-value: {:.4}", regression.slope_p_value),
601 ],
602 recommendations: vec![
603 "Investigate potential memory leaks".to_string(),
604 "Check for resource contention".to_string(),
605 "Profile execution to identify bottlenecks".to_string(),
606 ],
607 });
608 }
609
610 let coefficient_of_variation = timing_stats.std_dev / timing_stats.mean;
612 if coefficient_of_variation > 0.2 {
613 insights.push(PerformanceInsight {
614 category: InsightCategory::HighVariability,
615 message: format!(
616 "High performance variability detected: CV = {:.2}%",
617 coefficient_of_variation * 100.0
618 ),
619 confidence: 0.8,
620 evidence: vec![
621 format!("Standard deviation: {:.4} seconds", timing_stats.std_dev),
622 format!("Mean: {:.4} seconds", timing_stats.mean),
623 format!(
624 "Coefficient of variation: {:.2}%",
625 coefficient_of_variation * 100.0
626 ),
627 ],
628 recommendations: vec![
629 "Increase warm-up runs to stabilize performance".to_string(),
630 "Check for system load variations".to_string(),
631 "Consider running benchmarks in isolated environment".to_string(),
632 ],
633 });
634 }
635
636 if outliers.num_outliers > 0 {
638 let outlier_percentage =
639 outliers.num_outliers as f64 / timing_stats.count as f64 * 100.0;
640 insights.push(PerformanceInsight {
641 category: InsightCategory::OutliersDetected,
642 message: format!(
643 "Performance outliers detected: {} outliers ({:.1}% of runs)",
644 outliers.num_outliers, outlier_percentage
645 ),
646 confidence: 0.9,
647 evidence: vec![
648 format!("Number of outliers: {}", outliers.num_outliers),
649 format!("Outlier percentage: {:.1}%", outlier_percentage),
650 format!("Detection method: {:?}", outliers.detection_method),
651 ],
652 recommendations: vec![
653 "Investigate causes of outlier runs".to_string(),
654 "Consider removing outliers from performance metrics".to_string(),
655 "Check for system interruptions during benchmarking".to_string(),
656 ],
657 });
658 }
659
660 if success_rate < 0.95 {
662 insights.push(PerformanceInsight {
663 category: InsightCategory::ErrorRate,
664 message: format!("Low success rate detected: {:.1}%", success_rate * 100.0),
665 confidence: 1.0,
666 evidence: vec![
667 format!("Success rate: {:.1}%", success_rate * 100.0),
668 format!(
669 "Failed runs: {}",
670 timing_stats.count - (timing_stats.count as f64 * success_rate) as usize
671 ),
672 ],
673 recommendations: vec![
674 "Investigate failure causes".to_string(),
675 "Check circuit validity and simulator compatibility".to_string(),
676 "Increase timeout limits if timeouts are occurring".to_string(),
677 ],
678 });
679 }
680
681 insights
682 }
683
684 pub fn compare_with_baseline(
686 &self,
687 baseline: &BenchmarkReport,
688 ) -> QuantRS2Result<BaselineComparison> {
689 if self.benchmark_data.is_empty() {
690 return Err(QuantRS2Error::InvalidInput(
691 "No benchmark data available for comparison".to_string(),
692 ));
693 }
694
695 let current_timing: Vec<f64> = self
696 .benchmark_data
697 .iter()
698 .filter(|run| run.success)
699 .map(|run| run.execution_time.as_secs_f64())
700 .collect();
701
702 let baseline_mean = baseline.timing_stats.mean;
703 let current_mean = self
704 .stats_analyzer
705 .calculate_descriptive_stats(¤t_timing)?
706 .mean;
707
708 let performance_factor = current_mean / baseline_mean;
709
710 let significance = self.stats_analyzer.mann_whitney_test(
712 ¤t_timing,
713 &[baseline_mean], self.config.significance_level,
715 )?;
716
717 let effect_size = (current_mean - baseline_mean) / baseline.timing_stats.std_dev;
719
720 let practical_significance = match effect_size.abs() {
722 x if x < 0.2 => PracticalSignificance::Negligible,
723 x if x < 0.5 => PracticalSignificance::Small,
724 x if x < 0.8 => PracticalSignificance::Medium,
725 x if x < 1.2 => PracticalSignificance::Large,
726 _ => PracticalSignificance::VeryLarge,
727 };
728
729 Ok(BaselineComparison {
730 baseline_name: "baseline".to_string(),
731 performance_factor,
732 significance,
733 difference_ci: (0.0, 0.0), effect_size,
735 practical_significance,
736 })
737 }
738}
739
740pub struct StatisticalAnalyzer;
742
743impl StatisticalAnalyzer {
744 #[must_use]
746 pub const fn new() -> Self {
747 Self
748 }
749
750 pub fn calculate_descriptive_stats(&self, data: &[f64]) -> QuantRS2Result<DescriptiveStats> {
752 if data.is_empty() {
753 return Err(QuantRS2Error::InvalidInput("Empty data".to_string()));
754 }
755
756 let mut sorted_data = data.to_vec();
757 sorted_data.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
758
759 let count = data.len();
760 let mean = data.iter().sum::<f64>() / count as f64;
761 let variance = data.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / count as f64;
762 let std_dev = variance.sqrt();
763 let min = sorted_data[0];
764 let max = sorted_data[count - 1];
765
766 let median = if count % 2 == 0 {
767 f64::midpoint(sorted_data[count / 2 - 1], sorted_data[count / 2])
768 } else {
769 sorted_data[count / 2]
770 };
771
772 let q1 = self.percentile(&sorted_data, 0.25);
773 let q3 = self.percentile(&sorted_data, 0.75);
774 let iqr = q3 - q1;
775
776 let skewness = self.calculate_skewness(data, mean, std_dev);
778 let kurtosis = self.calculate_kurtosis(data, mean, std_dev);
779
780 Ok(DescriptiveStats {
781 count,
782 mean,
783 std_dev,
784 variance,
785 min,
786 max,
787 median,
788 q1,
789 q3,
790 iqr,
791 skewness,
792 kurtosis,
793 mode: None, })
795 }
796
797 fn percentile(&self, sorted_data: &[f64], p: f64) -> f64 {
799 let index = (p * (sorted_data.len() - 1) as f64).round() as usize;
800 sorted_data[index.min(sorted_data.len() - 1)]
801 }
802
803 fn calculate_skewness(&self, data: &[f64], mean: f64, std_dev: f64) -> f64 {
805 let n = data.len() as f64;
806 let skew_sum = data
807 .iter()
808 .map(|x| ((x - mean) / std_dev).powi(3))
809 .sum::<f64>();
810 skew_sum / n
811 }
812
813 fn calculate_kurtosis(&self, data: &[f64], mean: f64, std_dev: f64) -> f64 {
815 let n = data.len() as f64;
816 let kurt_sum = data
817 .iter()
818 .map(|x| ((x - mean) / std_dev).powi(4))
819 .sum::<f64>();
820 kurt_sum / n - 3.0 }
822
823 pub fn perform_regression_analysis(&self, data: &[f64]) -> QuantRS2Result<RegressionAnalysis> {
825 if data.len() < 3 {
826 return Err(QuantRS2Error::InvalidInput(
827 "Insufficient data for regression".to_string(),
828 ));
829 }
830
831 let n = data.len() as f64;
832 let x_values: Vec<f64> = (0..data.len()).map(|i| i as f64).collect();
833
834 let x_mean = x_values.iter().sum::<f64>() / n;
835 let y_mean = data.iter().sum::<f64>() / n;
836
837 let numerator: f64 = x_values
838 .iter()
839 .zip(data.iter())
840 .map(|(x, y)| (x - x_mean) * (y - y_mean))
841 .sum();
842
843 let denominator: f64 = x_values.iter().map(|x| (x - x_mean).powi(2)).sum();
844
845 let slope = numerator / denominator;
846 let intercept = slope.mul_add(-x_mean, y_mean);
847
848 let ss_tot: f64 = data.iter().map(|y| (y - y_mean).powi(2)).sum();
850 let ss_res: f64 = x_values
851 .iter()
852 .zip(data.iter())
853 .map(|(x, y)| {
854 let predicted = slope * x + intercept;
855 (y - predicted).powi(2)
856 })
857 .sum();
858
859 let r_squared = 1.0 - (ss_res / ss_tot);
860
861 let slope_p_value = if slope.abs() > 0.001 { 0.05 } else { 0.5 };
863 let significant_trend = slope_p_value < 0.05;
864
865 Ok(RegressionAnalysis {
866 slope,
867 intercept,
868 r_squared,
869 slope_p_value,
870 significant_trend,
871 degradation_per_run: slope,
872 })
873 }
874
875 pub fn fit_distributions(&self, data: &[f64]) -> QuantRS2Result<DistributionFit> {
877 let stats = self.calculate_descriptive_stats(data)?;
878
879 let normal_dist = Distribution::Normal {
881 mean: stats.mean,
882 std_dev: stats.std_dev,
883 };
884
885 let goodness_of_fit = 0.8; let fit_p_value = 0.3; Ok(DistributionFit {
890 best_distribution: normal_dist,
891 goodness_of_fit,
892 fit_p_value,
893 alternative_fits: Vec::new(),
894 })
895 }
896
897 pub fn detect_outliers(
899 &self,
900 data: &[f64],
901 method: OutlierDetectionMethod,
902 ) -> QuantRS2Result<OutlierAnalysis> {
903 let outlier_indices = match method {
904 OutlierDetectionMethod::IQR { multiplier } => {
905 self.detect_outliers_iqr(data, multiplier)?
906 }
907 OutlierDetectionMethod::ZScore { threshold } => {
908 self.detect_outliers_zscore(data, threshold)?
909 }
910 _ => Vec::new(), };
912
913 let num_outliers = outlier_indices.len();
914
915 let outlier_impact = if num_outliers > 0 {
917 self.calculate_outlier_impact(data, &outlier_indices)?
918 } else {
919 OutlierImpact {
920 mean_change: 0.0,
921 std_dev_change: 0.0,
922 median_change: 0.0,
923 relative_impact: 0.0,
924 }
925 };
926
927 Ok(OutlierAnalysis {
928 num_outliers,
929 outlier_indices,
930 detection_method: method,
931 threshold: 1.5, outlier_impact,
933 })
934 }
935
936 fn detect_outliers_iqr(&self, data: &[f64], multiplier: f64) -> QuantRS2Result<Vec<usize>> {
938 let stats = self.calculate_descriptive_stats(data)?;
939 let lower_bound = multiplier.mul_add(-stats.iqr, stats.q1);
940 let upper_bound = multiplier.mul_add(stats.iqr, stats.q3);
941
942 Ok(data
943 .iter()
944 .enumerate()
945 .filter_map(|(i, &value)| {
946 if value < lower_bound || value > upper_bound {
947 Some(i)
948 } else {
949 None
950 }
951 })
952 .collect())
953 }
954
955 fn detect_outliers_zscore(&self, data: &[f64], threshold: f64) -> QuantRS2Result<Vec<usize>> {
957 let stats = self.calculate_descriptive_stats(data)?;
958
959 Ok(data
960 .iter()
961 .enumerate()
962 .filter_map(|(i, &value)| {
963 let z_score = (value - stats.mean) / stats.std_dev;
964 if z_score.abs() > threshold {
965 Some(i)
966 } else {
967 None
968 }
969 })
970 .collect())
971 }
972
973 fn calculate_outlier_impact(
975 &self,
976 data: &[f64],
977 outlier_indices: &[usize],
978 ) -> QuantRS2Result<OutlierImpact> {
979 let original_stats = self.calculate_descriptive_stats(data)?;
980
981 let filtered_data: Vec<f64> = data
983 .iter()
984 .enumerate()
985 .filter_map(|(i, &value)| {
986 if outlier_indices.contains(&i) {
987 None
988 } else {
989 Some(value)
990 }
991 })
992 .collect();
993
994 let filtered_stats = self.calculate_descriptive_stats(&filtered_data)?;
995
996 let mean_change = (original_stats.mean - filtered_stats.mean).abs();
997 let std_dev_change = (original_stats.std_dev - filtered_stats.std_dev).abs();
998 let median_change = (original_stats.median - filtered_stats.median).abs();
999 let relative_impact = mean_change / original_stats.mean * 100.0;
1000
1001 Ok(OutlierImpact {
1002 mean_change,
1003 std_dev_change,
1004 median_change,
1005 relative_impact,
1006 })
1007 }
1008
1009 pub fn mann_whitney_test(
1011 &self,
1012 sample1: &[f64],
1013 sample2: &[f64],
1014 significance_level: f64,
1015 ) -> QuantRS2Result<HypothesisTestResult> {
1016 let test_statistic = 0.0; let p_value = 0.1; let critical_value = 1.96; let reject_null = p_value < significance_level;
1021
1022 Ok(HypothesisTestResult {
1023 test_statistic,
1024 p_value,
1025 critical_value,
1026 reject_null,
1027 significance_level,
1028 effect_size: None,
1029 confidence_interval: None,
1030 })
1031 }
1032}
1033
/// Abstraction over a simulator backend capable of executing a circuit.
///
/// The circuit is passed type-erased (`&dyn Any`) so implementors can
/// downcast to the concrete `Circuit<N>` they support.
pub trait SimulatorExecutor {
    /// Execute the given (type-erased) circuit and return its results.
    fn execute(&self, circuit: &dyn std::any::Any) -> QuantRS2Result<ExecutionResult>;
}
1038
1039impl Default for StatisticalAnalyzer {
1040 fn default() -> Self {
1041 Self::new()
1042 }
1043}
1044
#[cfg(test)]
mod tests {
    use super::*;

    // Mean/median/min/max of 1..=5 are exactly representable in f64,
    // so strict float equality is safe here.
    #[test]
    fn test_descriptive_stats() {
        let analyzer = StatisticalAnalyzer::new();
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];

        let stats = analyzer
            .calculate_descriptive_stats(&data)
            .expect("calculate_descriptive_stats should succeed");
        assert_eq!(stats.mean, 3.0);
        assert_eq!(stats.median, 3.0);
        assert_eq!(stats.min, 1.0);
        assert_eq!(stats.max, 5.0);
    }

    // 100.0 lies far outside the Tukey fences of 1..=5, so exactly one
    // outlier is expected, at the index of the 100.0 sample.
    #[test]
    fn test_outlier_detection_iqr() {
        let analyzer = StatisticalAnalyzer::new();
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 100.0];
        let outliers = analyzer
            .detect_outliers_iqr(&data, 1.5)
            .expect("outlier detection should succeed");
        assert_eq!(outliers.len(), 1);
        assert_eq!(outliers[0], 5);
    }

    // Perfectly linear data: OLS must recover slope 1 and R^2 = 1
    // up to floating-point rounding.
    #[test]
    fn test_regression_analysis() {
        let analyzer = StatisticalAnalyzer::new();
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let regression = analyzer
            .perform_regression_analysis(&data)
            .expect("perform_regression_analysis should succeed");
        assert!((regression.slope - 1.0).abs() < 1e-10);
        assert!((regression.r_squared - 1.0).abs() < 1e-10);
    }

    // Defaults must stay in sync with the documented configuration.
    #[test]
    fn test_benchmark_config() {
        let config = BenchmarkConfig::default();
        assert_eq!(config.num_runs, 100);
        assert_eq!(config.warmup_runs, 10);
        assert_eq!(config.significance_level, 0.05);
    }

    // Round-trip construction and pattern matching of a distribution variant.
    #[test]
    fn test_distribution_creation() {
        let normal = Distribution::Normal {
            mean: 0.0,
            std_dev: 1.0,
        };
        match normal {
            Distribution::Normal { mean, std_dev } => {
                assert_eq!(mean, 0.0);
                assert_eq!(std_dev, 1.0);
            }
            _ => panic!("Wrong distribution type"),
        }
    }
}