use scirs2_core::ndarray::{Array1, Array2};
use std::collections::HashMap;

use super::{
    config::{BenchmarkProtocol, DDPerformanceConfig, DDPerformanceMetric},
    sequences::DDSequence,
    DDCircuitExecutor,
};
use crate::DeviceResult;

#[cfg(feature = "scirs2")]
use scirs2_stats::{mean, std};

#[cfg(not(feature = "scirs2"))]
use super::fallback_scirs2::{mean, std};

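/// Complete result of a DD performance analysis pass: raw metric values,
/// benchmark results, statistical summaries, and, when history is available,
/// comparative and trend analyses.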
#[derive(Debug, Clone)]
pub struct DDPerformanceAnalysis {
    pub metrics: HashMap<DDPerformanceMetric, f64>,
    pub benchmark_results: BenchmarkResults,
    pub statistical_analysis: DDStatisticalAnalysis,
    pub comparative_analysis: Option<ComparativeAnalysis>,
    pub performance_trends: PerformanceTrends,
}

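/// Results from the benchmarking protocols enabled in the configuration.
/// Protocols that were not run are `None`.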
#[derive(Debug, Clone)]
pub struct BenchmarkResults {
    pub randomized_benchmarking: Option<RandomizedBenchmarkingResults>,
    pub process_tomography: Option<ProcessTomographyResults>,
    pub gate_set_tomography: Option<GateSetTomographyResults>,
    pub cross_entropy_benchmarking: Option<CrossEntropyResults>,
    pub cycle_benchmarking: Option<CycleBenchmarkingResults>,
}

#[derive(Debug, Clone)]
pub struct RandomizedBenchmarkingResults {
    pub gate_fidelity: f64,
    pub confidence_interval: (f64, f64),
    pub decay_rate: f64,
    pub sequences_tested: usize,
    pub p_value: f64,
}

#[derive(Debug, Clone)]
pub struct ProcessTomographyResults {
    pub process_fidelity: f64,
    pub process_matrix: Array2<f64>,
    pub eigenvalues: Array1<f64>,
    pub completeness: f64,
}

#[derive(Debug, Clone)]
pub struct GateSetTomographyResults {
    pub gate_set_fidelity: f64,
    pub gate_fidelities: HashMap<String, f64>,
    pub spam_errors: HashMap<String, f64>,
    pub model_consistency: f64,
}

#[derive(Debug, Clone)]
pub struct CrossEntropyResults {
    pub cross_entropy_score: f64,
    pub linear_xeb_fidelity: f64,
    pub quantum_volume: usize,
    pub confidence_level: f64,
}

#[derive(Debug, Clone)]
pub struct CycleBenchmarkingResults {
    pub cycle_fidelity: f64,
    pub systematic_error_rate: f64,
    pub stochastic_error_rate: f64,
    pub leakage_rate: f64,
}

#[derive(Debug, Clone)]
pub struct DDStatisticalAnalysis {
    pub descriptive_stats: DescriptiveStatistics,
    pub hypothesis_tests: HypothesisTestResults,
    pub correlation_analysis: CorrelationAnalysis,
    pub distribution_analysis: DistributionAnalysis,
    pub confidence_intervals: ConfidenceIntervals,
}

#[derive(Debug, Clone)]
pub struct DescriptiveStatistics {
    pub means: HashMap<String, f64>,
    pub standard_deviations: HashMap<String, f64>,
    pub medians: HashMap<String, f64>,
    pub percentiles: HashMap<String, Vec<f64>>,
    pub ranges: HashMap<String, (f64, f64)>,
}

#[derive(Debug, Clone)]
pub struct HypothesisTestResults {
    pub t_test_results: HashMap<String, TTestResult>,
    pub ks_test_results: HashMap<String, KSTestResult>,
    pub normality_tests: HashMap<String, NormalityTestResult>,
}

#[derive(Debug, Clone)]
pub struct TTestResult {
    pub statistic: f64,
    pub p_value: f64,
    pub significant: bool,
    pub effect_size: f64,
}

#[derive(Debug, Clone)]
pub struct KSTestResult {
    pub statistic: f64,
    pub p_value: f64,
    pub significant: bool,
}

#[derive(Debug, Clone)]
pub struct NormalityTestResult {
    pub shapiro_statistic: f64,
    pub shapiro_p_value: f64,
    pub is_normal: bool,
}

#[derive(Debug, Clone)]
pub struct CorrelationAnalysis {
    pub pearson_correlations: Array2<f64>,
    pub spearman_correlations: Array2<f64>,
    pub significant_correlations: Vec<(String, String, f64)>,
}

#[derive(Debug, Clone)]
pub struct DistributionAnalysis {
    pub best_fit_distributions: HashMap<String, String>,
    pub distribution_parameters: HashMap<String, Vec<f64>>,
    pub goodness_of_fit: HashMap<String, f64>,
}

#[derive(Debug, Clone)]
pub struct ConfidenceIntervals {
    pub mean_intervals: HashMap<String, (f64, f64)>,
    pub bootstrap_intervals: HashMap<String, (f64, f64)>,
    pub prediction_intervals: HashMap<String, (f64, f64)>,
}

#[derive(Debug, Clone)]
pub struct ComparativeAnalysis {
    pub relative_improvements: HashMap<DDPerformanceMetric, f64>,
    pub significance_tests: HashMap<DDPerformanceMetric, bool>,
    pub effect_sizes: HashMap<DDPerformanceMetric, f64>,
    pub performance_ranking: usize,
}

#[derive(Debug, Clone)]
pub struct PerformanceTrends {
    pub trend_slopes: HashMap<DDPerformanceMetric, f64>,
    pub trend_significance: HashMap<DDPerformanceMetric, f64>,
    pub seasonality: HashMap<DDPerformanceMetric, bool>,
    pub outliers: HashMap<DDPerformanceMetric, Vec<usize>>,
}

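/// Analyzer that evaluates DD sequence performance, runs the configured
/// benchmark protocols, and accumulates results for comparative and trend
/// analysis.
///
/// Minimal usage sketch; the `config`, `sequence`, and `executor` bindings
/// are assumed to be provided by the surrounding application:
///
/// ```ignore
/// let mut analyzer = DDPerformanceAnalyzer::new(config);
/// let analysis = analyzer.analyze_performance(&sequence, &executor).await?;
/// println!("metrics: {:?}", analysis.metrics);
/// ```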
pub struct DDPerformanceAnalyzer {
    pub config: DDPerformanceConfig,
    pub historical_data: Vec<DDPerformanceAnalysis>,
}

impl DDPerformanceAnalyzer {
    /// Creates an analyzer with the given configuration and empty history.
    pub const fn new(config: DDPerformanceConfig) -> Self {
        Self {
            config,
            historical_data: Vec::new(),
        }
    }

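    /// Runs the full analysis pipeline: metric calculation, optional
    /// benchmarking, statistical analysis, comparison against historical data
    /// (skipped on the first run), and trend analysis. The finished analysis
    /// is appended to `historical_data` before being returned.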
    pub async fn analyze_performance(
        &mut self,
        sequence: &DDSequence,
        executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<DDPerformanceAnalysis> {
        println!("Starting DD performance analysis");
        let start_time = std::time::Instant::now();

        let metrics = self
            .calculate_performance_metrics(sequence, executor)
            .await?;

        let benchmark_results = if self.config.enable_benchmarking {
            self.run_benchmarks(sequence, executor).await?
        } else {
            BenchmarkResults {
                randomized_benchmarking: None,
                process_tomography: None,
                gate_set_tomography: None,
                cross_entropy_benchmarking: None,
                cycle_benchmarking: None,
            }
        };

        let statistical_analysis = self.perform_statistical_analysis(&metrics, sequence)?;

        let comparative_analysis = if self.historical_data.is_empty() {
            None
        } else {
            Some(self.perform_comparative_analysis(&metrics)?)
        };

        let performance_trends = self.analyze_performance_trends(&metrics)?;

        let analysis = DDPerformanceAnalysis {
            metrics,
            benchmark_results,
            statistical_analysis,
            comparative_analysis,
            performance_trends,
        };

        self.historical_data.push(analysis.clone());

        println!(
            "DD performance analysis completed in {:?}",
            start_time.elapsed()
        );
        Ok(analysis)
    }

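    /// Evaluates each metric requested in the configuration for the given
    /// sequence.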
    async fn calculate_performance_metrics(
        &self,
        sequence: &DDSequence,
        executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<HashMap<DDPerformanceMetric, f64>> {
        let mut metrics = HashMap::new();

        for metric in &self.config.metrics {
            let value = match metric {
                DDPerformanceMetric::CoherenceTime => {
                    self.measure_coherence_time(sequence, executor).await?
                }
                DDPerformanceMetric::ProcessFidelity => {
                    self.measure_process_fidelity(sequence, executor).await?
                }
                DDPerformanceMetric::GateOverhead => sequence.properties.pulse_count as f64,
                DDPerformanceMetric::TimeOverhead => sequence.duration * 1e6, // seconds -> µs
                DDPerformanceMetric::RobustnessScore => {
                    self.calculate_robustness_score(sequence, executor).await?
                }
                DDPerformanceMetric::NoiseSuppressionFactor => {
                    self.calculate_noise_suppression(sequence, executor).await?
                }
                DDPerformanceMetric::ResourceEfficiency => {
                    self.calculate_resource_efficiency(sequence, executor)
                        .await?
                }
            };

            metrics.insert(metric.clone(), value);
        }

        Ok(metrics)
    }

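    /// Estimates the DD-enhanced coherence time in microseconds.
    ///
    /// This is a simplified analytical model rather than a hardware
    /// measurement: an assumed 50 µs baseline T2 is scaled by the average
    /// noise-suppression factor of the sequence.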
    pub async fn measure_coherence_time(
        &self,
        sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<f64> {
        let base_t2 = 50e-6; // 50 µs baseline T2
        let enhancement_factor: f64 = sequence.properties.noise_suppression.values().sum();
        let suppression_factor =
            1.0 + enhancement_factor / sequence.properties.noise_suppression.len().max(1) as f64;

        Ok(base_t2 * suppression_factor * 1e6) // return in microseconds
    }

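    /// Estimates process fidelity from sequence properties: higher sequence
    /// order raises the estimate, each extra pulse applies a small penalty,
    /// and the result is clamped to [0, 1].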
    pub async fn measure_process_fidelity(
        &self,
        sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<f64> {
        let base_fidelity = 0.99;
        let order_factor = 0.001 * (sequence.properties.sequence_order as f64);
        let overhead_penalty = -0.0001 * (sequence.properties.pulse_count as f64);

        Ok((base_fidelity + order_factor + overhead_penalty).clamp(0.0, 1.0))
    }

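    /// Heuristic robustness score: 0.25 per satisfied symmetry property plus
    /// up to 0.5 for the diversity of suppressed noise types.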
    pub async fn calculate_robustness_score(
        &self,
        sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<f64> {
        let mut robustness = 0.0;

        if sequence.properties.symmetry.time_reversal {
            robustness += 0.25;
        }
        if sequence.properties.symmetry.phase_symmetry {
            robustness += 0.25;
        }
        if sequence.properties.symmetry.rotational_symmetry {
            robustness += 0.25;
        }
        if sequence.properties.symmetry.inversion_symmetry {
            robustness += 0.25;
        }

        let noise_diversity = sequence.properties.noise_suppression.len() as f64 / 10.0;
        robustness += noise_diversity.min(0.5);

        Ok(robustness)
    }

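    /// Average suppression factor across the noise types targeted by the
    /// sequence.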
    async fn calculate_noise_suppression(
        &self,
        sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<f64> {
        let avg_suppression: f64 = sequence.properties.noise_suppression.values().sum::<f64>()
            / sequence.properties.noise_suppression.len().max(1) as f64;
        Ok(avg_suppression)
    }

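    /// Coherence improvement per pulse: the estimated coherence gain relative
    /// to the 50 µs baseline, divided by the sequence's pulse count.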
    async fn calculate_resource_efficiency(
        &self,
        sequence: &DDSequence,
        executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<f64> {
        // Coherence time is returned in µs; normalize by the 50 µs baseline.
        let coherence_improvement = self.measure_coherence_time(sequence, executor).await? / 50.0;
        let resource_cost = sequence.properties.pulse_count as f64;

        Ok(coherence_improvement / resource_cost.max(1.0))
    }

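    /// Runs every benchmark protocol selected in the configuration and
    /// collects the results.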
    async fn run_benchmarks(
        &self,
        sequence: &DDSequence,
        executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<BenchmarkResults> {
        let mut results = BenchmarkResults {
            randomized_benchmarking: None,
            process_tomography: None,
            gate_set_tomography: None,
            cross_entropy_benchmarking: None,
            cycle_benchmarking: None,
        };

        for protocol in &self.config.benchmarking_config.protocols {
            match protocol {
                BenchmarkProtocol::RandomizedBenchmarking => {
                    results.randomized_benchmarking =
                        Some(self.run_randomized_benchmarking(sequence, executor).await?);
                }
                BenchmarkProtocol::ProcessTomography => {
                    results.process_tomography =
                        Some(self.run_process_tomography(sequence, executor).await?);
                }
                BenchmarkProtocol::GateSetTomography => {
                    results.gate_set_tomography =
                        Some(self.run_gate_set_tomography(sequence, executor).await?);
                }
                BenchmarkProtocol::CrossEntropyBenchmarking => {
                    results.cross_entropy_benchmarking = Some(
                        self.run_cross_entropy_benchmarking(sequence, executor)
                            .await?,
                    );
                }
                BenchmarkProtocol::CycleBenchmarking => {
                    results.cycle_benchmarking =
                        Some(self.run_cycle_benchmarking(sequence, executor).await?);
                }
            }
        }

        Ok(results)
    }

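    /// Placeholder randomized-benchmarking run; the returned figures are
    /// representative constants. A full implementation would execute random
    /// Clifford sequences and fit the survival-probability decay. The same
    /// caveat applies to the other benchmark runners below.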
    async fn run_randomized_benchmarking(
        &self,
        _sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<RandomizedBenchmarkingResults> {
        Ok(RandomizedBenchmarkingResults {
            gate_fidelity: 0.995,
            confidence_interval: (0.990, 0.999),
            decay_rate: 0.005,
            sequences_tested: self.config.benchmarking_config.benchmark_runs,
            p_value: 0.001,
        })
    }

    async fn run_process_tomography(
        &self,
        _sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<ProcessTomographyResults> {
        Ok(ProcessTomographyResults {
            process_fidelity: 0.98,
            process_matrix: Array2::eye(4),
            eigenvalues: Array1::from_vec(vec![1.0, 0.99, 0.98, 0.97]),
            completeness: 0.99,
        })
    }

    async fn run_gate_set_tomography(
        &self,
        _sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<GateSetTomographyResults> {
        let mut gate_fidelities = HashMap::new();
        gate_fidelities.insert("X".to_string(), 0.995);
        gate_fidelities.insert("Y".to_string(), 0.994);
        gate_fidelities.insert("Z".to_string(), 0.999);

        let mut spam_errors = HashMap::new();
        spam_errors.insert("prep_error".to_string(), 0.001);
        spam_errors.insert("meas_error".to_string(), 0.002);

        Ok(GateSetTomographyResults {
            gate_set_fidelity: 0.996,
            gate_fidelities,
            spam_errors,
            model_consistency: 0.98,
        })
    }

    async fn run_cross_entropy_benchmarking(
        &self,
        _sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<CrossEntropyResults> {
        Ok(CrossEntropyResults {
            cross_entropy_score: 2.1,
            linear_xeb_fidelity: 0.92,
            quantum_volume: 64,
            confidence_level: 0.95,
        })
    }

    async fn run_cycle_benchmarking(
        &self,
        _sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<CycleBenchmarkingResults> {
        Ok(CycleBenchmarkingResults {
            cycle_fidelity: 0.993,
            systematic_error_rate: 0.002,
            stochastic_error_rate: 0.005,
            leakage_rate: 0.0001,
        })
    }

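    /// Builds the statistical summary. With only one sample per metric, the
    /// spreads, percentiles, and ranges are heuristic placeholders derived
    /// from the point value, and the remaining analyses start out empty.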
    fn perform_statistical_analysis(
        &self,
        metrics: &HashMap<DDPerformanceMetric, f64>,
        _sequence: &DDSequence,
    ) -> DeviceResult<DDStatisticalAnalysis> {
        let mut means = HashMap::new();
        let mut standard_deviations = HashMap::new();
        let mut medians = HashMap::new();
        let mut percentiles = HashMap::new();
        let mut ranges = HashMap::new();

        for (metric, &value) in metrics {
            let metric_name = format!("{metric:?}");
            means.insert(metric_name.clone(), value);
            standard_deviations.insert(metric_name.clone(), value * 0.1); // placeholder: 10% of value
            medians.insert(metric_name.clone(), value);
            percentiles.insert(
                metric_name.clone(),
                vec![value * 0.9, value * 1.1, value * 1.2, value * 1.3],
            );
            ranges.insert(metric_name, (value * 0.8, value * 1.2));
        }

        let descriptive_stats = DescriptiveStatistics {
            means,
            standard_deviations,
            medians,
            percentiles,
            ranges,
        };

        let hypothesis_tests = HypothesisTestResults {
            t_test_results: HashMap::new(),
            ks_test_results: HashMap::new(),
            normality_tests: HashMap::new(),
        };

        let correlation_analysis = CorrelationAnalysis {
            pearson_correlations: Array2::eye(metrics.len().max(1)),
            spearman_correlations: Array2::eye(metrics.len().max(1)),
            significant_correlations: Vec::new(),
        };

        let distribution_analysis = DistributionAnalysis {
            best_fit_distributions: HashMap::new(),
            distribution_parameters: HashMap::new(),
            goodness_of_fit: HashMap::new(),
        };

        let confidence_intervals = ConfidenceIntervals {
            mean_intervals: HashMap::new(),
            bootstrap_intervals: HashMap::new(),
            prediction_intervals: HashMap::new(),
        };

        Ok(DDStatisticalAnalysis {
            descriptive_stats,
            hypothesis_tests,
            correlation_analysis,
            distribution_analysis,
            confidence_intervals,
        })
    }

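    /// Computes per-metric descriptive statistics (mean, sample standard
    /// deviation, median, 25th/75th/95th/99th percentiles, and range) from
    /// raw sample arrays, using `scirs2_stats` when the feature is enabled.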
    fn calculate_descriptive_statistics(
        &self,
        data: &HashMap<String, Array1<f64>>,
    ) -> DeviceResult<DescriptiveStatistics> {
        let mut means = HashMap::new();
        let mut standard_deviations = HashMap::new();
        let mut medians = HashMap::new();
        let mut percentiles = HashMap::new();
        let mut ranges = HashMap::new();

        for (metric_name, values) in data {
            if values.is_empty() {
                continue; // skip metrics with no samples
            }

            #[cfg(feature = "scirs2")]
            let mean_val = mean(&values.view()).unwrap_or(0.0);
            #[cfg(not(feature = "scirs2"))]
            let mean_val = values.sum() / values.len() as f64;

            #[cfg(feature = "scirs2")]
            let std_val = std(&values.view(), 1, None).unwrap_or(1.0);
            #[cfg(not(feature = "scirs2"))]
            let std_val = {
                // Sample standard deviation with Bessel's correction.
                let variance = values.iter().map(|x| (x - mean_val).powi(2)).sum::<f64>()
                    / (values.len() - 1).max(1) as f64;
                variance.sqrt()
            };

            means.insert(metric_name.clone(), mean_val);
            standard_deviations.insert(metric_name.clone(), std_val);

            let mut sorted = values.to_vec();
            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
            let len = sorted.len();

            let median = if len % 2 == 0 {
                f64::midpoint(sorted[len / 2 - 1], sorted[len / 2])
            } else {
                sorted[len / 2]
            };
            medians.insert(metric_name.clone(), median);

            // 25th, 75th, 95th, and 99th percentiles (nearest-rank).
            let p25 = sorted[len / 4];
            let p75 = sorted[3 * len / 4];
            let p95 = sorted[95 * len / 100];
            let p99 = sorted[99 * len / 100];

            percentiles.insert(metric_name.clone(), vec![p25, p75, p95, p99]);
            ranges.insert(metric_name.clone(), (sorted[0], sorted[len - 1]));
        }

        Ok(DescriptiveStatistics {
            means,
            standard_deviations,
            medians,
            percentiles,
            ranges,
        })
    }

    fn perform_hypothesis_tests(
        &self,
        _data: &HashMap<String, Array1<f64>>,
    ) -> DeviceResult<HypothesisTestResults> {
        Ok(HypothesisTestResults {
            t_test_results: HashMap::new(),
            ks_test_results: HashMap::new(),
            normality_tests: HashMap::new(),
        })
    }

    fn perform_correlation_analysis(
        &self,
        data: &HashMap<String, Array1<f64>>,
    ) -> DeviceResult<CorrelationAnalysis> {
        let n_metrics = data.len();
        let pearson_correlations = Array2::eye(n_metrics);
        let spearman_correlations = Array2::eye(n_metrics);

        Ok(CorrelationAnalysis {
            pearson_correlations,
            spearman_correlations,
            significant_correlations: Vec::new(),
        })
    }

    fn analyze_distributions(
        &self,
        _data: &HashMap<String, Array1<f64>>,
    ) -> DeviceResult<DistributionAnalysis> {
        Ok(DistributionAnalysis {
            best_fit_distributions: HashMap::new(),
            distribution_parameters: HashMap::new(),
            goodness_of_fit: HashMap::new(),
        })
    }

    fn calculate_confidence_intervals(
        &self,
        _data: &HashMap<String, Array1<f64>>,
    ) -> DeviceResult<ConfidenceIntervals> {
        Ok(ConfidenceIntervals {
            mean_intervals: HashMap::new(),
            bootstrap_intervals: HashMap::new(),
            prediction_intervals: HashMap::new(),
        })
    }

    fn perform_comparative_analysis(
        &self,
        _metrics: &HashMap<DDPerformanceMetric, f64>,
    ) -> DeviceResult<ComparativeAnalysis> {
        Ok(ComparativeAnalysis {
            relative_improvements: HashMap::new(),
            significance_tests: HashMap::new(),
            effect_sizes: HashMap::new(),
            performance_ranking: 1,
        })
    }

    fn analyze_performance_trends(
        &self,
        _metrics: &HashMap<DDPerformanceMetric, f64>,
    ) -> DeviceResult<PerformanceTrends> {
        Ok(PerformanceTrends {
            trend_slopes: HashMap::new(),
            trend_significance: HashMap::new(),
            seasonality: HashMap::new(),
            outliers: HashMap::new(),
        })
    }
}
755}