use scirs2_core::ndarray::{Array1, Array2};
use std::collections::HashMap;
use std::time::Duration;

use super::{
    config::{BenchmarkProtocol, DDPerformanceConfig, DDPerformanceMetric, StatisticalDepth},
    sequences::DDSequence,
    DDCircuitExecutor,
};
use crate::DeviceResult;

#[cfg(feature = "scirs2")]
use scirs2_stats::{ks_2samp, mean, pearsonr, shapiro_wilk, spearmanr, std, ttest_1samp};

#[cfg(not(feature = "scirs2"))]
use super::fallback_scirs2::{mean, pearsonr, std};

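/// Aggregate result of a dynamical decoupling (DD) performance analysis:
/// raw metric values, optional benchmarking results, statistical summaries,
/// an optional comparison against historical runs, and trend information.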
#[derive(Debug, Clone)]
pub struct DDPerformanceAnalysis {
    pub metrics: HashMap<DDPerformanceMetric, f64>,
    pub benchmark_results: BenchmarkResults,
    pub statistical_analysis: DDStatisticalAnalysis,
    pub comparative_analysis: Option<ComparativeAnalysis>,
    pub performance_trends: PerformanceTrends,
}

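/// Results from the benchmarking protocols enabled in the configuration.
/// Each field is `None` when the corresponding protocol was not run.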
#[derive(Debug, Clone)]
pub struct BenchmarkResults {
    pub randomized_benchmarking: Option<RandomizedBenchmarkingResults>,
    pub process_tomography: Option<ProcessTomographyResults>,
    pub gate_set_tomography: Option<GateSetTomographyResults>,
    pub cross_entropy_benchmarking: Option<CrossEntropyResults>,
    pub cycle_benchmarking: Option<CycleBenchmarkingResults>,
}

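/// Summary of a randomized benchmarking run: the extracted average gate
/// fidelity with its confidence interval, the fitted decay rate, the number
/// of sequences sampled, and the p-value of the fit.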
#[derive(Debug, Clone)]
pub struct RandomizedBenchmarkingResults {
    pub gate_fidelity: f64,
    pub confidence_interval: (f64, f64),
    pub decay_rate: f64,
    pub sequences_tested: usize,
    pub p_value: f64,
}

#[derive(Debug, Clone)]
pub struct ProcessTomographyResults {
    pub process_fidelity: f64,
    pub process_matrix: Array2<f64>,
    pub eigenvalues: Array1<f64>,
    pub completeness: f64,
}

#[derive(Debug, Clone)]
pub struct GateSetTomographyResults {
    pub gate_set_fidelity: f64,
    pub gate_fidelities: HashMap<String, f64>,
    pub spam_errors: HashMap<String, f64>,
    pub model_consistency: f64,
}

#[derive(Debug, Clone)]
pub struct CrossEntropyResults {
    pub cross_entropy_score: f64,
    pub linear_xeb_fidelity: f64,
    pub quantum_volume: usize,
    pub confidence_level: f64,
}

#[derive(Debug, Clone)]
pub struct CycleBenchmarkingResults {
    pub cycle_fidelity: f64,
    pub systematic_error_rate: f64,
    pub stochastic_error_rate: f64,
    pub leakage_rate: f64,
}

#[derive(Debug, Clone)]
pub struct DDStatisticalAnalysis {
    pub descriptive_stats: DescriptiveStatistics,
    pub hypothesis_tests: HypothesisTestResults,
    pub correlation_analysis: CorrelationAnalysis,
    pub distribution_analysis: DistributionAnalysis,
    pub confidence_intervals: ConfidenceIntervals,
}

#[derive(Debug, Clone)]
pub struct DescriptiveStatistics {
    pub means: HashMap<String, f64>,
    pub standard_deviations: HashMap<String, f64>,
    pub medians: HashMap<String, f64>,
    pub percentiles: HashMap<String, Vec<f64>>,
    pub ranges: HashMap<String, (f64, f64)>,
}

#[derive(Debug, Clone)]
pub struct HypothesisTestResults {
    pub t_test_results: HashMap<String, TTestResult>,
    pub ks_test_results: HashMap<String, KSTestResult>,
    pub normality_tests: HashMap<String, NormalityTestResult>,
}

#[derive(Debug, Clone)]
pub struct TTestResult {
    pub statistic: f64,
    pub p_value: f64,
    pub significant: bool,
    pub effect_size: f64,
}

#[derive(Debug, Clone)]
pub struct KSTestResult {
    pub statistic: f64,
    pub p_value: f64,
    pub significant: bool,
}

#[derive(Debug, Clone)]
pub struct NormalityTestResult {
    pub shapiro_statistic: f64,
    pub shapiro_p_value: f64,
    pub is_normal: bool,
}

#[derive(Debug, Clone)]
pub struct CorrelationAnalysis {
    pub pearson_correlations: Array2<f64>,
    pub spearman_correlations: Array2<f64>,
    pub significant_correlations: Vec<(String, String, f64)>,
}

#[derive(Debug, Clone)]
pub struct DistributionAnalysis {
    pub best_fit_distributions: HashMap<String, String>,
    pub distribution_parameters: HashMap<String, Vec<f64>>,
    pub goodness_of_fit: HashMap<String, f64>,
}

#[derive(Debug, Clone)]
pub struct ConfidenceIntervals {
    pub mean_intervals: HashMap<String, (f64, f64)>,
    pub bootstrap_intervals: HashMap<String, (f64, f64)>,
    pub prediction_intervals: HashMap<String, (f64, f64)>,
}

#[derive(Debug, Clone)]
pub struct ComparativeAnalysis {
    pub relative_improvements: HashMap<DDPerformanceMetric, f64>,
    pub significance_tests: HashMap<DDPerformanceMetric, bool>,
    pub effect_sizes: HashMap<DDPerformanceMetric, f64>,
    pub performance_ranking: usize,
}

#[derive(Debug, Clone)]
pub struct PerformanceTrends {
    pub trend_slopes: HashMap<DDPerformanceMetric, f64>,
    pub trend_significance: HashMap<DDPerformanceMetric, f64>,
    pub seasonality: HashMap<DDPerformanceMetric, bool>,
    pub outliers: HashMap<DDPerformanceMetric, Vec<usize>>,
}

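/// Drives the full analysis pipeline and accumulates results across runs so
/// that later analyses can be compared against history.
///
/// A minimal usage sketch (marked `ignore` because constructing a
/// `DDSequence` and a `DDCircuitExecutor` implementation is device-specific
/// and outside this module):
///
/// ```ignore
/// let mut analyzer = DDPerformanceAnalyzer::new(config);
/// let analysis = analyzer.analyze_performance(&sequence, &executor).await?;
/// println!("metrics: {:?}", analysis.metrics);
/// ```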
pub struct DDPerformanceAnalyzer {
    pub config: DDPerformanceConfig,
    pub historical_data: Vec<DDPerformanceAnalysis>,
}

impl DDPerformanceAnalyzer {
    pub fn new(config: DDPerformanceConfig) -> Self {
        Self {
            config,
            historical_data: Vec::new(),
        }
    }

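    /// Runs the complete analysis for one DD sequence: metric calculation,
    /// optional benchmarking, statistical analysis, comparison against
    /// historical data (when any exists), and trend analysis. The finished
    /// analysis is appended to `historical_data` before being returned.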
    pub async fn analyze_performance(
        &mut self,
        sequence: &DDSequence,
        executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<DDPerformanceAnalysis> {
        println!("Starting DD performance analysis");
        let start_time = std::time::Instant::now();

        let metrics = self
            .calculate_performance_metrics(sequence, executor)
            .await?;

        let benchmark_results = if self.config.enable_benchmarking {
            self.run_benchmarks(sequence, executor).await?
        } else {
            BenchmarkResults {
                randomized_benchmarking: None,
                process_tomography: None,
                gate_set_tomography: None,
                cross_entropy_benchmarking: None,
                cycle_benchmarking: None,
            }
        };

        let statistical_analysis = self.perform_statistical_analysis(&metrics, sequence)?;

        let comparative_analysis = if !self.historical_data.is_empty() {
            Some(self.perform_comparative_analysis(&metrics)?)
        } else {
            None
        };

        let performance_trends = self.analyze_performance_trends(&metrics)?;

        let analysis = DDPerformanceAnalysis {
            metrics,
            benchmark_results,
            statistical_analysis,
            comparative_analysis,
            performance_trends,
        };

        self.historical_data.push(analysis.clone());

        println!(
            "DD performance analysis completed in {:?}",
            start_time.elapsed()
        );
        Ok(analysis)
    }

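    /// Evaluates every metric requested in the configuration. Gate and time
    /// overheads are read directly from the sequence properties; the
    /// remaining metrics are estimated by the helper methods below.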
    async fn calculate_performance_metrics(
        &self,
        sequence: &DDSequence,
        executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<HashMap<DDPerformanceMetric, f64>> {
        let mut metrics = HashMap::new();

        for metric in &self.config.metrics {
            let value = match metric {
                DDPerformanceMetric::CoherenceTime => {
                    self.measure_coherence_time(sequence, executor).await?
                }
                DDPerformanceMetric::ProcessFidelity => {
                    self.measure_process_fidelity(sequence, executor).await?
                }
                DDPerformanceMetric::GateOverhead => sequence.properties.pulse_count as f64,
                DDPerformanceMetric::TimeOverhead => {
                    // Report the sequence duration in microseconds.
                    sequence.duration * 1e6
                }
                DDPerformanceMetric::RobustnessScore => {
                    self.calculate_robustness_score(sequence, executor).await?
                }
                DDPerformanceMetric::NoiseSuppressionFactor => {
                    self.calculate_noise_suppression(sequence, executor).await?
                }
                DDPerformanceMetric::ResourceEfficiency => {
                    self.calculate_resource_efficiency(sequence, executor)
                        .await?
                }
            };

            metrics.insert(metric.clone(), value);
        }

        Ok(metrics)
    }

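    /// Estimates the DD-enhanced coherence time in microseconds. This is a
    /// model-based estimate rather than a hardware measurement: a nominal
    /// base T2 is scaled by the mean noise-suppression factor of the
    /// sequence.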
    pub async fn measure_coherence_time(
        &self,
        sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<f64> {
        // Nominal baseline T2 of 50 µs, expressed in seconds.
        let base_t2 = 50e-6;
        let enhancement_factor: f64 = sequence.properties.noise_suppression.values().sum();
        // The max(1) guard keeps an empty suppression map from dividing by zero.
        let n_noise_types = sequence.properties.noise_suppression.len().max(1) as f64;
        let suppression_factor = 1.0 + enhancement_factor / n_noise_types;

        // Return the enhanced coherence time in microseconds.
        Ok(base_t2 * suppression_factor * 1e6)
    }

    pub async fn measure_process_fidelity(
        &self,
        sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<f64> {
        let base_fidelity = 0.99;
        let order_factor = 0.001 * (sequence.properties.sequence_order as f64);
        let overhead_penalty = -0.0001 * (sequence.properties.pulse_count as f64);

        Ok((base_fidelity + order_factor + overhead_penalty).clamp(0.0, 1.0))
    }

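    /// Scores robustness on a heuristic 0.0..=1.5 scale: 0.25 for each
    /// symmetry property the sequence satisfies, plus up to 0.5 for the
    /// diversity of noise channels it suppresses.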
    pub async fn calculate_robustness_score(
        &self,
        sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<f64> {
        let mut robustness = 0.0;

        if sequence.properties.symmetry.time_reversal {
            robustness += 0.25;
        }
        if sequence.properties.symmetry.phase_symmetry {
            robustness += 0.25;
        }
        if sequence.properties.symmetry.rotational_symmetry {
            robustness += 0.25;
        }
        if sequence.properties.symmetry.inversion_symmetry {
            robustness += 0.25;
        }

        // Credit up to 0.5 for the diversity of suppressed noise channels.
        let noise_diversity = sequence.properties.noise_suppression.len() as f64 / 10.0;
        robustness += noise_diversity.min(0.5);

        Ok(robustness)
    }

    async fn calculate_noise_suppression(
        &self,
        sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<f64> {
        // Average suppression across channels; the max(1) guard keeps an
        // empty map from producing NaN.
        let n_channels = sequence.properties.noise_suppression.len().max(1) as f64;
        let avg_suppression: f64 =
            sequence.properties.noise_suppression.values().sum::<f64>() / n_channels;
        Ok(avg_suppression)
    }

    async fn calculate_resource_efficiency(
        &self,
        sequence: &DDSequence,
        executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<f64> {
        // Coherence gain relative to the 50 µs baseline, per pulse spent.
        let coherence_improvement =
            self.measure_coherence_time(sequence, executor).await? / 50.0;
        let resource_cost = sequence.properties.pulse_count as f64;

        Ok(coherence_improvement / resource_cost.max(1.0))
    }

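    /// Runs every benchmarking protocol selected in the configuration and
    /// collects the results; protocols that are not selected stay `None`.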
    async fn run_benchmarks(
        &self,
        sequence: &DDSequence,
        executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<BenchmarkResults> {
        let mut results = BenchmarkResults {
            randomized_benchmarking: None,
            process_tomography: None,
            gate_set_tomography: None,
            cross_entropy_benchmarking: None,
            cycle_benchmarking: None,
        };

        for protocol in &self.config.benchmarking_config.protocols {
            match protocol {
                BenchmarkProtocol::RandomizedBenchmarking => {
                    results.randomized_benchmarking =
                        Some(self.run_randomized_benchmarking(sequence, executor).await?);
                }
                BenchmarkProtocol::ProcessTomography => {
                    results.process_tomography =
                        Some(self.run_process_tomography(sequence, executor).await?);
                }
                BenchmarkProtocol::GateSetTomography => {
                    results.gate_set_tomography =
                        Some(self.run_gate_set_tomography(sequence, executor).await?);
                }
                BenchmarkProtocol::CrossEntropyBenchmarking => {
                    results.cross_entropy_benchmarking = Some(
                        self.run_cross_entropy_benchmarking(sequence, executor)
                            .await?,
                    );
                }
                BenchmarkProtocol::CycleBenchmarking => {
                    results.cycle_benchmarking =
                        Some(self.run_cycle_benchmarking(sequence, executor).await?);
                }
            }
        }

        Ok(results)
    }

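    // NOTE: The protocol implementations below return fixed placeholder
    // values so the surrounding pipeline can be exercised end to end; they
    // do not yet drive circuits on the provided executor.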
    async fn run_randomized_benchmarking(
        &self,
        _sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<RandomizedBenchmarkingResults> {
        Ok(RandomizedBenchmarkingResults {
            gate_fidelity: 0.995,
            confidence_interval: (0.990, 0.999),
            decay_rate: 0.005,
            sequences_tested: self.config.benchmarking_config.benchmark_runs,
            p_value: 0.001,
        })
    }

    async fn run_process_tomography(
        &self,
        _sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<ProcessTomographyResults> {
        Ok(ProcessTomographyResults {
            process_fidelity: 0.98,
            process_matrix: Array2::eye(4),
            eigenvalues: Array1::from_vec(vec![1.0, 0.99, 0.98, 0.97]),
            completeness: 0.99,
        })
    }

    async fn run_gate_set_tomography(
        &self,
        _sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<GateSetTomographyResults> {
        let mut gate_fidelities = HashMap::new();
        gate_fidelities.insert("X".to_string(), 0.995);
        gate_fidelities.insert("Y".to_string(), 0.994);
        gate_fidelities.insert("Z".to_string(), 0.999);

        let mut spam_errors = HashMap::new();
        spam_errors.insert("prep_error".to_string(), 0.001);
        spam_errors.insert("meas_error".to_string(), 0.002);

        Ok(GateSetTomographyResults {
            gate_set_fidelity: 0.996,
            gate_fidelities,
            spam_errors,
            model_consistency: 0.98,
        })
    }

    async fn run_cross_entropy_benchmarking(
        &self,
        _sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<CrossEntropyResults> {
        Ok(CrossEntropyResults {
            cross_entropy_score: 2.1,
            linear_xeb_fidelity: 0.92,
            quantum_volume: 64,
            confidence_level: 0.95,
        })
    }

    async fn run_cycle_benchmarking(
        &self,
        _sequence: &DDSequence,
        _executor: &dyn DDCircuitExecutor,
    ) -> DeviceResult<CycleBenchmarkingResults> {
        Ok(CycleBenchmarkingResults {
            cycle_fidelity: 0.993,
            systematic_error_rate: 0.002,
            stochastic_error_rate: 0.005,
            leakage_rate: 0.0001,
        })
    }

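    /// Builds the statistical summary for a single analysis run. With only
    /// one observation per metric, the descriptive statistics are seeded
    /// with placeholder spreads around the observed value; the
    /// hypothesis-test, distribution, and confidence-interval sections
    /// start out empty.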
    fn perform_statistical_analysis(
        &self,
        metrics: &HashMap<DDPerformanceMetric, f64>,
        _sequence: &DDSequence,
    ) -> DeviceResult<DDStatisticalAnalysis> {
        let mut means = HashMap::new();
        let mut standard_deviations = HashMap::new();
        let mut medians = HashMap::new();
        let mut percentiles = HashMap::new();
        let mut ranges = HashMap::new();

        for (metric, &value) in metrics {
            let metric_name = format!("{:?}", metric);
            means.insert(metric_name.clone(), value);
            // With a single observation per metric, seed the spread with a
            // placeholder 10% of the observed value.
            standard_deviations.insert(metric_name.clone(), value * 0.1);
            medians.insert(metric_name.clone(), value);
            percentiles.insert(
                metric_name.clone(),
                vec![value * 0.9, value * 1.1, value * 1.2, value * 1.3],
            );
            ranges.insert(metric_name, (value * 0.8, value * 1.2));
        }

        let descriptive_stats = DescriptiveStatistics {
            means,
            standard_deviations,
            medians,
            percentiles,
            ranges,
        };

        let hypothesis_tests = HypothesisTestResults {
            t_test_results: HashMap::new(),
            ks_test_results: HashMap::new(),
            normality_tests: HashMap::new(),
        };

        let correlation_analysis = CorrelationAnalysis {
            pearson_correlations: Array2::eye(metrics.len().max(1)),
            spearman_correlations: Array2::eye(metrics.len().max(1)),
            significant_correlations: Vec::new(),
        };

        let distribution_analysis = DistributionAnalysis {
            best_fit_distributions: HashMap::new(),
            distribution_parameters: HashMap::new(),
            goodness_of_fit: HashMap::new(),
        };

        let confidence_intervals = ConfidenceIntervals {
            mean_intervals: HashMap::new(),
            bootstrap_intervals: HashMap::new(),
            prediction_intervals: HashMap::new(),
        };

        Ok(DDStatisticalAnalysis {
            descriptive_stats,
            hypothesis_tests,
            correlation_analysis,
            distribution_analysis,
            confidence_intervals,
        })
    }

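    /// Computes descriptive statistics over repeated observations per
    /// metric: mean and standard deviation (delegated to `scirs2_stats`
    /// when the `scirs2` feature is enabled, computed inline otherwise),
    /// median, the 25th/75th/95th/99th percentiles by nearest-rank lookup,
    /// and the observed range.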
    fn calculate_descriptive_statistics(
        &self,
        data: &HashMap<String, Array1<f64>>,
    ) -> DeviceResult<DescriptiveStatistics> {
        let mut means = HashMap::new();
        let mut standard_deviations = HashMap::new();
        let mut medians = HashMap::new();
        let mut percentiles = HashMap::new();
        let mut ranges = HashMap::new();

        for (metric_name, values) in data {
            // Skip empty series: none of the statistics below are defined.
            if values.is_empty() {
                continue;
            }

            #[cfg(feature = "scirs2")]
            let mean_val = mean(&values.view()).unwrap_or(0.0);
            #[cfg(not(feature = "scirs2"))]
            let mean_val = values.sum() / values.len() as f64;

            #[cfg(feature = "scirs2")]
            let std_val = std(&values.view(), 1, None).unwrap_or(1.0);
            #[cfg(not(feature = "scirs2"))]
            let std_val = if values.len() > 1 {
                // Sample standard deviation with Bessel's correction (n - 1).
                let mean = mean_val;
                let variance = values.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
                    / (values.len() - 1) as f64;
                variance.sqrt()
            } else {
                0.0
            };

            means.insert(metric_name.clone(), mean_val);
            standard_deviations.insert(metric_name.clone(), std_val);

            // total_cmp imposes a total order on f64, so NaNs cannot panic the sort.
            let mut sorted = values.to_vec();
            sorted.sort_by(|a, b| a.total_cmp(b));
            let len = sorted.len();

            let median = if len % 2 == 0 {
                (sorted[len / 2 - 1] + sorted[len / 2]) / 2.0
            } else {
                sorted[len / 2]
            };
            medians.insert(metric_name.clone(), median);

            // Nearest-rank percentiles; the indices are always below len.
            let p25 = sorted[len / 4];
            let p75 = sorted[3 * len / 4];
            let p95 = sorted[95 * len / 100];
            let p99 = sorted[99 * len / 100];

            percentiles.insert(metric_name.clone(), vec![p25, p75, p95, p99]);
            ranges.insert(metric_name.clone(), (sorted[0], sorted[len - 1]));
        }

        Ok(DescriptiveStatistics {
            means,
            standard_deviations,
            medians,
            percentiles,
            ranges,
        })
    }

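    // The helpers below are placeholders: they currently return empty or
    // neutral results and are intended to be replaced by full statistical
    // implementations.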
    fn perform_hypothesis_tests(
        &self,
        _data: &HashMap<String, Array1<f64>>,
    ) -> DeviceResult<HypothesisTestResults> {
        Ok(HypothesisTestResults {
            t_test_results: HashMap::new(),
            ks_test_results: HashMap::new(),
            normality_tests: HashMap::new(),
        })
    }

    fn perform_correlation_analysis(
        &self,
        data: &HashMap<String, Array1<f64>>,
    ) -> DeviceResult<CorrelationAnalysis> {
        let n_metrics = data.len();
        let pearson_correlations = Array2::eye(n_metrics);
        let spearman_correlations = Array2::eye(n_metrics);

        Ok(CorrelationAnalysis {
            pearson_correlations,
            spearman_correlations,
            significant_correlations: Vec::new(),
        })
    }

    fn analyze_distributions(
        &self,
        _data: &HashMap<String, Array1<f64>>,
    ) -> DeviceResult<DistributionAnalysis> {
        Ok(DistributionAnalysis {
            best_fit_distributions: HashMap::new(),
            distribution_parameters: HashMap::new(),
            goodness_of_fit: HashMap::new(),
        })
    }

    fn calculate_confidence_intervals(
        &self,
        _data: &HashMap<String, Array1<f64>>,
    ) -> DeviceResult<ConfidenceIntervals> {
        Ok(ConfidenceIntervals {
            mean_intervals: HashMap::new(),
            bootstrap_intervals: HashMap::new(),
            prediction_intervals: HashMap::new(),
        })
    }

    fn perform_comparative_analysis(
        &self,
        _metrics: &HashMap<DDPerformanceMetric, f64>,
    ) -> DeviceResult<ComparativeAnalysis> {
        Ok(ComparativeAnalysis {
            relative_improvements: HashMap::new(),
            significance_tests: HashMap::new(),
            effect_sizes: HashMap::new(),
            performance_ranking: 1,
        })
    }

    fn analyze_performance_trends(
        &self,
        _metrics: &HashMap<DDPerformanceMetric, f64>,
    ) -> DeviceResult<PerformanceTrends> {
        Ok(PerformanceTrends {
            trend_slopes: HashMap::new(),
            trend_significance: HashMap::new(),
            seasonality: HashMap::new(),
            outliers: HashMap::new(),
        })
    }
}