quantrs2_core/noise_characterization.rs

//! Noise Characterization and Mitigation Protocols
//!
//! This module provides advanced noise characterization techniques and
//! error mitigation strategies for near-term quantum devices (NISQ era).
//!
//! ## Characterization Techniques
//! - **Randomized Benchmarking (RB)**: Characterizes average gate fidelity
//! - **Gate Set Tomography (GST)**: Full characterization of gate set errors
//! - **Quantum Process Tomography (QPT)**: Complete process matrix reconstruction
//! - **Cross-Entropy Benchmarking (XEB)**: Validates quantum advantage
//!
//! ## Mitigation Strategies
//! - **Zero-Noise Extrapolation (ZNE)**: Extrapolates to zero-noise limit
//! - **Probabilistic Error Cancellation (PEC)**: Inverts noise channels
//! - **Clifford Data Regression (CDR)**: Calibrates using Clifford circuits
//! - **Dynamical Decoupling**: Suppresses decoherence during idle times
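//!
//! ## Example
//!
//! A minimal usage sketch. The `use` path assumes this module is exported as
//! `quantrs2_core::noise_characterization`, so the block is marked `ignore` rather
//! than compiled as a doctest.
//!
//! ```ignore
//! use quantrs2_core::noise_characterization::{
//!     ExtrapolationMethod, NoiseModel, ZeroNoiseExtrapolation,
//! };
//!
//! // Inspect the default noise model and its effective gate fidelities.
//! let noise = NoiseModel::default();
//! assert!(noise.single_qubit_fidelity() > noise.two_qubit_fidelity());
//!
//! // Mitigate a noisy expectation value with linear zero-noise extrapolation.
//! let zne = ZeroNoiseExtrapolation::new(vec![1.0, 2.0, 3.0], ExtrapolationMethod::Linear);
//! let mitigated = zne.mitigate(|scale| 1.0 - 0.1 * scale).unwrap();
//! assert!((mitigated - 1.0).abs() < 1e-6);
//! ```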

use crate::{
    error::{QuantRS2Error, QuantRS2Result},
    gate::GateOp,
};
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::prelude::*;
use scirs2_core::Complex64;
use std::collections::HashMap;

/// Noise model for quantum operations
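///
/// # Example
///
/// A sketch of building a custom model and comparing effective gate fidelities.
/// The `use` path is an assumption about the crate layout, so the block is marked
/// `ignore`.
///
/// ```ignore
/// use quantrs2_core::noise_characterization::NoiseModel;
///
/// // Depolarizing probabilities, T1/T2 in microseconds, readout error probability.
/// let noise = NoiseModel::new(0.001, 0.01, 50.0, 70.0, 0.02);
/// assert!(noise.two_qubit_fidelity() < noise.single_qubit_fidelity());
/// ```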
#[derive(Debug, Clone)]
pub struct NoiseModel {
    /// Single-qubit depolarizing noise probability
    pub single_qubit_depolarizing: f64,
    /// Two-qubit depolarizing noise probability
    pub two_qubit_depolarizing: f64,
    /// T1 relaxation time (in microseconds)
    pub t1_relaxation: f64,
    /// T2 dephasing time (in microseconds)
    pub t2_dephasing: f64,
    /// Readout error probability (0/1 flip)
    pub readout_error: f64,
    /// Gate duration (in microseconds)
    pub gate_duration: f64,
}

impl Default for NoiseModel {
    fn default() -> Self {
        Self {
            single_qubit_depolarizing: 0.001,
            two_qubit_depolarizing: 0.01,
            t1_relaxation: 50.0,
            t2_dephasing: 70.0,
            readout_error: 0.02,
            gate_duration: 0.1,
        }
    }
}

impl NoiseModel {
    /// Create a new noise model with specified parameters
    pub fn new(
        single_qubit_depolarizing: f64,
        two_qubit_depolarizing: f64,
        t1: f64,
        t2: f64,
        readout_error: f64,
    ) -> Self {
        Self {
            single_qubit_depolarizing,
            two_qubit_depolarizing,
            t1_relaxation: t1,
            t2_dephasing: t2,
            readout_error,
            gate_duration: 0.1,
        }
    }

    /// Get the effective fidelity for a single-qubit gate
    pub fn single_qubit_fidelity(&self) -> f64 {
        let depolarizing_fidelity = 1.0 - self.single_qubit_depolarizing;
        let coherence_decay = (-self.gate_duration / self.t1_relaxation).exp()
            * (-self.gate_duration / self.t2_dephasing).exp();
        depolarizing_fidelity * coherence_decay
    }

    /// Get the effective fidelity for a two-qubit gate
    pub fn two_qubit_fidelity(&self) -> f64 {
        let depolarizing_fidelity = 1.0 - self.two_qubit_depolarizing;
        let coherence_decay = (-2.0 * self.gate_duration / self.t1_relaxation).exp()
            * (-2.0 * self.gate_duration / self.t2_dephasing).exp();
        depolarizing_fidelity * coherence_decay
    }
}

/// Randomized Benchmarking Protocol
///
/// Characterizes the average gate fidelity by applying random Clifford sequences.
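///
/// # Example
///
/// A construction sketch (the `use` path is assumed, hence `ignore`). Running the
/// protocol additionally requires a closure that executes a gate sequence on a
/// backend and returns the resulting state vector.
///
/// ```ignore
/// use quantrs2_core::noise_characterization::RandomizedBenchmarking;
///
/// // Benchmark one qubit, sampling sequence lengths in steps of 5 up to 50,
/// // with 20 random sequences per length.
/// let rb = RandomizedBenchmarking::new(1, 50, 20);
/// assert!(!rb.sequence_lengths.is_empty());
/// ```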
pub struct RandomizedBenchmarking {
    /// Number of qubits to benchmark
    pub num_qubits: usize,
    /// Sequence lengths to test
    pub sequence_lengths: Vec<usize>,
    /// Number of sequences per length
    pub num_sequences: usize,
    /// Random number generator
    rng: ThreadRng,
}

impl RandomizedBenchmarking {
    /// Create a new RB protocol
    pub fn new(num_qubits: usize, max_sequence_length: usize, num_sequences: usize) -> Self {
        let sequence_lengths: Vec<usize> = (1..=max_sequence_length).step_by(5).collect();
        Self {
            num_qubits,
            sequence_lengths,
            num_sequences,
            rng: thread_rng(),
        }
    }

    /// Run the randomized benchmarking protocol
    ///
    /// Returns average survival probabilities for each sequence length
    pub fn run<F>(&mut self, mut apply_circuit: F) -> QuantRS2Result<RandomizedBenchmarkingResult>
    where
        F: FnMut(&[Box<dyn GateOp>]) -> Vec<Complex64>,
    {
        let mut survival_probabilities = HashMap::new();

        // Clone sequence lengths to avoid borrow conflicts
        let lengths = self.sequence_lengths.clone();

        for &length in &lengths {
            let mut total_survival = 0.0;

            for _ in 0..self.num_sequences {
                // Generate random Clifford sequence
                let sequence = self.generate_clifford_sequence(length);

                // Apply the sequence and measure survival probability
                let final_state = apply_circuit(&sequence);
                let survival = self.measure_survival_probability(&final_state);

                total_survival += survival;
            }

            let avg_survival = total_survival / (self.num_sequences as f64);
            survival_probabilities.insert(length, avg_survival);
        }

        // Fit exponential decay: P(m) = A * p^m + B
        let (decay_rate, asymptote) = self.fit_exponential_decay(&survival_probabilities)?;

        // Convert decay rate to average gate fidelity: F = 1 - (1 - p) * (d - 1) / d, with d = 2^n
        let dim = (1 << self.num_qubits) as f64;
        let avg_gate_fidelity = 1.0 - (1.0 - decay_rate) * (dim - 1.0) / dim;

        Ok(RandomizedBenchmarkingResult {
            survival_probabilities,
            decay_rate,
            asymptote,
            avg_gate_fidelity,
            num_qubits: self.num_qubits,
        })
    }

    /// Generate a random Clifford sequence of specified length
    fn generate_clifford_sequence(&self, _length: usize) -> Vec<Box<dyn GateOp>> {
        // Placeholder: in practice this should sample `_length` gates from the full
        // Clifford group; an empty sequence is returned for now.
        vec![]
    }

    /// Measure the survival probability (overlap with initial state)
    fn measure_survival_probability(&self, state: &[Complex64]) -> f64 {
        // For |0...0⟩ initial state
        state[0].norm_sqr()
    }

    /// Fit exponential decay to RB data
    fn fit_exponential_decay(&self, data: &HashMap<usize, f64>) -> QuantRS2Result<(f64, f64)> {
        // Simple linear regression on log scale
        // P(m) = A * p^m + B
        // log(P(m) - B) = log(A) + m * log(p)

        if data.len() < 3 {
            return Err(QuantRS2Error::InvalidInput(
                "Insufficient data points for fitting".to_string(),
            ));
        }

        // Use the minimum survival probability as an estimate for the asymptote B
        let asymptote = *data
            .values()
            .min_by(|a, b| a.partial_cmp(b).unwrap())
            .unwrap();

        let mut sum_x = 0.0;
        let mut sum_y = 0.0;
        let mut sum_xy = 0.0;
        let mut sum_xx = 0.0;
        let mut count = 0;

        for (&length, &survival) in data.iter() {
            if survival > asymptote {
                let x = length as f64;
                let y = (survival - asymptote).ln();

                sum_x += x;
                sum_y += y;
                sum_xy += x * y;
                sum_xx += x * x;
                count += 1;
            }
        }

        if count < 2 {
            return Err(QuantRS2Error::InvalidInput(
                "Insufficient valid data points".to_string(),
            ));
        }

        let n = count as f64;
        // Standard linear regression formula: slope = (n*Σxy - Σx*Σy) / (n*Σx² - (Σx)²)
        #[allow(clippy::suspicious_operation_groupings)]
        let slope = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x);
        let decay_rate = slope.exp();

        Ok((decay_rate, asymptote))
    }
}

/// Result of randomized benchmarking
#[derive(Debug, Clone)]
pub struct RandomizedBenchmarkingResult {
    /// Survival probabilities vs sequence length
    pub survival_probabilities: HashMap<usize, f64>,
    /// Fitted decay rate p
    pub decay_rate: f64,
    /// Fitted asymptote B
    pub asymptote: f64,
    /// Average gate fidelity
    pub avg_gate_fidelity: f64,
    /// Number of qubits benchmarked
    pub num_qubits: usize,
}

/// Zero-Noise Extrapolation (ZNE) for error mitigation
///
/// Runs the circuit at amplified noise levels and extrapolates the result to the zero-noise limit.
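///
/// # Example
///
/// Extrapolating a quadratically decaying observable back to zero noise, mirroring
/// the unit tests below. The `use` path is assumed, so the block is marked `ignore`.
///
/// ```ignore
/// use quantrs2_core::noise_characterization::{ExtrapolationMethod, ZeroNoiseExtrapolation};
///
/// let zne = ZeroNoiseExtrapolation::new(
///     vec![1.0, 2.0, 3.0, 4.0],
///     ExtrapolationMethod::Polynomial(2),
/// );
/// // Mock executor standing in for running the noise-scaled circuit.
/// let mitigated = zne.mitigate(|scale| 1.0 - 0.05 * scale - 0.01 * scale * scale).unwrap();
/// assert!((mitigated - 1.0).abs() < 0.1);
/// ```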
pub struct ZeroNoiseExtrapolation {
    /// Noise scaling factors
    pub scaling_factors: Vec<f64>,
    /// Extrapolation method
    pub extrapolation_method: ExtrapolationMethod,
}

#[derive(Debug, Clone, Copy)]
pub enum ExtrapolationMethod {
    /// Linear extrapolation
    Linear,
    /// Polynomial fit
    Polynomial(usize),
    /// Exponential fit
    Exponential,
}

impl Default for ZeroNoiseExtrapolation {
    fn default() -> Self {
        Self {
            scaling_factors: vec![1.0, 2.0, 3.0],
            extrapolation_method: ExtrapolationMethod::Linear,
        }
    }
}

impl ZeroNoiseExtrapolation {
    /// Create a new ZNE protocol
    pub fn new(scaling_factors: Vec<f64>, extrapolation_method: ExtrapolationMethod) -> Self {
        Self {
            scaling_factors,
            extrapolation_method,
        }
    }

    /// Apply ZNE to mitigate errors
    ///
    /// Runs the circuit with scaled noise and extrapolates to zero
    pub fn mitigate<F>(&self, circuit_executor: F) -> QuantRS2Result<f64>
    where
        F: Fn(f64) -> f64,
    {
        // Execute circuit at each noise level
        let mut noisy_results = Vec::new();
        for &scale in &self.scaling_factors {
            let result = circuit_executor(scale);
            noisy_results.push((scale, result));
        }

        // Extrapolate to zero noise
        let mitigated_result = match self.extrapolation_method {
            ExtrapolationMethod::Linear => self.linear_extrapolation(&noisy_results)?,
            ExtrapolationMethod::Polynomial(degree) => {
                self.polynomial_extrapolation(&noisy_results, degree)?
            }
            ExtrapolationMethod::Exponential => self.exponential_extrapolation(&noisy_results)?,
        };

        Ok(mitigated_result)
    }

    /// Linear extrapolation to zero noise
    fn linear_extrapolation(&self, data: &[(f64, f64)]) -> QuantRS2Result<f64> {
        if data.len() < 2 {
            return Err(QuantRS2Error::InvalidInput(
                "Need at least 2 data points for linear extrapolation".to_string(),
            ));
        }

        // Fit y = a + b*x
        let n = data.len() as f64;
        let sum_x: f64 = data.iter().map(|(x, _)| x).sum();
        let sum_y: f64 = data.iter().map(|(_, y)| y).sum();
        let sum_xy: f64 = data.iter().map(|(x, y)| x * y).sum();
        let sum_xx: f64 = data.iter().map(|(x, _)| x * x).sum();

        // Standard linear regression formula: slope = (n*Σxy - Σx*Σy) / (n*Σx² - (Σx)²)
        #[allow(clippy::suspicious_operation_groupings)]
        let b = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x);
        let a = (sum_y - b * sum_x) / n;

        // Extrapolate to x=0 (zero noise)
        Ok(a)
    }

    /// Polynomial extrapolation
    fn polynomial_extrapolation(&self, data: &[(f64, f64)], degree: usize) -> QuantRS2Result<f64> {
        if data.len() < degree + 1 {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Need at least {} data points for degree {} polynomial",
                degree + 1,
                degree
            )));
        }

        // Build Vandermonde matrix for polynomial fitting: y = a_0 + a_1*x + a_2*x^2 + ...
        let n = data.len();
        let mut a_matrix = Array2::zeros((n, degree + 1));
        let mut b_vector = Array1::zeros(n);

        for (i, &(x, y)) in data.iter().enumerate() {
            b_vector[i] = y;
            for j in 0..=degree {
                a_matrix[[i, j]] = x.powi(j as i32);
            }
        }

        // Solve least squares: A^T A c = A^T b
        let at = a_matrix.t();
        let ata = at.dot(&a_matrix);
        let atb = at.dot(&b_vector);

        // For now, solve the normal equations directly with Gaussian elimination.
        // In production, QR decomposition or SVD (e.g. from scirs2_linalg) would be
        // more numerically stable than the normal-equations approach.
        let coeffs = self.solve_normal_equations(&ata, &atb)?;

        // Evaluate polynomial at x=0 (zero noise)
        Ok(coeffs[0])
    }

    /// Solve normal equations A^T A c = A^T b
    fn solve_normal_equations(
        &self,
        ata: &Array2<f64>,
        atb: &Array1<f64>,
    ) -> QuantRS2Result<Array1<f64>> {
        let n = ata.nrows();
        let mut augmented = Array2::zeros((n, n + 1));

        // Build augmented matrix [A^T A | A^T b]
        for i in 0..n {
            for j in 0..n {
                augmented[[i, j]] = ata[[i, j]];
            }
            augmented[[i, n]] = atb[i];
        }

        // Gaussian elimination with partial pivoting
        for k in 0..n {
            // Find pivot
            let mut max_idx = k;
            let mut max_val = augmented[[k, k]].abs();
            for i in (k + 1)..n {
                let val = augmented[[i, k]].abs();
                if val > max_val {
                    max_val = val;
                    max_idx = i;
                }
            }

            // Swap rows if needed
            if max_idx != k {
                for j in 0..=n {
                    let tmp = augmented[[k, j]];
                    augmented[[k, j]] = augmented[[max_idx, j]];
                    augmented[[max_idx, j]] = tmp;
                }
            }

            // Check for singular matrix
            if augmented[[k, k]].abs() < 1e-10 {
                return Err(QuantRS2Error::ComputationError(
                    "Singular matrix in polynomial fitting".to_string(),
                ));
            }

            // Eliminate column
            for i in (k + 1)..n {
                let factor = augmented[[i, k]] / augmented[[k, k]];
                for j in k..=n {
                    augmented[[i, j]] -= factor * augmented[[k, j]];
                }
            }
        }

        // Back substitution
        let mut coeffs = Array1::zeros(n);
        for i in (0..n).rev() {
            let mut sum = augmented[[i, n]];
            for j in (i + 1)..n {
                sum -= augmented[[i, j]] * coeffs[j];
            }
            coeffs[i] = sum / augmented[[i, i]];
        }

        Ok(coeffs)
    }

    /// Exponential extrapolation
    fn exponential_extrapolation(&self, data: &[(f64, f64)]) -> QuantRS2Result<f64> {
        // Fit y = a * exp(b*x)
        // log(y) = log(a) + b*x

        let log_data: Vec<(f64, f64)> = data
            .iter()
            .filter(|(_, y)| *y > 0.0)
            .map(|(x, y)| (*x, y.ln()))
            .collect();

        if log_data.len() < 2 {
            return Err(QuantRS2Error::InvalidInput(
                "Insufficient positive data for exponential fit".to_string(),
            ));
        }

        // Fit linear on log scale
        let n = log_data.len() as f64;
        let sum_x: f64 = log_data.iter().map(|(x, _)| x).sum();
        let sum_y: f64 = log_data.iter().map(|(_, y)| y).sum();
        let sum_xy: f64 = log_data.iter().map(|(x, y)| x * y).sum();
        let sum_xx: f64 = log_data.iter().map(|(x, _)| x * x).sum();

        // Standard linear regression formula: slope = (n*Σxy - Σx*Σy) / (n*Σx² - (Σx)²)
        #[allow(clippy::suspicious_operation_groupings)]
        let b = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x);
        let log_a = (sum_y - b * sum_x) / n;
        let a = log_a.exp();

        // Extrapolate to x=0
        Ok(a)
    }
}

/// Probabilistic Error Cancellation (PEC)
///
/// Inverts noise channels by sampling from quasi-probability distributions.
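///
/// # Example
///
/// A minimal sketch (the `use` path is assumed, hence `ignore`); the executor would
/// normally run the sampled circuit and return an expectation value.
///
/// ```ignore
/// use quantrs2_core::noise_characterization::{NoiseModel, ProbabilisticErrorCancellation};
///
/// let pec = ProbabilisticErrorCancellation::new(NoiseModel::default(), 1_000);
/// let mitigated = pec.mitigate(|_gates| 0.9).unwrap();
/// // The quasi-probability reweighting pushes the raw value of 0.9 slightly upward.
/// assert!(mitigated > 0.9);
/// ```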
pub struct ProbabilisticErrorCancellation {
    /// Noise model to invert
    pub noise_model: NoiseModel,
    /// Number of samples for quasi-probability sampling
    pub num_samples: usize,
}

impl ProbabilisticErrorCancellation {
    /// Create a new PEC protocol
    pub fn new(noise_model: NoiseModel, num_samples: usize) -> Self {
        Self {
            noise_model,
            num_samples,
        }
    }

    /// Apply PEC to mitigate errors
    ///
    /// Returns the error-mitigated expectation value
    pub fn mitigate<F>(&self, ideal_executor: F) -> QuantRS2Result<f64>
    where
        F: Fn(&[Box<dyn GateOp>]) -> f64,
    {
        // Decompose noisy operations into quasi-probability distribution
        // over ideal operations

        // For depolarizing noise: E = (1-p) I + p * depolarizing
        // Inversion: I = (E - p * depolarizing) / (1-p)

        let fidelity = self.noise_model.single_qubit_fidelity();
        let noise_strength = 1.0 - fidelity;

        // Compute quasi-probability coefficients
        let ideal_weight = 1.0 / (1.0 - noise_strength);
        // The negative weight on the noise term is unused in this simplified version.
        let _noise_weight = -noise_strength / (1.0 - noise_strength);

        // Simplified implementation: full PEC requires sampling circuits from the
        // quasi-probability distribution and combining the weighted outcomes.
        let ideal_result = ideal_executor(&[]);

        // Apply quasi-probability weighting
        let mitigated = ideal_weight * ideal_result;

        Ok(mitigated)
    }
}

/// Dynamical Decoupling Protocol
///
/// Suppresses decoherence by applying pulse sequences during idle times.
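///
/// # Example
///
/// Generating an XY4 sequence over a 1 µs idle window (the `use` path is assumed,
/// hence `ignore`).
///
/// ```ignore
/// use quantrs2_core::noise_characterization::{DDPulse, DDSequenceType, DynamicalDecoupling};
///
/// let dd = DynamicalDecoupling::new(DDSequenceType::XY4, 4);
/// let pulses = dd.generate_sequence(1.0);
/// assert_eq!(pulses.len(), 4);
/// // Pulses alternate X, Y, X, Y at evenly spaced times 0.2, 0.4, 0.6, 0.8.
/// assert_eq!(pulses[0].1, DDPulse::PauliX);
/// assert!((pulses[0].0 - 0.2).abs() < 1e-12);
/// ```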
pub struct DynamicalDecoupling {
    /// Type of DD sequence
    pub sequence_type: DDSequenceType,
    /// Number of pulses in the sequence
    pub num_pulses: usize,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DDSequenceType {
    /// Simple spin echo (single X pulse at the midpoint)
    SpinEcho,
    /// Carr-Purcell (X-X-...)
    CarrPurcell,
    /// CPMG (X-Y-X-Y-...)
    CPMG,
    /// XY4 (XYXY)
    XY4,
    /// XY8 (XYXYYXYX)
    XY8,
    /// UDD (Uhrig dynamical decoupling)
    UDD,
}

impl DynamicalDecoupling {
    /// Create a new DD protocol
    pub fn new(sequence_type: DDSequenceType, num_pulses: usize) -> Self {
        Self {
            sequence_type,
            num_pulses,
        }
    }

    /// Generate the DD pulse sequence
    ///
    /// Returns pulse application times within the idle window \[0, idle_time\] and pulse types
    pub fn generate_sequence(&self, idle_time: f64) -> Vec<(f64, DDPulse)> {
        match self.sequence_type {
            DDSequenceType::SpinEcho => {
                vec![(idle_time / 2.0, DDPulse::PauliX)]
            }
            DDSequenceType::CarrPurcell => {
                let mut sequence = Vec::new();
                for i in 1..=self.num_pulses {
                    let time = (i as f64) * idle_time / (self.num_pulses as f64 + 1.0);
                    sequence.push((time, DDPulse::PauliX));
                }
                sequence
            }
            DDSequenceType::CPMG => {
                let mut sequence = Vec::new();
                for i in 1..=self.num_pulses {
                    let time = (i as f64) * idle_time / (self.num_pulses as f64 + 1.0);
                    let pulse = if i % 2 == 1 {
                        DDPulse::PauliX
                    } else {
                        DDPulse::PauliY
                    };
                    sequence.push((time, pulse));
                }
                sequence
            }
            DDSequenceType::XY4 => {
                let pulses = [
                    DDPulse::PauliX,
                    DDPulse::PauliY,
                    DDPulse::PauliX,
                    DDPulse::PauliY,
                ];
                let mut sequence = Vec::new();
                for i in 1..=4 {
                    let time = (i as f64) * idle_time / 5.0;
                    sequence.push((time, pulses[i - 1]));
                }
                sequence
            }
            DDSequenceType::XY8 => {
                let pulses = [
                    DDPulse::PauliX,
                    DDPulse::PauliY,
                    DDPulse::PauliX,
                    DDPulse::PauliY,
                    DDPulse::PauliY,
                    DDPulse::PauliX,
                    DDPulse::PauliY,
                    DDPulse::PauliX,
                ];
                let mut sequence = Vec::new();
                for i in 1..=8 {
                    let time = (i as f64) * idle_time / 9.0;
                    sequence.push((time, pulses[i - 1]));
                }
                sequence
            }
            DDSequenceType::UDD => {
                // Uhrig DD: optimal pulse timings
                let mut sequence = Vec::new();
                for k in 1..=self.num_pulses {
                    let angle =
                        std::f64::consts::PI * (k as f64) / (2.0 * (self.num_pulses as f64 + 1.0));
                    let time = idle_time * angle.sin().powi(2);
                    sequence.push((time, DDPulse::PauliX));
                }
                sequence
            }
        }
    }

    /// Estimate coherence time improvement factor
    pub fn coherence_improvement_factor(&self, t2: f64, pulse_duration: f64) -> f64 {
        // Estimate based on number of pulses and pulse quality
        let n = self.num_pulses as f64;
        let pulse_error = pulse_duration / t2;

        // Improvement factor ≈ n^2 for ideal pulses
        // Reduced by pulse errors
        (n * n) / (1.0 + n * pulse_error)
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DDPulse {
    PauliX,
    PauliY,
    PauliZ,
}

/// Cross-Entropy Benchmarking (XEB)
///
/// Validates quantum advantage by comparing measured bitstrings against classically simulated output probabilities.
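///
/// # Example
///
/// A sketch with mock executors (the `use` path is assumed, hence `ignore`); sampling
/// from the uniform distribution yields an XEB fidelity near zero.
///
/// ```ignore
/// use quantrs2_core::noise_characterization::CrossEntropyBenchmarking;
///
/// let xeb = CrossEntropyBenchmarking::new(2, 4, 10);
/// // Mock executors: measured bitstrings and ideal (here uniform) probabilities.
/// let quantum_exec = |_circuit: usize| vec![0, 1, 2, 3];
/// let classical_sim = |_circuit: usize| vec![0.25; 4];
/// let result = xeb.run(quantum_exec, classical_sim).unwrap();
/// assert!(result.fidelity.abs() < 1e-9);
/// ```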
pub struct CrossEntropyBenchmarking {
    /// Number of qubits
    pub num_qubits: usize,
    /// Circuit depth
    pub circuit_depth: usize,
    /// Number of circuits to average over
    pub num_circuits: usize,
}

impl CrossEntropyBenchmarking {
    /// Create a new XEB protocol
    pub fn new(num_qubits: usize, circuit_depth: usize, num_circuits: usize) -> Self {
        Self {
            num_qubits,
            circuit_depth,
            num_circuits,
        }
    }

    /// Run XEB protocol
    ///
    /// Returns cross-entropy benchmark fidelity
    pub fn run<F, G>(
        &self,
        quantum_executor: F,
        classical_simulator: G,
    ) -> QuantRS2Result<CrossEntropyResult>
    where
        F: Fn(usize) -> Vec<usize>, // Returns measured bitstrings
        G: Fn(usize) -> Vec<f64>,   // Returns ideal probabilities
    {
        let mut total_cross_entropy = 0.0;

        for circuit_id in 0..self.num_circuits {
            // Get quantum measurements
            let measurements = quantum_executor(circuit_id);

            // Get ideal probabilities from classical simulation
            let ideal_probs = classical_simulator(circuit_id);

            // Compute cross-entropy
            let cross_entropy = self.compute_cross_entropy(&measurements, &ideal_probs);
            total_cross_entropy += cross_entropy;
        }

        let avg_cross_entropy = total_cross_entropy / (self.num_circuits as f64);

        // Linear XEB fidelity: F_XEB = 2^n * <p(x)> - 1, where <p(x)> is the average
        // ideal probability of the measured bitstrings
        let fidelity = (1 << self.num_qubits) as f64 * avg_cross_entropy - 1.0;

        Ok(CrossEntropyResult {
            cross_entropy: avg_cross_entropy,
            fidelity,
            num_qubits: self.num_qubits,
            circuit_depth: self.circuit_depth,
        })
    }

    /// Compute the linear cross-entropy: the average ideal probability of the measured bitstrings
    fn compute_cross_entropy(&self, measurements: &[usize], ideal_probs: &[f64]) -> f64 {
        let num_measurements = measurements.len() as f64;
        let mut cross_entropy = 0.0;

        for &bitstring in measurements {
            if bitstring < ideal_probs.len() {
                cross_entropy += ideal_probs[bitstring];
            }
        }

        cross_entropy / num_measurements
    }
}

#[derive(Debug, Clone)]
pub struct CrossEntropyResult {
    /// Average cross-entropy
    pub cross_entropy: f64,
    /// XEB fidelity
    pub fidelity: f64,
    /// Number of qubits
    pub num_qubits: usize,
    /// Circuit depth
    pub circuit_depth: usize,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_noise_model() {
        let noise = NoiseModel::default();
        let fidelity = noise.single_qubit_fidelity();
        assert!(fidelity > 0.9 && fidelity < 1.0);

        let two_qubit_fidelity = noise.two_qubit_fidelity();
        assert!(two_qubit_fidelity > 0.8 && two_qubit_fidelity < 1.0);
        assert!(two_qubit_fidelity < fidelity); // Two-qubit gates less reliable
    }

    #[test]
    fn test_zne_linear_extrapolation() {
        let zne = ZeroNoiseExtrapolation::default();

        // Mock executor: result = 1.0 - 0.1 * noise_scale
        let executor = |scale: f64| 1.0 - 0.1 * scale;

        let mitigated = zne.mitigate(executor).unwrap();

        // Should extrapolate to ~1.0 at scale=0
        assert!((mitigated - 1.0).abs() < 0.05);
        println!("ZNE mitigated result: {}", mitigated);
    }

    #[test]
    fn test_dynamical_decoupling_sequences() {
        let dd_spin_echo = DynamicalDecoupling::new(DDSequenceType::SpinEcho, 1);
        let sequence = dd_spin_echo.generate_sequence(1.0);
        assert_eq!(sequence.len(), 1);
        assert_eq!(sequence[0].1, DDPulse::PauliX);

        let dd_xy4 = DynamicalDecoupling::new(DDSequenceType::XY4, 4);
        let sequence = dd_xy4.generate_sequence(1.0);
        assert_eq!(sequence.len(), 4);

        let dd_udd = DynamicalDecoupling::new(DDSequenceType::UDD, 5);
        let sequence = dd_udd.generate_sequence(1.0);
        assert_eq!(sequence.len(), 5);
    }

    #[test]
    fn test_coherence_improvement() {
        let dd = DynamicalDecoupling::new(DDSequenceType::CPMG, 10);
        let improvement = dd.coherence_improvement_factor(100.0, 0.1);
        assert!(improvement > 1.0);
        println!("DD coherence improvement: {}x", improvement);
    }

    #[test]
    fn test_cross_entropy_benchmarking() {
        let xeb = CrossEntropyBenchmarking::new(5, 10, 100);

        // Mock quantum executor
        let quantum_exec = |_circuit_id: usize| vec![0, 1, 2, 3, 4];

        // Mock classical simulator (uniform distribution: 1/32 for 5 qubits)
        let classical_sim = |_circuit_id: usize| vec![1.0 / 32.0; 32];

        let result = xeb.run(quantum_exec, classical_sim).unwrap();

        assert!(result.cross_entropy > 0.0);
        // Uniform ideal probabilities give a linear XEB fidelity of ~0
        assert!(result.fidelity.abs() < 1e-6);
        println!("XEB fidelity: {}", result.fidelity);
    }

    #[test]
    fn test_zne_polynomial_extrapolation() {
        let zne = ZeroNoiseExtrapolation::new(
            vec![1.0, 2.0, 3.0, 4.0],
            ExtrapolationMethod::Polynomial(2),
        );

        // Mock executor: result = 1.0 - 0.05 * scale - 0.01 * scale^2
        let executor = |scale: f64| 1.0 - 0.05 * scale - 0.01 * scale * scale;

        let mitigated = zne.mitigate(executor).unwrap();

        // Should extrapolate close to 1.0 at scale=0
        assert!((mitigated - 1.0).abs() < 0.1);
        println!("ZNE polynomial mitigated result: {}", mitigated);
    }

    #[test]
    fn test_zne_exponential_extrapolation() {
        let zne =
            ZeroNoiseExtrapolation::new(vec![1.0, 2.0, 3.0], ExtrapolationMethod::Exponential);

        // Mock executor: result = 0.9 * exp(-0.1 * scale)
        let executor = |scale: f64| 0.9 * (-0.1 * scale).exp();

        let mitigated = zne.mitigate(executor).unwrap();

        // Should extrapolate close to 0.9 at scale=0
        assert!((mitigated - 0.9).abs() < 0.05);
        println!("ZNE exponential mitigated result: {}", mitigated);
    }
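
    // A sketch of a synthetic check on the private RB fitting helper. The data are
    // generated from P(m) = 0.5 * 0.95^m + 0.5 (assumed values, not device data), so
    // the fit should recover a decay rate near 0.95 and an asymptote near 0.5.
    #[test]
    fn test_rb_exponential_decay_fit() {
        let rb = RandomizedBenchmarking::new(1, 30, 10);

        let mut data = std::collections::HashMap::new();
        for &m in &[1usize, 6, 11, 16, 21, 26, 1000] {
            data.insert(m, 0.5 * 0.95f64.powi(m as i32) + 0.5);
        }

        let (decay_rate, asymptote) = rb.fit_exponential_decay(&data).unwrap();
        assert!((decay_rate - 0.95).abs() < 0.01);
        assert!((asymptote - 0.5).abs() < 0.01);
    }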
}