// quantrs2_sim/noise_extrapolation.rs

//! Noise extrapolation techniques for quantum error mitigation.
//!
//! This module implements various error mitigation techniques including
//! Zero-Noise Extrapolation (ZNE), Virtual Distillation, Symmetry Verification,
//! and other methods to extrapolate quantum results to the zero-noise limit.

7use scirs2_core::ndarray::Array2;
8use scirs2_core::parallel_ops::{
9    IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator,
10};
11use scirs2_core::Complex64;
12use serde::{Deserialize, Serialize};
13
14use crate::error::{Result, SimulatorError};
15
/// Result of a Zero-Noise Extrapolation (ZNE) run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZNEResult {
    /// Extrapolated expectation value at zero noise
    pub zero_noise_value: f64,
    /// Extrapolation error estimate (mean absolute residual of the fit)
    pub error_estimate: f64,
    /// Noise scaling factors used
    pub noise_factors: Vec<f64>,
    /// Measured expectation values at each noise level
    pub measured_values: Vec<f64>,
    /// Measurement uncertainties (one per noise level)
    pub uncertainties: Vec<f64>,
    /// Extrapolation method used
    pub method: ExtrapolationMethod,
    /// Confidence in the extrapolation, in [0, 1]
    pub confidence: f64,
    /// Fitting statistics of the extrapolation fit
    pub fit_stats: FitStatistics,
}
36
/// Virtual Distillation result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtualDistillationResult {
    /// Mitigated expectation value
    pub mitigated_value: f64,
    /// Overhead factor (number of additional circuit executions)
    pub overhead: usize,
    /// Distillation efficiency (1 / overhead)
    pub efficiency: f64,
    /// Error reduction factor: |raw - mitigated| relative to |raw|
    pub error_reduction: f64,
}
49
/// Symmetry Verification result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SymmetryVerificationResult {
    /// Corrected expectation value (mean over symmetry-passing measurements)
    pub corrected_value: f64,
    /// Average symmetry violation over all tested symmetries
    pub symmetry_violation: f64,
    /// Post-selection probability (fraction of measurements kept)
    pub post_selection_prob: f64,
    /// Names of the symmetries tested
    pub symmetries: Vec<String>,
}
62
/// Extrapolation methods for ZNE
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum ExtrapolationMethod {
    /// Linear extrapolation y = a + b*x
    Linear,
    /// Exponential extrapolation y = a * exp(b*x)
    Exponential,
    /// Polynomial extrapolation of the given order
    Polynomial(usize),
    /// Richardson extrapolation (quadratic model through three points)
    Richardson,
    /// Adaptive method selection (best fit chosen by AIC)
    Adaptive,
}
77
/// Noise scaling methods used to amplify noise for ZNE
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum NoiseScalingMethod {
    /// Global unitary folding (G -> G G† G)
    UnitaryFolding,
    /// Local gate folding
    LocalFolding,
    /// Parameter scaling
    ParameterScaling,
    /// Identity insertion
    IdentityInsertion,
    /// Pauli twirling
    PauliTwirling,
}
92
/// Fitting statistics for extrapolation
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct FitStatistics {
    /// R-squared value (coefficient of determination)
    pub r_squared: f64,
    /// Reduced chi-squared (residual sum of squares per degree of freedom)
    pub chi_squared_reduced: f64,
    /// Akaike Information Criterion (lower is better)
    pub aic: f64,
    /// Number of parameters in the fit
    pub num_parameters: usize,
    /// Residuals, measured minus predicted, one per data point
    pub residuals: Vec<f64>,
}
107
/// Zero-Noise Extrapolation engine
#[derive(Debug, Clone)]
pub struct ZeroNoiseExtrapolator {
    /// Noise scaling method (configuration only; not read by the current
    /// implementation — the executor closure applies the scaling)
    scaling_method: NoiseScalingMethod,
    /// Extrapolation method
    extrapolation_method: ExtrapolationMethod,
    /// Noise scaling factors to sample
    noise_factors: Vec<f64>,
    /// Number of shots per noise level (used for shot-noise estimates)
    shots_per_level: usize,
    /// Maximum polynomial order considered by adaptive fitting
    max_poly_order: usize,
}
122
123impl ZeroNoiseExtrapolator {
124    /// Create new ZNE extrapolator
125    #[must_use]
126    pub const fn new(
127        scaling_method: NoiseScalingMethod,
128        extrapolation_method: ExtrapolationMethod,
129        noise_factors: Vec<f64>,
130        shots_per_level: usize,
131    ) -> Self {
132        Self {
133            scaling_method,
134            extrapolation_method,
135            noise_factors,
136            shots_per_level,
137            max_poly_order: 4,
138        }
139    }
140
141    /// Default configuration for ZNE
142    #[must_use]
143    pub fn default_config() -> Self {
144        Self::new(
145            NoiseScalingMethod::UnitaryFolding,
146            ExtrapolationMethod::Linear,
147            vec![1.0, 1.5, 2.0, 2.5, 3.0],
148            1000,
149        )
150    }
151
152    /// Perform zero-noise extrapolation
153    pub fn extrapolate<F>(&self, noisy_executor: F, observable: &str) -> Result<ZNEResult>
154    where
155        F: Fn(f64) -> Result<f64> + Sync + Send,
156    {
157        let start_time = std::time::Instant::now();
158
159        // Measure expectation values at different noise levels
160        let measurements: Result<Vec<(f64, f64, f64)>> = self
161            .noise_factors
162            .par_iter()
163            .map(|&noise_factor| {
164                // Execute circuit with scaled noise
165                let measured_value = noisy_executor(noise_factor)?;
166
167                // Estimate uncertainty (would use shot noise in practice)
168                let uncertainty = self.estimate_measurement_uncertainty(measured_value);
169
170                Ok((noise_factor, measured_value, uncertainty))
171            })
172            .collect();
173
174        let measurements = measurements?;
175        let (noise_factors, measured_values, uncertainties): (Vec<f64>, Vec<f64>, Vec<f64>) =
176            measurements.unzip3();
177
178        // Perform extrapolation based on method
179        let (zero_noise_value, error_estimate, fit_stats) = match self.extrapolation_method {
180            ExtrapolationMethod::Linear => {
181                self.linear_extrapolation(&noise_factors, &measured_values, &uncertainties)?
182            }
183            ExtrapolationMethod::Exponential => {
184                self.exponential_extrapolation(&noise_factors, &measured_values, &uncertainties)?
185            }
186            ExtrapolationMethod::Polynomial(order) => self.polynomial_extrapolation(
187                &noise_factors,
188                &measured_values,
189                &uncertainties,
190                order,
191            )?,
192            ExtrapolationMethod::Richardson => {
193                self.richardson_extrapolation(&noise_factors, &measured_values)?
194            }
195            ExtrapolationMethod::Adaptive => {
196                self.adaptive_extrapolation(&noise_factors, &measured_values, &uncertainties)?
197            }
198        };
199
200        // Calculate confidence based on fit quality
201        let confidence = self.calculate_confidence(&fit_stats);
202
203        Ok(ZNEResult {
204            zero_noise_value,
205            error_estimate,
206            noise_factors,
207            measured_values,
208            uncertainties,
209            method: self.extrapolation_method,
210            confidence,
211            fit_stats,
212        })
213    }
214
215    /// Linear extrapolation y = a + b*x, extrapolate to x=0
216    fn linear_extrapolation(
217        &self,
218        noise_factors: &[f64],
219        measured_values: &[f64],
220        uncertainties: &[f64],
221    ) -> Result<(f64, f64, FitStatistics)> {
222        if noise_factors.len() < 2 {
223            return Err(SimulatorError::InvalidInput(
224                "Need at least 2 data points for linear extrapolation".to_string(),
225            ));
226        }
227
228        // Weighted least squares fit
229        let (a, b, fit_stats) =
230            self.weighted_linear_fit(noise_factors, measured_values, uncertainties)?;
231
232        // Extrapolate to zero noise (x=0)
233        let zero_noise_value = a;
234
235        // Error estimate from fit uncertainty
236        let error_estimate = fit_stats.residuals.iter().map(|r| r.abs()).sum::<f64>()
237            / fit_stats.residuals.len() as f64;
238
239        Ok((zero_noise_value, error_estimate, fit_stats))
240    }
241
242    /// Exponential extrapolation y = a * exp(b*x), extrapolate to x=0
243    fn exponential_extrapolation(
244        &self,
245        noise_factors: &[f64],
246        measured_values: &[f64],
247        uncertainties: &[f64],
248    ) -> Result<(f64, f64, FitStatistics)> {
249        // Transform to linear: ln(y) = ln(a) + b*x
250        let log_values: Vec<f64> = measured_values
251            .iter()
252            .map(|&y| if y > 0.0 { y.ln() } else { f64::NEG_INFINITY })
253            .collect();
254
255        // Check for negative values
256        if log_values.iter().any(|&x| x.is_infinite()) {
257            return Err(SimulatorError::NumericalError(
258                "Cannot take logarithm of non-positive values for exponential fit".to_string(),
259            ));
260        }
261
262        // Linear fit in log space
263        let log_uncertainties: Vec<f64> = uncertainties.iter().zip(measured_values.iter())
264            .map(|(&u, &y)| u / y.abs()) // Propagate uncertainty: σ(ln(y)) ≈ σ(y)/|y|
265            .collect();
266
267        let (ln_a, b, mut fit_stats) =
268            self.weighted_linear_fit(noise_factors, &log_values, &log_uncertainties)?;
269
270        // Transform back
271        let a = ln_a.exp();
272        let zero_noise_value = a; // At x=0: y = a * exp(0) = a
273
274        // Error propagation
275        let error_estimate = a * fit_stats.residuals.iter().map(|r| r.abs()).sum::<f64>()
276            / fit_stats.residuals.len() as f64;
277
278        // Transform residuals back to original space
279        fit_stats.residuals = fit_stats
280            .residuals
281            .iter()
282            .zip(noise_factors.iter().enumerate())
283            .map(|(&_res, (idx, &x))| a.mul_add((b * x).exp(), -measured_values[idx]))
284            .collect();
285
286        Ok((zero_noise_value, error_estimate, fit_stats))
287    }
288
289    /// Polynomial extrapolation
290    fn polynomial_extrapolation(
291        &self,
292        noise_factors: &[f64],
293        measured_values: &[f64],
294        uncertainties: &[f64],
295        order: usize,
296    ) -> Result<(f64, f64, FitStatistics)> {
297        if noise_factors.len() <= order {
298            return Err(SimulatorError::InvalidInput(format!(
299                "Need more than {order} data points for order {order} polynomial"
300            )));
301        }
302
303        // Construct Vandermonde matrix
304        let n = noise_factors.len();
305        let mut design_matrix = Array2::zeros((n, order + 1));
306
307        for (i, &x) in noise_factors.iter().enumerate() {
308            for j in 0..=order {
309                design_matrix[[i, j]] = x.powi(j as i32);
310            }
311        }
312
313        // Weighted least squares (simplified implementation)
314        let coefficients =
315            self.solve_weighted_least_squares(&design_matrix, measured_values, uncertainties)?;
316
317        // Zero-noise value is the constant term (coefficient of x^0)
318        let zero_noise_value = coefficients[0];
319
320        // Calculate residuals
321        let mut residuals = Vec::with_capacity(n);
322        for (i, &x) in noise_factors.iter().enumerate() {
323            let predicted: f64 = coefficients
324                .iter()
325                .enumerate()
326                .map(|(j, &coeff)| coeff * x.powi(j as i32))
327                .sum();
328            residuals.push(measured_values[i] - predicted);
329        }
330
331        let error_estimate =
332            residuals.iter().map(|r| r.abs()).sum::<f64>() / residuals.len() as f64;
333
334        let fit_stats = FitStatistics {
335            residuals,
336            num_parameters: order + 1,
337            ..Default::default()
338        };
339
340        Ok((zero_noise_value, error_estimate, fit_stats))
341    }
342
343    /// Richardson extrapolation
344    fn richardson_extrapolation(
345        &self,
346        noise_factors: &[f64],
347        measured_values: &[f64],
348    ) -> Result<(f64, f64, FitStatistics)> {
349        if noise_factors.len() < 3 {
350            return Err(SimulatorError::InvalidInput(
351                "Richardson extrapolation requires at least 3 data points".to_string(),
352            ));
353        }
354
355        // Use first three points for Richardson extrapolation
356        let x1 = noise_factors[0];
357        let x2 = noise_factors[1];
358        let x3 = noise_factors[2];
359        let y1 = measured_values[0];
360        let y2 = measured_values[1];
361        let y3 = measured_values[2];
362
363        // Richardson extrapolation formula assuming y = a + b*x + c*x^2
364        let denominator = (x1 - x2) * (x1 - x3) * (x2 - x3);
365        if denominator.abs() < 1e-12 {
366            return Err(SimulatorError::NumericalError(
367                "Noise factors too close for Richardson extrapolation".to_string(),
368            ));
369        }
370
371        let a = (y3 * x1 * x2).mul_add(
372            x1 - x2,
373            (y1 * x2 * x3).mul_add(x2 - x3, y2 * x1 * x3 * (x3 - x1)),
374        ) / denominator;
375
376        let zero_noise_value = a;
377
378        // Error estimate (simplified)
379        let error_estimate = (y1 - y2).abs().max((y2 - y3).abs()) * 0.1;
380
381        let fit_stats = FitStatistics {
382            num_parameters: 3,
383            ..Default::default()
384        };
385
386        Ok((zero_noise_value, error_estimate, fit_stats))
387    }
388
389    /// Adaptive extrapolation method selection
390    fn adaptive_extrapolation(
391        &self,
392        noise_factors: &[f64],
393        measured_values: &[f64],
394        uncertainties: &[f64],
395    ) -> Result<(f64, f64, FitStatistics)> {
396        let mut best_result = None;
397        let mut best_aic = f64::INFINITY;
398
399        // Try different extrapolation methods
400        let methods = vec![
401            ExtrapolationMethod::Linear,
402            ExtrapolationMethod::Exponential,
403            ExtrapolationMethod::Polynomial(2),
404            ExtrapolationMethod::Polynomial(3),
405        ];
406
407        for method in methods {
408            let result = match method {
409                ExtrapolationMethod::Linear => {
410                    self.linear_extrapolation(noise_factors, measured_values, uncertainties)
411                }
412                ExtrapolationMethod::Exponential => {
413                    self.exponential_extrapolation(noise_factors, measured_values, uncertainties)
414                }
415                ExtrapolationMethod::Polynomial(order) => self.polynomial_extrapolation(
416                    noise_factors,
417                    measured_values,
418                    uncertainties,
419                    order,
420                ),
421                _ => continue,
422            };
423
424            if let Ok((value, error, stats)) = result {
425                let aic = stats.aic;
426                if aic < best_aic {
427                    best_aic = aic;
428                    best_result = Some((value, error, stats));
429                }
430            }
431        }
432
433        best_result.ok_or_else(|| {
434            SimulatorError::ComputationError("No extrapolation method succeeded".to_string())
435        })
436    }
437
438    /// Weighted linear fit
439    fn weighted_linear_fit(
440        &self,
441        x: &[f64],
442        y: &[f64],
443        weights: &[f64],
444    ) -> Result<(f64, f64, FitStatistics)> {
445        let n = x.len();
446        if n < 2 {
447            return Err(SimulatorError::InvalidInput(
448                "Need at least 2 points for linear fit".to_string(),
449            ));
450        }
451
452        // Convert uncertainties to weights
453        let w: Vec<f64> = weights
454            .iter()
455            .map(|&sigma| {
456                if sigma > 0.0 {
457                    1.0 / (sigma * sigma)
458                } else {
459                    1.0
460                }
461            })
462            .collect();
463
464        // Weighted least squares formulas
465        let sum_w: f64 = w.iter().sum();
466        let sum_wx: f64 = w.iter().zip(x.iter()).map(|(&wi, &xi)| wi * xi).sum();
467        let sum_wy: f64 = w.iter().zip(y.iter()).map(|(&wi, &yi)| wi * yi).sum();
468        let sum_wxx: f64 = w.iter().zip(x.iter()).map(|(&wi, &xi)| wi * xi * xi).sum();
469        let sum_wxy: f64 = w
470            .iter()
471            .zip(x.iter())
472            .zip(y.iter())
473            .map(|((&wi, &xi), &yi)| wi * xi * yi)
474            .sum();
475
476        let delta = sum_w.mul_add(sum_wxx, -(sum_wx * sum_wx));
477        if delta.abs() < 1e-12 {
478            return Err(SimulatorError::NumericalError(
479                "Singular matrix in linear fit".to_string(),
480            ));
481        }
482
483        let a = sum_wxx.mul_add(sum_wy, -(sum_wx * sum_wxy)) / delta; // intercept
484        let b = sum_w.mul_add(sum_wxy, -(sum_wx * sum_wy)) / delta; // slope
485
486        // Calculate residuals and statistics
487        let mut residuals = Vec::with_capacity(n);
488        let mut ss_res = 0.0;
489        let mut ss_tot = 0.0;
490        let y_mean = y.iter().sum::<f64>() / n as f64;
491
492        for i in 0..n {
493            let predicted = b.mul_add(x[i], a);
494            let residual = y[i] - predicted;
495            residuals.push(residual);
496            ss_res += residual * residual;
497            ss_tot += (y[i] - y_mean) * (y[i] - y_mean);
498        }
499
500        let r_squared = if ss_tot > 0.0 {
501            1.0 - ss_res / ss_tot
502        } else {
503            0.0
504        };
505        let chi_squared_reduced = ss_res / (n - 2) as f64;
506
507        // AIC for linear model
508        let aic = (n as f64).mul_add((ss_res / n as f64).ln(), 2.0 * 2.0); // 2 parameters
509
510        let fit_stats = FitStatistics {
511            r_squared,
512            chi_squared_reduced,
513            aic,
514            num_parameters: 2,
515            residuals,
516        };
517
518        Ok((a, b, fit_stats))
519    }
520
521    /// Solve weighted least squares (simplified)
522    fn solve_weighted_least_squares(
523        &self,
524        design_matrix: &Array2<f64>,
525        y: &[f64],
526        weights: &[f64],
527    ) -> Result<Vec<f64>> {
528        // This is a simplified implementation
529        // In practice, would use proper numerical linear algebra
530        let n_params = design_matrix.ncols();
531        let mut coefficients = vec![0.0; n_params];
532
533        // For now, just return intercept as first measured value
534        // Proper implementation would solve the normal equations
535        coefficients[0] = y[0];
536
537        Ok(coefficients)
538    }
539
540    /// Estimate measurement uncertainty
541    fn estimate_measurement_uncertainty(&self, measured_value: f64) -> f64 {
542        // Shot noise estimate: σ ≈ √(p(1-p)/N) for probability p and N shots
543        let p = f64::midpoint(measured_value, 1.0); // Convert from [-1,1] to [0,1]
544        let shot_noise = (p * (1.0 - p) / self.shots_per_level as f64).sqrt();
545        shot_noise * 2.0 // Convert back to [-1,1] scale
546    }
547
548    /// Calculate confidence in extrapolation
549    fn calculate_confidence(&self, fit_stats: &FitStatistics) -> f64 {
550        // Confidence based on R-squared and reduced chi-squared
551        let r_sq_factor = fit_stats.r_squared.clamp(0.0, 1.0);
552        let chi_sq_factor = if fit_stats.chi_squared_reduced > 0.0 {
553            1.0 / (1.0 + fit_stats.chi_squared_reduced)
554        } else {
555            0.5
556        };
557
558        (r_sq_factor * chi_sq_factor).sqrt()
559    }
560}
561
/// Virtual Distillation implementation
pub struct VirtualDistillation {
    /// Number of circuit copies to execute and combine
    num_copies: usize,
    /// Distillation protocol used to combine the measurements
    protocol: DistillationProtocol,
}
569
/// Distillation protocols
#[derive(Debug, Clone, Copy)]
pub enum DistillationProtocol {
    /// Standard virtual distillation (sign-preserving n-th root of product)
    Standard,
    /// Optimized protocol (median of measurements, robust to outliers)
    Optimized,
    /// Adaptive protocol (chooses Standard or Optimized by sample variance)
    Adaptive,
}
580
581impl VirtualDistillation {
582    /// Create new virtual distillation instance
583    #[must_use]
584    pub const fn new(num_copies: usize, protocol: DistillationProtocol) -> Self {
585        Self {
586            num_copies,
587            protocol,
588        }
589    }
590
591    /// Perform virtual distillation
592    pub fn distill<F>(
593        &self,
594        noisy_executor: F,
595        observable: &str,
596    ) -> Result<VirtualDistillationResult>
597    where
598        F: Fn() -> Result<f64>,
599    {
600        // Execute circuit multiple times
601        let measurements: Result<Vec<f64>> =
602            (0..self.num_copies).map(|_| noisy_executor()).collect();
603
604        let measurements = measurements?;
605
606        // Apply distillation protocol
607        let mitigated_value = match self.protocol {
608            DistillationProtocol::Standard => self.standard_distillation(&measurements)?,
609            DistillationProtocol::Optimized => self.optimized_distillation(&measurements)?,
610            DistillationProtocol::Adaptive => self.adaptive_distillation(&measurements)?,
611        };
612
613        // Calculate efficiency metrics
614        let raw_value = measurements.iter().sum::<f64>() / measurements.len() as f64;
615        let error_reduction = (raw_value - mitigated_value).abs() / raw_value.abs().max(1e-10);
616
617        Ok(VirtualDistillationResult {
618            mitigated_value,
619            overhead: self.num_copies,
620            efficiency: 1.0 / self.num_copies as f64,
621            error_reduction,
622        })
623    }
624
625    /// Standard virtual distillation
626    fn standard_distillation(&self, measurements: &[f64]) -> Result<f64> {
627        // Compute product of measurements and take appropriate root
628        let product: f64 = measurements.iter().product();
629        let mitigated = if product >= 0.0 {
630            product.powf(1.0 / self.num_copies as f64)
631        } else {
632            -(-product).powf(1.0 / self.num_copies as f64)
633        };
634
635        Ok(mitigated)
636    }
637
638    /// Optimized distillation
639    fn optimized_distillation(&self, measurements: &[f64]) -> Result<f64> {
640        // Use median for robustness
641        let mut sorted = measurements.to_vec();
642        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
643        let median = sorted[sorted.len() / 2];
644        Ok(median)
645    }
646
647    /// Adaptive distillation
648    fn adaptive_distillation(&self, measurements: &[f64]) -> Result<f64> {
649        // Choose method based on measurement statistics
650        let mean = measurements.iter().sum::<f64>() / measurements.len() as f64;
651        let variance = measurements.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
652            / measurements.len() as f64;
653
654        if variance < 0.1 {
655            self.standard_distillation(measurements)
656        } else {
657            self.optimized_distillation(measurements)
658        }
659    }
660}
661
/// Symmetry Verification implementation
pub struct SymmetryVerification {
    /// Symmetry operations to check against the measured state
    symmetries: Vec<SymmetryOperation>,
}
667
/// Symmetry operation
#[derive(Debug, Clone)]
pub struct SymmetryOperation {
    /// Name of the symmetry (also used as the observable label when measured)
    pub name: String,
    /// Expected eigenvalue (only the real part is compared)
    pub eigenvalue: Complex64,
    /// Tolerance for symmetry violation
    pub tolerance: f64,
}
678
679impl SymmetryVerification {
680    /// Create new symmetry verification
681    #[must_use]
682    pub const fn new(symmetries: Vec<SymmetryOperation>) -> Self {
683        Self { symmetries }
684    }
685
686    /// Verify symmetries and correct expectation value
687    pub fn verify_and_correct<F>(
688        &self,
689        executor: F,
690        observable: &str,
691    ) -> Result<SymmetryVerificationResult>
692    where
693        F: Fn(&str) -> Result<f64>,
694    {
695        let main_value = executor(observable)?;
696
697        let mut violations = Vec::new();
698        let mut valid_measurements = Vec::new();
699
700        // Check each symmetry
701        for symmetry in &self.symmetries {
702            let symmetry_value = executor(&symmetry.name)?;
703            let expected = symmetry.eigenvalue.re;
704            let violation = (symmetry_value - expected).abs();
705
706            violations.push(violation);
707
708            if violation <= symmetry.tolerance {
709                valid_measurements.push(main_value);
710            }
711        }
712
713        // Calculate corrected value
714        let corrected_value = if valid_measurements.is_empty() {
715            main_value // No valid measurements, return original
716        } else {
717            valid_measurements.iter().sum::<f64>() / valid_measurements.len() as f64
718        };
719
720        let avg_violation = violations.iter().sum::<f64>() / violations.len() as f64;
721        let post_selection_prob =
722            valid_measurements.len() as f64 / (self.symmetries.len() + 1) as f64;
723
724        Ok(SymmetryVerificationResult {
725            corrected_value,
726            symmetry_violation: avg_violation,
727            post_selection_prob,
728            symmetries: self.symmetries.iter().map(|s| s.name.clone()).collect(),
729        })
730    }
731}
732
/// Utility trait for splitting a collection of 3-tuples into three vectors.
trait Unzip3<A, B, C> {
    /// Consume `self` and return the three component vectors.
    fn unzip3(self) -> (Vec<A>, Vec<B>, Vec<C>);
}

impl<A, B, C> Unzip3<A, B, C> for Vec<(A, B, C)> {
    fn unzip3(self) -> (Vec<A>, Vec<B>, Vec<C>) {
        let n = self.len();
        // Single pass over the tuples, folding into three pre-sized vectors.
        self.into_iter().fold(
            (
                Vec::with_capacity(n),
                Vec::with_capacity(n),
                Vec::with_capacity(n),
            ),
            |(mut first, mut second, mut third), (a, b, c)| {
                first.push(a);
                second.push(b);
                third.push(c);
                (first, second, third)
            },
        )
    }
}
753
754/// Benchmark noise extrapolation techniques
755pub fn benchmark_noise_extrapolation() -> Result<(ZNEResult, VirtualDistillationResult)> {
756    // Mock noisy executor
757    let noisy_executor = |noise_factor: f64| -> Result<f64> {
758        // Simulate exponential decay with noise
759        let ideal_value = 1.0;
760        let noise_rate = 0.1;
761        Ok(ideal_value * (-noise_rate * noise_factor).exp())
762    };
763
764    // Test ZNE
765    let zne = ZeroNoiseExtrapolator::default_config();
766    let zne_result = zne.extrapolate(noisy_executor, "Z0")?;
767
768    // Test virtual distillation
769    let vd = VirtualDistillation::new(3, DistillationProtocol::Standard);
770    let vd_result = vd.distill(|| Ok(0.8), "Z0")?;
771
772    Ok((zne_result, vd_result))
773}
774
// ============================================================================
// Advanced Error Mitigation Techniques (Beyond Qiskit)
// ============================================================================
778
/// Probabilistic Error Cancellation (PEC) result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PECResult {
    /// Mitigated expectation value
    pub mitigated_value: f64,
    /// Statistical error (standard error of the mean of the PEC samples)
    pub statistical_error: f64,
    /// Sampling overhead (gamma squared)
    pub sampling_overhead: f64,
    /// Number of samples used
    pub num_samples: usize,
    /// Effective samples after error cancellation (num_samples / overhead)
    pub effective_samples: f64,
}
793
/// Probabilistic Error Cancellation (PEC) implementation
///
/// PEC provides exact error mitigation at the cost of increased sampling overhead.
/// It works by representing the ideal operation as a quasi-probability distribution
/// over noisy operations.
#[derive(Debug, Clone)]
pub struct ProbabilisticErrorCancellation {
    /// Noise model for the device
    pub noise_model: NoiseModel,
    /// Number of samples for PEC
    pub num_samples: usize,
    /// Monte Carlo seed for reproducibility
    /// (NOTE(review): stored but not yet consumed by `mitigate`, which draws
    /// from `thread_rng` — wire the seed into the sampler before relying on it)
    pub seed: Option<u64>,
}
808
/// Simplified noise model for PEC
#[derive(Debug, Clone, Default)]
pub struct NoiseModel {
    /// Single-qubit gate error rates, keyed by gate name
    pub single_qubit_errors: std::collections::HashMap<String, f64>,
    /// Two-qubit gate error rates, keyed by qubit pair
    pub two_qubit_errors: std::collections::HashMap<(usize, usize), f64>,
    /// Readout error rates per qubit
    /// (presumably (P(read 1 | prepared 0), P(read 0 | prepared 1)) — confirm)
    pub readout_errors: std::collections::HashMap<usize, (f64, f64)>,
}
819
820impl ProbabilisticErrorCancellation {
821    /// Create a new PEC instance
822    pub fn new(noise_model: NoiseModel, num_samples: usize) -> Self {
823        Self {
824            noise_model,
825            num_samples,
826            seed: None,
827        }
828    }
829
830    /// Set seed for reproducibility
831    pub fn with_seed(mut self, seed: u64) -> Self {
832        self.seed = Some(seed);
833        self
834    }
835
836    /// Calculate the sampling overhead γ for a circuit
837    pub fn calculate_overhead(&self, circuit_depth: usize, num_gates: usize) -> f64 {
838        // γ ≈ ∏_i (1 + 2ε_i) where ε_i is the error rate for gate i
839        let avg_error: f64 = self
840            .noise_model
841            .single_qubit_errors
842            .values()
843            .cloned()
844            .sum::<f64>()
845            / self.noise_model.single_qubit_errors.len().max(1) as f64;
846
847        let gamma_per_gate = 1.0 + 2.0 * avg_error;
848        gamma_per_gate.powi(num_gates as i32)
849    }
850
851    /// Apply PEC to mitigate errors
852    pub fn mitigate<F>(&self, executor: F, observable: &str) -> Result<PECResult>
853    where
854        F: Fn(&str, i32) -> Result<f64>,
855    {
856        use scirs2_core::random::thread_rng;
857        use scirs2_core::random::Rng;
858
859        let mut rng = thread_rng();
860        let mut weighted_sum = 0.0;
861        let mut weight_sum = 0.0;
862        let mut samples = Vec::with_capacity(self.num_samples);
863
864        for _ in 0..self.num_samples {
865            // Sign for quasi-probability (can be negative)
866            let sign: i32 = if rng.gen::<f64>() < 0.5 { 1 } else { -1 };
867
868            // Execute the circuit with the given sign
869            let value = executor(observable, sign)?;
870            let weighted_value = sign as f64 * value;
871
872            weighted_sum += weighted_value;
873            weight_sum += 1.0;
874            samples.push(weighted_value);
875        }
876
877        let mitigated_value = weighted_sum / weight_sum;
878
879        // Calculate statistical error
880        let variance: f64 = samples
881            .iter()
882            .map(|&x| (x - mitigated_value).powi(2))
883            .sum::<f64>()
884            / (self.num_samples - 1).max(1) as f64;
885        let statistical_error = (variance / self.num_samples as f64).sqrt();
886
887        // Estimate sampling overhead
888        let sampling_overhead = self.calculate_overhead(10, 20); // Default estimate
889
890        Ok(PECResult {
891            mitigated_value,
892            statistical_error,
893            sampling_overhead,
894            num_samples: self.num_samples,
895            effective_samples: self.num_samples as f64 / sampling_overhead,
896        })
897    }
898}
899
/// Clifford Data Regression (CDR) result.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CDRResult {
    /// Mitigated expectation value, i.e. `slope * noisy + intercept`
    pub mitigated_value: f64,
    /// Heuristic error estimate derived from the unexplained variance of the fit
    pub regression_error: f64,
    /// Number of Clifford training circuits used to fit the model
    pub num_clifford_circuits: usize,
    /// Regression coefficients, stored as `[slope, intercept]`
    pub coefficients: Vec<f64>,
    /// R-squared (coefficient of determination) of the training fit
    pub r_squared: f64,
}
914
/// Clifford Data Regression (CDR) implementation.
///
/// CDR uses near-Clifford circuits where exact simulation is possible
/// to learn a linear correction model (`ideal ≈ a * noisy + b`) for
/// noisy circuit results.
#[derive(Debug, Clone)]
pub struct CliffordDataRegression {
    /// Number of near-Clifford training circuits
    pub num_training_circuits: usize,
    /// Regression method
    /// (NOTE(review): `train` in this file currently performs ordinary
    /// least squares regardless of this setting)
    pub regression_method: RegressionMethod,
    /// Include intercept term `b` in the regression
    pub fit_intercept: bool,
}
928
/// Regression methods for CDR.
///
/// NOTE(review): `CliffordDataRegression::train` in this file currently fits
/// ordinary least squares irrespective of the selected variant — confirm
/// before relying on the regularized/Bayesian options.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum RegressionMethod {
    /// Ordinary least squares
    OrdinaryLeastSquares,
    /// Ridge (L2-regularized) regression with strength `alpha`
    Ridge { alpha: f64 },
    /// Lasso (L1-regularized) regression with strength `alpha`
    Lasso { alpha: f64 },
    /// Bayesian regression
    Bayesian,
}
941
942impl Default for CliffordDataRegression {
943    fn default() -> Self {
944        Self {
945            num_training_circuits: 100,
946            regression_method: RegressionMethod::OrdinaryLeastSquares,
947            fit_intercept: true,
948        }
949    }
950}
951
952impl CliffordDataRegression {
953    /// Create a new CDR instance
954    pub fn new(num_training_circuits: usize, regression_method: RegressionMethod) -> Self {
955        Self {
956            num_training_circuits,
957            regression_method,
958            fit_intercept: true,
959        }
960    }
961
962    /// Train the CDR model using Clifford circuits
963    pub fn train<F, G>(&self, noisy_executor: F, ideal_executor: G) -> Result<CDRModel>
964    where
965        F: Fn(usize) -> Result<f64>,
966        G: Fn(usize) -> Result<f64>,
967    {
968        let mut noisy_values = Vec::with_capacity(self.num_training_circuits);
969        let mut ideal_values = Vec::with_capacity(self.num_training_circuits);
970
971        for i in 0..self.num_training_circuits {
972            noisy_values.push(noisy_executor(i)?);
973            ideal_values.push(ideal_executor(i)?);
974        }
975
976        // Simple linear regression: ideal = a * noisy + b
977        let n = self.num_training_circuits as f64;
978        let sum_x: f64 = noisy_values.iter().sum();
979        let sum_y: f64 = ideal_values.iter().sum();
980        let sum_xx: f64 = noisy_values.iter().map(|x| x * x).sum();
981        let sum_xy: f64 = noisy_values
982            .iter()
983            .zip(&ideal_values)
984            .map(|(x, y)| x * y)
985            .sum();
986
987        let (a, b) = if self.fit_intercept {
988            let denom = n * sum_xx - sum_x * sum_x;
989            if denom.abs() < 1e-10 {
990                (1.0, 0.0)
991            } else {
992                let slope = (n * sum_xy - sum_x * sum_y) / denom;
993                let intercept = (sum_y - slope * sum_x) / n;
994                (slope, intercept)
995            }
996        } else {
997            (sum_xy / sum_xx, 0.0)
998        };
999
1000        // Calculate R-squared
1001        let y_mean = sum_y / n;
1002        let ss_tot: f64 = ideal_values.iter().map(|y| (y - y_mean).powi(2)).sum();
1003        let ss_res: f64 = noisy_values
1004            .iter()
1005            .zip(&ideal_values)
1006            .map(|(x, y)| (y - (a * x + b)).powi(2))
1007            .sum();
1008        let r_squared = if ss_tot > 1e-10 {
1009            1.0 - ss_res / ss_tot
1010        } else {
1011            0.0
1012        };
1013
1014        Ok(CDRModel {
1015            coefficients: vec![a, b],
1016            r_squared,
1017            num_training_points: self.num_training_circuits,
1018        })
1019    }
1020
1021    /// Apply CDR to mitigate errors
1022    pub fn mitigate(&self, model: &CDRModel, noisy_value: f64) -> Result<CDRResult> {
1023        let mitigated_value = model.coefficients[0] * noisy_value + model.coefficients[1];
1024
1025        // Estimate regression error from R-squared
1026        let regression_error = (1.0 - model.r_squared).sqrt() * noisy_value.abs().max(0.1);
1027
1028        Ok(CDRResult {
1029            mitigated_value,
1030            regression_error,
1031            num_clifford_circuits: model.num_training_points,
1032            coefficients: model.coefficients.clone(),
1033            r_squared: model.r_squared,
1034        })
1035    }
1036}
1037
/// Trained CDR model produced by [`CliffordDataRegression::train`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CDRModel {
    /// Regression coefficients, stored as `[slope, intercept]`
    pub coefficients: Vec<f64>,
    /// R-squared value of the training fit
    pub r_squared: f64,
    /// Number of training points used for the fit
    pub num_training_points: usize,
}
1048
/// Matrix-free Measurement Mitigation (M3) result.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct M3Result {
    /// Mitigated (normalized) probability distribution over bitstrings
    pub mitigated_probs: std::collections::HashMap<String, f64>,
    /// Original raw measurement counts the mitigation started from
    pub original_counts: std::collections::HashMap<String, usize>,
    /// Number of solver iterations actually performed
    pub num_iterations: usize,
    /// Whether the iteration converged within tolerance
    pub converged: bool,
    /// Final L1 distance between successive probability iterates
    pub residual_norm: f64,
}
1063
/// Matrix-free Measurement Mitigation (M3) implementation.
///
/// M3 provides scalable measurement error mitigation without needing to
/// construct or invert the full assignment matrix. It uses iterative
/// methods to solve the mitigation problem efficiently.
#[derive(Debug, Clone)]
pub struct MatrixFreeMeasurementMitigation {
    /// Calibration data: confusion matrices per qubit
    pub calibration: M3Calibration,
    /// Maximum iterations for the iterative solver
    pub max_iterations: usize,
    /// Convergence tolerance (L1 distance between successive iterates)
    pub tolerance: f64,
    /// Method for solving the inverse problem
    /// (NOTE(review): `mitigate` in this file always applies the
    /// iterative-Bayesian-style update regardless of this setting)
    pub solver_method: M3SolverMethod,
}
1080
/// M3 calibration data.
#[derive(Debug, Clone, Default)]
pub struct M3Calibration {
    /// Per-qubit confusion matrices
    /// `confusion[qubit][measured][actual]` = P(measured | actual)
    pub confusion_matrices: std::collections::HashMap<usize, [[f64; 2]; 2]>,
    /// Correlated errors between qubit pairs (optional;
    /// NOTE(review): not consulted by `mitigate` in this file)
    pub correlated_errors: std::collections::HashMap<(usize, usize), Array2<f64>>,
}
1090
/// Solver methods for M3.
///
/// NOTE(review): `MatrixFreeMeasurementMitigation::mitigate` currently applies
/// the iterative-Bayesian-style update irrespective of the chosen variant.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum M3SolverMethod {
    /// Iterative Bayesian unfolding
    IterativeBayesian,
    /// Least squares with non-negativity constraint
    NonNegativeLeastSquares,
    /// Conjugate gradient method
    ConjugateGradient,
}
1101
1102impl Default for MatrixFreeMeasurementMitigation {
1103    fn default() -> Self {
1104        Self {
1105            calibration: M3Calibration::default(),
1106            max_iterations: 100,
1107            tolerance: 1e-6,
1108            solver_method: M3SolverMethod::IterativeBayesian,
1109        }
1110    }
1111}
1112
1113impl MatrixFreeMeasurementMitigation {
1114    /// Create a new M3 instance
1115    pub fn new(calibration: M3Calibration) -> Self {
1116        Self {
1117            calibration,
1118            ..Default::default()
1119        }
1120    }
1121
1122    /// Calibrate M3 using calibration circuits
1123    pub fn calibrate<F>(&mut self, executor: F, num_qubits: usize, shots: usize) -> Result<()>
1124    where
1125        F: Fn(&str, usize) -> Result<std::collections::HashMap<String, usize>>,
1126    {
1127        for qubit in 0..num_qubits {
1128            // Prepare |0⟩ state and measure
1129            let counts_0 = executor(&format!("cal_0_{}", qubit), shots)?;
1130            let p00 = *counts_0.get("0").unwrap_or(&0) as f64 / shots as f64;
1131            let p10 = *counts_0.get("1").unwrap_or(&0) as f64 / shots as f64;
1132
1133            // Prepare |1⟩ state and measure
1134            let counts_1 = executor(&format!("cal_1_{}", qubit), shots)?;
1135            let p01 = *counts_1.get("0").unwrap_or(&0) as f64 / shots as f64;
1136            let p11 = *counts_1.get("1").unwrap_or(&0) as f64 / shots as f64;
1137
1138            // confusion[measured][actual] = P(measured | actual)
1139            self.calibration
1140                .confusion_matrices
1141                .insert(qubit, [[p00, p01], [p10, p11]]);
1142        }
1143
1144        Ok(())
1145    }
1146
1147    /// Apply M3 to mitigate measurement errors
1148    pub fn mitigate(
1149        &self,
1150        counts: &std::collections::HashMap<String, usize>,
1151        num_qubits: usize,
1152    ) -> Result<M3Result> {
1153        let total_shots: usize = counts.values().sum();
1154
1155        // Convert counts to probability distribution
1156        let mut probs: std::collections::HashMap<String, f64> = counts
1157            .iter()
1158            .map(|(k, v)| (k.clone(), *v as f64 / total_shots as f64))
1159            .collect();
1160
1161        // Iterative Bayesian unfolding
1162        let mut converged = false;
1163        let mut num_iterations = 0;
1164        let mut residual_norm = f64::MAX;
1165
1166        for iter in 0..self.max_iterations {
1167            num_iterations = iter + 1;
1168
1169            // Apply inverse confusion matrix (iterative)
1170            let mut new_probs = std::collections::HashMap::new();
1171            let mut total = 0.0;
1172
1173            for (bitstring, &prob) in &probs {
1174                // Calculate correction factor based on confusion matrices
1175                let mut correction = 1.0;
1176
1177                for (i, c) in bitstring.chars().rev().enumerate() {
1178                    if let Some(confusion) = self.calibration.confusion_matrices.get(&i) {
1179                        let measured = if c == '1' { 1 } else { 0 };
1180                        // Diagonal element of inverse (approximate)
1181                        let diag = confusion[measured][measured];
1182                        if diag > 0.01 {
1183                            correction /= diag;
1184                        }
1185                    }
1186                }
1187
1188                let corrected_prob = (prob * correction).max(0.0);
1189                new_probs.insert(bitstring.clone(), corrected_prob);
1190                total += corrected_prob;
1191            }
1192
1193            // Normalize
1194            if total > 0.0 {
1195                for prob in new_probs.values_mut() {
1196                    *prob /= total;
1197                }
1198            }
1199
1200            // Check convergence
1201            let mut diff = 0.0;
1202            for (k, &new_p) in &new_probs {
1203                let old_p = probs.get(k).unwrap_or(&0.0);
1204                diff += (new_p - old_p).abs();
1205            }
1206            residual_norm = diff;
1207
1208            probs = new_probs;
1209
1210            if diff < self.tolerance {
1211                converged = true;
1212                break;
1213            }
1214        }
1215
1216        Ok(M3Result {
1217            mitigated_probs: probs,
1218            original_counts: counts.clone(),
1219            num_iterations,
1220            converged,
1221            residual_norm,
1222        })
1223    }
1224
1225    /// Compute expectation value from mitigated probabilities
1226    pub fn expectation_value(&self, result: &M3Result, observable: &[i8]) -> f64 {
1227        let mut expectation = 0.0;
1228
1229        for (bitstring, &prob) in &result.mitigated_probs {
1230            let mut parity = 1;
1231            for (i, c) in bitstring.chars().rev().enumerate() {
1232                if i < observable.len() && observable[i] != 0 && c == '1' {
1233                    parity *= -1;
1234                }
1235            }
1236            expectation += parity as f64 * prob;
1237        }
1238
1239        expectation
1240    }
1241}
1242
/// Combined error mitigation pipeline.
///
/// Each stage is optional; `None` disables it.
/// NOTE(review): `apply` currently executes only the ZNE stage — PEC, CDR and
/// M3 require sampling or calibration data handled separately.
#[derive(Debug, Clone)]
pub struct ErrorMitigationPipeline {
    /// ZNE configuration
    pub zne: Option<ZeroNoiseExtrapolator>,
    /// PEC configuration
    pub pec: Option<ProbabilisticErrorCancellation>,
    /// CDR configuration
    pub cdr: Option<CliffordDataRegression>,
    /// M3 configuration
    pub m3: Option<MatrixFreeMeasurementMitigation>,
}
1255
1256impl Default for ErrorMitigationPipeline {
1257    fn default() -> Self {
1258        Self {
1259            zne: Some(ZeroNoiseExtrapolator::default_config()),
1260            pec: None,
1261            cdr: None,
1262            m3: None,
1263        }
1264    }
1265}
1266
1267impl ErrorMitigationPipeline {
1268    /// Create a pipeline with all mitigation techniques
1269    pub fn full() -> Self {
1270        Self {
1271            zne: Some(ZeroNoiseExtrapolator::default_config()),
1272            pec: Some(ProbabilisticErrorCancellation::new(
1273                NoiseModel::default(),
1274                1000,
1275            )),
1276            cdr: Some(CliffordDataRegression::default()),
1277            m3: Some(MatrixFreeMeasurementMitigation::default()),
1278        }
1279    }
1280
1281    /// Apply the mitigation pipeline with noise-aware executor
1282    ///
1283    /// The executor takes a noise scaling factor and returns the expectation value
1284    /// at that noise level. This allows ZNE to extrapolate to zero noise.
1285    pub fn apply<F>(&self, executor: F, observable: &str) -> Result<f64>
1286    where
1287        F: Fn(f64) -> Result<f64> + Sync + Send,
1288    {
1289        // Start with ZNE if available
1290        let mut value = if let Some(zne) = &self.zne {
1291            zne.extrapolate(&executor, observable)?.zero_noise_value
1292        } else {
1293            // Execute at noise factor 1.0 (no scaling)
1294            executor(1.0)?
1295        };
1296
1297        // Apply CDR correction if model is trained
1298        // (In practice, would need pre-trained model)
1299
1300        Ok(value)
1301    }
1302
1303    /// Apply mitigation with a simple circuit executor
1304    ///
1305    /// This method wraps a simple executor that doesn't support noise scaling
1306    /// and applies only applicable mitigation techniques.
1307    pub fn apply_simple<F>(&self, executor: F, observable: &str) -> Result<f64>
1308    where
1309        F: Fn(&str) -> Result<f64>,
1310    {
1311        // For simple executors, skip ZNE (requires noise scaling)
1312        // and just apply other techniques
1313        let value = executor(observable)?;
1314
1315        // CDR and M3 can be applied post-hoc to measurement results
1316        // but require calibration data which would be handled separately
1317
1318        Ok(value)
1319    }
1320}
1321
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_zne_linear_extrapolation() {
        let extrapolator = ZeroNoiseExtrapolator::new(
            NoiseScalingMethod::UnitaryFolding,
            ExtrapolationMethod::Linear,
            vec![1.0, 2.0, 3.0],
            100,
        );

        // Synthetic data following y = 1.0 - 0.1*x; the zero-noise limit is 1.0.
        let factors = vec![1.0, 2.0, 3.0];
        let values = vec![0.9, 0.8, 0.7];
        let sigmas = vec![0.01, 0.01, 0.01];

        let (zero_noise, _err, _fit) = extrapolator
            .linear_extrapolation(&factors, &values, &sigmas)
            .expect("Failed to perform linear extrapolation");

        assert!((zero_noise - 1.0).abs() < 0.1);
    }

    #[test]
    fn test_virtual_distillation() {
        let distiller = VirtualDistillation::new(3, DistillationProtocol::Standard);

        let raw = vec![0.8, 0.7, 0.9];
        let distilled = distiller
            .standard_distillation(&raw)
            .expect("Failed to perform standard distillation");

        // The distilled value must remain a valid expectation in (0, 1).
        assert!(distilled > 0.0);
        assert!(distilled < 1.0);
    }

    #[test]
    fn test_symmetry_verification() {
        let symmetries = vec![SymmetryOperation {
            name: "parity".to_string(),
            eigenvalue: Complex64::new(1.0, 0.0),
            tolerance: 0.1,
        }];
        let verifier = SymmetryVerification::new(symmetries);

        // Mock executor: the parity observable is close to its expected
        // eigenvalue, so the Z0 result should pass through uncorrected.
        let executor = |obs: &str| -> Result<f64> {
            match obs {
                "Z0" => Ok(0.8),
                "parity" => Ok(0.95), // Close to expected value 1.0
                _ => Ok(0.0),
            }
        };

        let outcome = verifier
            .verify_and_correct(executor, "Z0")
            .expect("Failed to verify and correct");
        assert!((outcome.corrected_value - 0.8).abs() < 1e-10);
        assert!(outcome.post_selection_prob > 0.0);
    }

    #[test]
    fn test_richardson_extrapolation() {
        let extrapolator = ZeroNoiseExtrapolator::default_config();

        // Quadratically decaying mock data.
        let factors = vec![1.0, 2.0, 3.0];
        let values = vec![1.0, 0.8, 0.6];

        let (zero_noise, _err, _fit) = extrapolator
            .richardson_extrapolation(&factors, &values)
            .expect("Failed to perform Richardson extrapolation");

        assert!(zero_noise > 0.0);
    }
}