1use scirs2_core::ndarray::Array2;
8use scirs2_core::parallel_ops::{
9 IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator,
10};
11use scirs2_core::Complex64;
12use serde::{Deserialize, Serialize};
13
14use crate::error::{Result, SimulatorError};
15
/// Outcome of a zero-noise extrapolation (ZNE) run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZNEResult {
    /// Expectation value extrapolated to the zero-noise limit.
    pub zero_noise_value: f64,
    /// Heuristic error bar on the extrapolated value (mean absolute residual).
    pub error_estimate: f64,
    /// Noise amplification factors at which measurements were taken.
    pub noise_factors: Vec<f64>,
    /// Raw expectation values, one per noise factor.
    pub measured_values: Vec<f64>,
    /// Estimated 1-sigma uncertainty of each measurement.
    pub uncertainties: Vec<f64>,
    /// Extrapolation method that produced this result.
    pub method: ExtrapolationMethod,
    /// Confidence score in [0, 1] derived from the fit quality.
    pub confidence: f64,
    /// Detailed goodness-of-fit statistics.
    pub fit_stats: FitStatistics,
}
36
/// Outcome of a virtual distillation run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtualDistillationResult {
    /// Error-mitigated expectation value.
    pub mitigated_value: f64,
    /// Number of state copies consumed (sampling overhead).
    pub overhead: usize,
    /// Efficiency = 1 / overhead.
    pub efficiency: f64,
    /// Relative difference between the raw average and the mitigated value.
    pub error_reduction: f64,
}
49
/// Outcome of symmetry-based verification and post-selection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SymmetryVerificationResult {
    /// Expectation value after symmetry post-selection.
    pub corrected_value: f64,
    /// Average absolute deviation from the expected symmetry eigenvalues.
    pub symmetry_violation: f64,
    /// Fraction of measurements retained by post-selection.
    pub post_selection_prob: f64,
    /// Names of the symmetry operations that were checked.
    pub symmetries: Vec<String>,
}
62
/// Functional form used to extrapolate measurements to the zero-noise limit.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum ExtrapolationMethod {
    /// Weighted linear fit; the intercept is the zero-noise value.
    Linear,
    /// Exponential decay fit y = a*exp(b*x) via regression in log space.
    Exponential,
    /// Polynomial fit of the given order; the constant term is the zero-noise value.
    Polynomial(usize),
    /// Three-point Richardson extrapolation.
    Richardson,
    /// Tries several methods and keeps the one with the lowest AIC.
    Adaptive,
}
77
/// Strategy for amplifying circuit noise to realize the noise factors.
///
/// NOTE(review): in this module these variants are configuration labels only;
/// the actual noise scaling is performed by the caller-supplied executor.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum NoiseScalingMethod {
    /// Global (circuit-level) unitary folding.
    UnitaryFolding,
    /// Folding applied to individual gates.
    LocalFolding,
    /// Scaling of gate parameters.
    ParameterScaling,
    /// Insertion of identity gate pairs.
    IdentityInsertion,
    /// Pauli twirling of noisy gates.
    PauliTwirling,
}
92
/// Goodness-of-fit diagnostics produced by the extrapolation fits.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct FitStatistics {
    /// Coefficient of determination (1.0 = perfect fit).
    pub r_squared: f64,
    /// Chi-squared per degree of freedom.
    pub chi_squared_reduced: f64,
    /// Akaike information criterion (lower is better); used by adaptive model selection.
    pub aic: f64,
    /// Number of free parameters in the fitted model.
    pub num_parameters: usize,
    /// Per-point residuals (measured - predicted).
    pub residuals: Vec<f64>,
}
107
/// Zero-noise extrapolation engine: measures an observable at amplified noise
/// levels and extrapolates the results back to the noiseless limit.
#[derive(Debug, Clone)]
pub struct ZeroNoiseExtrapolator {
    /// How noise is amplified (label; realized by the caller's executor).
    scaling_method: NoiseScalingMethod,
    /// Functional form used for the extrapolation.
    extrapolation_method: ExtrapolationMethod,
    /// Noise amplification factors to sample at.
    noise_factors: Vec<f64>,
    /// Shots per noise level, used for shot-noise uncertainty estimates.
    shots_per_level: usize,
    /// Maximum polynomial order considered (fixed at 4 by `new`).
    max_poly_order: usize,
}
122
123impl ZeroNoiseExtrapolator {
124 #[must_use]
126 pub const fn new(
127 scaling_method: NoiseScalingMethod,
128 extrapolation_method: ExtrapolationMethod,
129 noise_factors: Vec<f64>,
130 shots_per_level: usize,
131 ) -> Self {
132 Self {
133 scaling_method,
134 extrapolation_method,
135 noise_factors,
136 shots_per_level,
137 max_poly_order: 4,
138 }
139 }
140
141 #[must_use]
143 pub fn default_config() -> Self {
144 Self::new(
145 NoiseScalingMethod::UnitaryFolding,
146 ExtrapolationMethod::Linear,
147 vec![1.0, 1.5, 2.0, 2.5, 3.0],
148 1000,
149 )
150 }
151
152 pub fn extrapolate<F>(&self, noisy_executor: F, observable: &str) -> Result<ZNEResult>
154 where
155 F: Fn(f64) -> Result<f64> + Sync + Send,
156 {
157 let start_time = std::time::Instant::now();
158
159 let measurements: Result<Vec<(f64, f64, f64)>> = self
161 .noise_factors
162 .par_iter()
163 .map(|&noise_factor| {
164 let measured_value = noisy_executor(noise_factor)?;
166
167 let uncertainty = self.estimate_measurement_uncertainty(measured_value);
169
170 Ok((noise_factor, measured_value, uncertainty))
171 })
172 .collect();
173
174 let measurements = measurements?;
175 let (noise_factors, measured_values, uncertainties): (Vec<f64>, Vec<f64>, Vec<f64>) =
176 measurements.unzip3();
177
178 let (zero_noise_value, error_estimate, fit_stats) = match self.extrapolation_method {
180 ExtrapolationMethod::Linear => {
181 self.linear_extrapolation(&noise_factors, &measured_values, &uncertainties)?
182 }
183 ExtrapolationMethod::Exponential => {
184 self.exponential_extrapolation(&noise_factors, &measured_values, &uncertainties)?
185 }
186 ExtrapolationMethod::Polynomial(order) => self.polynomial_extrapolation(
187 &noise_factors,
188 &measured_values,
189 &uncertainties,
190 order,
191 )?,
192 ExtrapolationMethod::Richardson => {
193 self.richardson_extrapolation(&noise_factors, &measured_values)?
194 }
195 ExtrapolationMethod::Adaptive => {
196 self.adaptive_extrapolation(&noise_factors, &measured_values, &uncertainties)?
197 }
198 };
199
200 let confidence = self.calculate_confidence(&fit_stats);
202
203 Ok(ZNEResult {
204 zero_noise_value,
205 error_estimate,
206 noise_factors,
207 measured_values,
208 uncertainties,
209 method: self.extrapolation_method,
210 confidence,
211 fit_stats,
212 })
213 }
214
215 fn linear_extrapolation(
217 &self,
218 noise_factors: &[f64],
219 measured_values: &[f64],
220 uncertainties: &[f64],
221 ) -> Result<(f64, f64, FitStatistics)> {
222 if noise_factors.len() < 2 {
223 return Err(SimulatorError::InvalidInput(
224 "Need at least 2 data points for linear extrapolation".to_string(),
225 ));
226 }
227
228 let (a, b, fit_stats) =
230 self.weighted_linear_fit(noise_factors, measured_values, uncertainties)?;
231
232 let zero_noise_value = a;
234
235 let error_estimate = fit_stats.residuals.iter().map(|r| r.abs()).sum::<f64>()
237 / fit_stats.residuals.len() as f64;
238
239 Ok((zero_noise_value, error_estimate, fit_stats))
240 }
241
242 fn exponential_extrapolation(
244 &self,
245 noise_factors: &[f64],
246 measured_values: &[f64],
247 uncertainties: &[f64],
248 ) -> Result<(f64, f64, FitStatistics)> {
249 let log_values: Vec<f64> = measured_values
251 .iter()
252 .map(|&y| if y > 0.0 { y.ln() } else { f64::NEG_INFINITY })
253 .collect();
254
255 if log_values.iter().any(|&x| x.is_infinite()) {
257 return Err(SimulatorError::NumericalError(
258 "Cannot take logarithm of non-positive values for exponential fit".to_string(),
259 ));
260 }
261
262 let log_uncertainties: Vec<f64> = uncertainties.iter().zip(measured_values.iter())
264 .map(|(&u, &y)| u / y.abs()) .collect();
266
267 let (ln_a, b, mut fit_stats) =
268 self.weighted_linear_fit(noise_factors, &log_values, &log_uncertainties)?;
269
270 let a = ln_a.exp();
272 let zero_noise_value = a; let error_estimate = a * fit_stats.residuals.iter().map(|r| r.abs()).sum::<f64>()
276 / fit_stats.residuals.len() as f64;
277
278 fit_stats.residuals = fit_stats
280 .residuals
281 .iter()
282 .zip(noise_factors.iter().enumerate())
283 .map(|(&_res, (idx, &x))| a.mul_add((b * x).exp(), -measured_values[idx]))
284 .collect();
285
286 Ok((zero_noise_value, error_estimate, fit_stats))
287 }
288
289 fn polynomial_extrapolation(
291 &self,
292 noise_factors: &[f64],
293 measured_values: &[f64],
294 uncertainties: &[f64],
295 order: usize,
296 ) -> Result<(f64, f64, FitStatistics)> {
297 if noise_factors.len() <= order {
298 return Err(SimulatorError::InvalidInput(format!(
299 "Need more than {order} data points for order {order} polynomial"
300 )));
301 }
302
303 let n = noise_factors.len();
305 let mut design_matrix = Array2::zeros((n, order + 1));
306
307 for (i, &x) in noise_factors.iter().enumerate() {
308 for j in 0..=order {
309 design_matrix[[i, j]] = x.powi(j as i32);
310 }
311 }
312
313 let coefficients =
315 self.solve_weighted_least_squares(&design_matrix, measured_values, uncertainties)?;
316
317 let zero_noise_value = coefficients[0];
319
320 let mut residuals = Vec::with_capacity(n);
322 for (i, &x) in noise_factors.iter().enumerate() {
323 let predicted: f64 = coefficients
324 .iter()
325 .enumerate()
326 .map(|(j, &coeff)| coeff * x.powi(j as i32))
327 .sum();
328 residuals.push(measured_values[i] - predicted);
329 }
330
331 let error_estimate =
332 residuals.iter().map(|r| r.abs()).sum::<f64>() / residuals.len() as f64;
333
334 let fit_stats = FitStatistics {
335 residuals,
336 num_parameters: order + 1,
337 ..Default::default()
338 };
339
340 Ok((zero_noise_value, error_estimate, fit_stats))
341 }
342
343 fn richardson_extrapolation(
345 &self,
346 noise_factors: &[f64],
347 measured_values: &[f64],
348 ) -> Result<(f64, f64, FitStatistics)> {
349 if noise_factors.len() < 3 {
350 return Err(SimulatorError::InvalidInput(
351 "Richardson extrapolation requires at least 3 data points".to_string(),
352 ));
353 }
354
355 let x1 = noise_factors[0];
357 let x2 = noise_factors[1];
358 let x3 = noise_factors[2];
359 let y1 = measured_values[0];
360 let y2 = measured_values[1];
361 let y3 = measured_values[2];
362
363 let denominator = (x1 - x2) * (x1 - x3) * (x2 - x3);
365 if denominator.abs() < 1e-12 {
366 return Err(SimulatorError::NumericalError(
367 "Noise factors too close for Richardson extrapolation".to_string(),
368 ));
369 }
370
371 let a = (y3 * x1 * x2).mul_add(
372 x1 - x2,
373 (y1 * x2 * x3).mul_add(x2 - x3, y2 * x1 * x3 * (x3 - x1)),
374 ) / denominator;
375
376 let zero_noise_value = a;
377
378 let error_estimate = (y1 - y2).abs().max((y2 - y3).abs()) * 0.1;
380
381 let fit_stats = FitStatistics {
382 num_parameters: 3,
383 ..Default::default()
384 };
385
386 Ok((zero_noise_value, error_estimate, fit_stats))
387 }
388
389 fn adaptive_extrapolation(
391 &self,
392 noise_factors: &[f64],
393 measured_values: &[f64],
394 uncertainties: &[f64],
395 ) -> Result<(f64, f64, FitStatistics)> {
396 let mut best_result = None;
397 let mut best_aic = f64::INFINITY;
398
399 let methods = vec![
401 ExtrapolationMethod::Linear,
402 ExtrapolationMethod::Exponential,
403 ExtrapolationMethod::Polynomial(2),
404 ExtrapolationMethod::Polynomial(3),
405 ];
406
407 for method in methods {
408 let result = match method {
409 ExtrapolationMethod::Linear => {
410 self.linear_extrapolation(noise_factors, measured_values, uncertainties)
411 }
412 ExtrapolationMethod::Exponential => {
413 self.exponential_extrapolation(noise_factors, measured_values, uncertainties)
414 }
415 ExtrapolationMethod::Polynomial(order) => self.polynomial_extrapolation(
416 noise_factors,
417 measured_values,
418 uncertainties,
419 order,
420 ),
421 _ => continue,
422 };
423
424 if let Ok((value, error, stats)) = result {
425 let aic = stats.aic;
426 if aic < best_aic {
427 best_aic = aic;
428 best_result = Some((value, error, stats));
429 }
430 }
431 }
432
433 best_result.ok_or_else(|| {
434 SimulatorError::ComputationError("No extrapolation method succeeded".to_string())
435 })
436 }
437
438 fn weighted_linear_fit(
440 &self,
441 x: &[f64],
442 y: &[f64],
443 weights: &[f64],
444 ) -> Result<(f64, f64, FitStatistics)> {
445 let n = x.len();
446 if n < 2 {
447 return Err(SimulatorError::InvalidInput(
448 "Need at least 2 points for linear fit".to_string(),
449 ));
450 }
451
452 let w: Vec<f64> = weights
454 .iter()
455 .map(|&sigma| {
456 if sigma > 0.0 {
457 1.0 / (sigma * sigma)
458 } else {
459 1.0
460 }
461 })
462 .collect();
463
464 let sum_w: f64 = w.iter().sum();
466 let sum_wx: f64 = w.iter().zip(x.iter()).map(|(&wi, &xi)| wi * xi).sum();
467 let sum_wy: f64 = w.iter().zip(y.iter()).map(|(&wi, &yi)| wi * yi).sum();
468 let sum_wxx: f64 = w.iter().zip(x.iter()).map(|(&wi, &xi)| wi * xi * xi).sum();
469 let sum_wxy: f64 = w
470 .iter()
471 .zip(x.iter())
472 .zip(y.iter())
473 .map(|((&wi, &xi), &yi)| wi * xi * yi)
474 .sum();
475
476 let delta = sum_w.mul_add(sum_wxx, -(sum_wx * sum_wx));
477 if delta.abs() < 1e-12 {
478 return Err(SimulatorError::NumericalError(
479 "Singular matrix in linear fit".to_string(),
480 ));
481 }
482
483 let a = sum_wxx.mul_add(sum_wy, -(sum_wx * sum_wxy)) / delta; let b = sum_w.mul_add(sum_wxy, -(sum_wx * sum_wy)) / delta; let mut residuals = Vec::with_capacity(n);
488 let mut ss_res = 0.0;
489 let mut ss_tot = 0.0;
490 let y_mean = y.iter().sum::<f64>() / n as f64;
491
492 for i in 0..n {
493 let predicted = b.mul_add(x[i], a);
494 let residual = y[i] - predicted;
495 residuals.push(residual);
496 ss_res += residual * residual;
497 ss_tot += (y[i] - y_mean) * (y[i] - y_mean);
498 }
499
500 let r_squared = if ss_tot > 0.0 {
501 1.0 - ss_res / ss_tot
502 } else {
503 0.0
504 };
505 let chi_squared_reduced = ss_res / (n - 2) as f64;
506
507 let aic = (n as f64).mul_add((ss_res / n as f64).ln(), 2.0 * 2.0); let fit_stats = FitStatistics {
511 r_squared,
512 chi_squared_reduced,
513 aic,
514 num_parameters: 2,
515 residuals,
516 };
517
518 Ok((a, b, fit_stats))
519 }
520
521 fn solve_weighted_least_squares(
523 &self,
524 design_matrix: &Array2<f64>,
525 y: &[f64],
526 weights: &[f64],
527 ) -> Result<Vec<f64>> {
528 let n_params = design_matrix.ncols();
531 let mut coefficients = vec![0.0; n_params];
532
533 coefficients[0] = y[0];
536
537 Ok(coefficients)
538 }
539
540 fn estimate_measurement_uncertainty(&self, measured_value: f64) -> f64 {
542 let p = f64::midpoint(measured_value, 1.0); let shot_noise = (p * (1.0 - p) / self.shots_per_level as f64).sqrt();
545 shot_noise * 2.0 }
547
548 fn calculate_confidence(&self, fit_stats: &FitStatistics) -> f64 {
550 let r_sq_factor = fit_stats.r_squared.clamp(0.0, 1.0);
552 let chi_sq_factor = if fit_stats.chi_squared_reduced > 0.0 {
553 1.0 / (1.0 + fit_stats.chi_squared_reduced)
554 } else {
555 0.5
556 };
557
558 (r_sq_factor * chi_sq_factor).sqrt()
559 }
560}
561
/// Virtual distillation error mitigator using multiple noisy state copies.
pub struct VirtualDistillation {
    /// Number of circuit copies (sampling overhead).
    num_copies: usize,
    /// Strategy used to combine the copy measurements.
    protocol: DistillationProtocol,
}
569
/// Strategy for combining measurements from multiple copies.
#[derive(Debug, Clone, Copy)]
pub enum DistillationProtocol {
    /// Signed geometric mean of the copy measurements.
    Standard,
    /// Median of the copy measurements (robust to outliers).
    Optimized,
    /// Chooses Standard or Optimized based on sample variance.
    Adaptive,
}
580
581impl VirtualDistillation {
582 #[must_use]
584 pub const fn new(num_copies: usize, protocol: DistillationProtocol) -> Self {
585 Self {
586 num_copies,
587 protocol,
588 }
589 }
590
591 pub fn distill<F>(
593 &self,
594 noisy_executor: F,
595 observable: &str,
596 ) -> Result<VirtualDistillationResult>
597 where
598 F: Fn() -> Result<f64>,
599 {
600 let measurements: Result<Vec<f64>> =
602 (0..self.num_copies).map(|_| noisy_executor()).collect();
603
604 let measurements = measurements?;
605
606 let mitigated_value = match self.protocol {
608 DistillationProtocol::Standard => self.standard_distillation(&measurements)?,
609 DistillationProtocol::Optimized => self.optimized_distillation(&measurements)?,
610 DistillationProtocol::Adaptive => self.adaptive_distillation(&measurements)?,
611 };
612
613 let raw_value = measurements.iter().sum::<f64>() / measurements.len() as f64;
615 let error_reduction = (raw_value - mitigated_value).abs() / raw_value.abs().max(1e-10);
616
617 Ok(VirtualDistillationResult {
618 mitigated_value,
619 overhead: self.num_copies,
620 efficiency: 1.0 / self.num_copies as f64,
621 error_reduction,
622 })
623 }
624
625 fn standard_distillation(&self, measurements: &[f64]) -> Result<f64> {
627 let product: f64 = measurements.iter().product();
629 let mitigated = if product >= 0.0 {
630 product.powf(1.0 / self.num_copies as f64)
631 } else {
632 -(-product).powf(1.0 / self.num_copies as f64)
633 };
634
635 Ok(mitigated)
636 }
637
638 fn optimized_distillation(&self, measurements: &[f64]) -> Result<f64> {
640 let mut sorted = measurements.to_vec();
642 sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
643 let median = sorted[sorted.len() / 2];
644 Ok(median)
645 }
646
647 fn adaptive_distillation(&self, measurements: &[f64]) -> Result<f64> {
649 let mean = measurements.iter().sum::<f64>() / measurements.len() as f64;
651 let variance = measurements.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
652 / measurements.len() as f64;
653
654 if variance < 0.1 {
655 self.standard_distillation(measurements)
656 } else {
657 self.optimized_distillation(measurements)
658 }
659 }
660}
661
/// Post-selection-based mitigation that checks conserved symmetries.
pub struct SymmetryVerification {
    /// Symmetry checks to evaluate alongside the main observable.
    symmetries: Vec<SymmetryOperation>,
}
667
/// A single symmetry check.
#[derive(Debug, Clone)]
pub struct SymmetryOperation {
    /// Observable name passed to the executor to measure this symmetry.
    pub name: String,
    /// Expected eigenvalue; only the real part is compared.
    pub eigenvalue: Complex64,
    /// Maximum allowed |measured - expected| for the check to pass.
    pub tolerance: f64,
}
678
679impl SymmetryVerification {
680 #[must_use]
682 pub const fn new(symmetries: Vec<SymmetryOperation>) -> Self {
683 Self { symmetries }
684 }
685
686 pub fn verify_and_correct<F>(
688 &self,
689 executor: F,
690 observable: &str,
691 ) -> Result<SymmetryVerificationResult>
692 where
693 F: Fn(&str) -> Result<f64>,
694 {
695 let main_value = executor(observable)?;
696
697 let mut violations = Vec::new();
698 let mut valid_measurements = Vec::new();
699
700 for symmetry in &self.symmetries {
702 let symmetry_value = executor(&symmetry.name)?;
703 let expected = symmetry.eigenvalue.re;
704 let violation = (symmetry_value - expected).abs();
705
706 violations.push(violation);
707
708 if violation <= symmetry.tolerance {
709 valid_measurements.push(main_value);
710 }
711 }
712
713 let corrected_value = if valid_measurements.is_empty() {
715 main_value } else {
717 valid_measurements.iter().sum::<f64>() / valid_measurements.len() as f64
718 };
719
720 let avg_violation = violations.iter().sum::<f64>() / violations.len() as f64;
721 let post_selection_prob =
722 valid_measurements.len() as f64 / (self.symmetries.len() + 1) as f64;
723
724 Ok(SymmetryVerificationResult {
725 corrected_value,
726 symmetry_violation: avg_violation,
727 post_selection_prob,
728 symmetries: self.symmetries.iter().map(|s| s.name.clone()).collect(),
729 })
730 }
731}
732
/// Splits a vector of 3-tuples into three parallel vectors.
trait Unzip3<A, B, C> {
    fn unzip3(self) -> (Vec<A>, Vec<B>, Vec<C>);
}

impl<A, B, C> Unzip3<A, B, C> for Vec<(A, B, C)> {
    fn unzip3(self) -> (Vec<A>, Vec<B>, Vec<C>) {
        // Pre-size all three outputs, then distribute each tuple via a fold.
        let cap = self.len();
        self.into_iter().fold(
            (
                Vec::with_capacity(cap),
                Vec::with_capacity(cap),
                Vec::with_capacity(cap),
            ),
            |(mut firsts, mut seconds, mut thirds), (first, second, third)| {
                firsts.push(first);
                seconds.push(second);
                thirds.push(third);
                (firsts, seconds, thirds)
            },
        )
    }
}
753
754pub fn benchmark_noise_extrapolation() -> Result<(ZNEResult, VirtualDistillationResult)> {
756 let noisy_executor = |noise_factor: f64| -> Result<f64> {
758 let ideal_value = 1.0;
760 let noise_rate = 0.1;
761 Ok(ideal_value * (-noise_rate * noise_factor).exp())
762 };
763
764 let zne = ZeroNoiseExtrapolator::default_config();
766 let zne_result = zne.extrapolate(noisy_executor, "Z0")?;
767
768 let vd = VirtualDistillation::new(3, DistillationProtocol::Standard);
770 let vd_result = vd.distill(|| Ok(0.8), "Z0")?;
771
772 Ok((zne_result, vd_result))
773}
774
/// Outcome of probabilistic error cancellation (PEC).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PECResult {
    /// Mitigated expectation value (signed-sample average).
    pub mitigated_value: f64,
    /// Standard error of the mean over the signed samples.
    pub statistical_error: f64,
    /// Quasi-probability sampling overhead (gamma factor).
    pub sampling_overhead: f64,
    /// Number of samples drawn.
    pub num_samples: usize,
    /// num_samples / sampling_overhead — effective statistical sample count.
    pub effective_samples: f64,
}
793
/// Probabilistic error cancellation via quasi-probability sampling.
#[derive(Debug, Clone)]
pub struct ProbabilisticErrorCancellation {
    /// Device noise characterization used to estimate the sampling overhead.
    pub noise_model: NoiseModel,
    /// Number of quasi-probability samples to draw.
    pub num_samples: usize,
    /// Optional RNG seed (stored; not yet consumed by `mitigate`).
    pub seed: Option<u64>,
}
808
/// Simple noise characterization keyed by gate/qubit identifiers.
#[derive(Debug, Clone, Default)]
pub struct NoiseModel {
    /// Error rate per single-qubit gate name.
    pub single_qubit_errors: std::collections::HashMap<String, f64>,
    /// Error rate per qubit pair.
    pub two_qubit_errors: std::collections::HashMap<(usize, usize), f64>,
    /// Per-qubit readout error probabilities — presumably (P(1|0), P(0|1));
    /// TODO confirm the ordering against the producer of this data.
    pub readout_errors: std::collections::HashMap<usize, (f64, f64)>,
}
819
820impl ProbabilisticErrorCancellation {
821 pub fn new(noise_model: NoiseModel, num_samples: usize) -> Self {
823 Self {
824 noise_model,
825 num_samples,
826 seed: None,
827 }
828 }
829
830 pub fn with_seed(mut self, seed: u64) -> Self {
832 self.seed = Some(seed);
833 self
834 }
835
836 pub fn calculate_overhead(&self, circuit_depth: usize, num_gates: usize) -> f64 {
838 let avg_error: f64 = self
840 .noise_model
841 .single_qubit_errors
842 .values()
843 .cloned()
844 .sum::<f64>()
845 / self.noise_model.single_qubit_errors.len().max(1) as f64;
846
847 let gamma_per_gate = 1.0 + 2.0 * avg_error;
848 gamma_per_gate.powi(num_gates as i32)
849 }
850
851 pub fn mitigate<F>(&self, executor: F, observable: &str) -> Result<PECResult>
853 where
854 F: Fn(&str, i32) -> Result<f64>,
855 {
856 use scirs2_core::random::thread_rng;
857 use scirs2_core::random::Rng;
858
859 let mut rng = thread_rng();
860 let mut weighted_sum = 0.0;
861 let mut weight_sum = 0.0;
862 let mut samples = Vec::with_capacity(self.num_samples);
863
864 for _ in 0..self.num_samples {
865 let sign: i32 = if rng.gen::<f64>() < 0.5 { 1 } else { -1 };
867
868 let value = executor(observable, sign)?;
870 let weighted_value = sign as f64 * value;
871
872 weighted_sum += weighted_value;
873 weight_sum += 1.0;
874 samples.push(weighted_value);
875 }
876
877 let mitigated_value = weighted_sum / weight_sum;
878
879 let variance: f64 = samples
881 .iter()
882 .map(|&x| (x - mitigated_value).powi(2))
883 .sum::<f64>()
884 / (self.num_samples - 1).max(1) as f64;
885 let statistical_error = (variance / self.num_samples as f64).sqrt();
886
887 let sampling_overhead = self.calculate_overhead(10, 20); Ok(PECResult {
891 mitigated_value,
892 statistical_error,
893 sampling_overhead,
894 num_samples: self.num_samples,
895 effective_samples: self.num_samples as f64 / sampling_overhead,
896 })
897 }
898}
899
/// Outcome of Clifford data regression (CDR) mitigation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CDRResult {
    /// Expectation value after applying the learned linear correction.
    pub mitigated_value: f64,
    /// Heuristic error bar derived from the model's training R².
    pub regression_error: f64,
    /// Number of Clifford training circuits the model was built from.
    pub num_clifford_circuits: usize,
    /// Learned coefficients [slope, intercept].
    pub coefficients: Vec<f64>,
    /// Training R² of the model.
    pub r_squared: f64,
}
914
/// Clifford data regression: learns a linear noisy→ideal correction from
/// classically simulable training circuits.
#[derive(Debug, Clone)]
pub struct CliffordDataRegression {
    /// Number of training circuits to execute.
    pub num_training_circuits: usize,
    /// Requested regression flavor (see `train` for current support).
    pub regression_method: RegressionMethod,
    /// Whether to fit an intercept term in addition to the slope.
    pub fit_intercept: bool,
}
928
/// Regression flavor for CDR training.
///
/// NOTE(review): `train` currently performs ordinary least squares for all
/// variants; the regularized/Bayesian options are not yet implemented.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum RegressionMethod {
    /// Plain least squares.
    OrdinaryLeastSquares,
    /// L2-regularized regression with strength `alpha`.
    Ridge { alpha: f64 },
    /// L1-regularized regression with strength `alpha`.
    Lasso { alpha: f64 },
    /// Bayesian linear regression.
    Bayesian,
}
941
942impl Default for CliffordDataRegression {
943 fn default() -> Self {
944 Self {
945 num_training_circuits: 100,
946 regression_method: RegressionMethod::OrdinaryLeastSquares,
947 fit_intercept: true,
948 }
949 }
950}
951
952impl CliffordDataRegression {
953 pub fn new(num_training_circuits: usize, regression_method: RegressionMethod) -> Self {
955 Self {
956 num_training_circuits,
957 regression_method,
958 fit_intercept: true,
959 }
960 }
961
962 pub fn train<F, G>(&self, noisy_executor: F, ideal_executor: G) -> Result<CDRModel>
964 where
965 F: Fn(usize) -> Result<f64>,
966 G: Fn(usize) -> Result<f64>,
967 {
968 let mut noisy_values = Vec::with_capacity(self.num_training_circuits);
969 let mut ideal_values = Vec::with_capacity(self.num_training_circuits);
970
971 for i in 0..self.num_training_circuits {
972 noisy_values.push(noisy_executor(i)?);
973 ideal_values.push(ideal_executor(i)?);
974 }
975
976 let n = self.num_training_circuits as f64;
978 let sum_x: f64 = noisy_values.iter().sum();
979 let sum_y: f64 = ideal_values.iter().sum();
980 let sum_xx: f64 = noisy_values.iter().map(|x| x * x).sum();
981 let sum_xy: f64 = noisy_values
982 .iter()
983 .zip(&ideal_values)
984 .map(|(x, y)| x * y)
985 .sum();
986
987 let (a, b) = if self.fit_intercept {
988 let denom = n * sum_xx - sum_x * sum_x;
989 if denom.abs() < 1e-10 {
990 (1.0, 0.0)
991 } else {
992 let slope = (n * sum_xy - sum_x * sum_y) / denom;
993 let intercept = (sum_y - slope * sum_x) / n;
994 (slope, intercept)
995 }
996 } else {
997 (sum_xy / sum_xx, 0.0)
998 };
999
1000 let y_mean = sum_y / n;
1002 let ss_tot: f64 = ideal_values.iter().map(|y| (y - y_mean).powi(2)).sum();
1003 let ss_res: f64 = noisy_values
1004 .iter()
1005 .zip(&ideal_values)
1006 .map(|(x, y)| (y - (a * x + b)).powi(2))
1007 .sum();
1008 let r_squared = if ss_tot > 1e-10 {
1009 1.0 - ss_res / ss_tot
1010 } else {
1011 0.0
1012 };
1013
1014 Ok(CDRModel {
1015 coefficients: vec![a, b],
1016 r_squared,
1017 num_training_points: self.num_training_circuits,
1018 })
1019 }
1020
1021 pub fn mitigate(&self, model: &CDRModel, noisy_value: f64) -> Result<CDRResult> {
1023 let mitigated_value = model.coefficients[0] * noisy_value + model.coefficients[1];
1024
1025 let regression_error = (1.0 - model.r_squared).sqrt() * noisy_value.abs().max(0.1);
1027
1028 Ok(CDRResult {
1029 mitigated_value,
1030 regression_error,
1031 num_clifford_circuits: model.num_training_points,
1032 coefficients: model.coefficients.clone(),
1033 r_squared: model.r_squared,
1034 })
1035 }
1036}
1037
/// A trained CDR correction model.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CDRModel {
    /// Learned coefficients [slope, intercept].
    pub coefficients: Vec<f64>,
    /// R² achieved on the training data.
    pub r_squared: f64,
    /// Number of training circuits used.
    pub num_training_points: usize,
}
1048
/// Outcome of matrix-free measurement mitigation (M3-style).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct M3Result {
    /// Readout-corrected probability per bitstring.
    pub mitigated_probs: std::collections::HashMap<String, f64>,
    /// The raw counts the correction started from.
    pub original_counts: std::collections::HashMap<String, usize>,
    /// Iterations performed by the corrector.
    pub num_iterations: usize,
    /// Whether the iteration met the tolerance before `max_iterations`.
    pub converged: bool,
    /// Final L1 distance between successive iterates.
    pub residual_norm: f64,
}
1063
/// Matrix-free readout-error mitigation using per-qubit confusion data.
#[derive(Debug, Clone)]
pub struct MatrixFreeMeasurementMitigation {
    /// Measured confusion-matrix calibration data.
    pub calibration: M3Calibration,
    /// Iteration cap for the corrector.
    pub max_iterations: usize,
    /// Convergence threshold on the L1 change between iterates.
    pub tolerance: f64,
    /// Requested solver (see `mitigate` for current support).
    pub solver_method: M3SolverMethod,
}
1080
/// Readout calibration data.
#[derive(Debug, Clone, Default)]
pub struct M3Calibration {
    /// Per-qubit 2x2 confusion matrix; rows index the measured bit and
    /// columns the prepared bit, as filled in by `calibrate`.
    pub confusion_matrices: std::collections::HashMap<usize, [[f64; 2]; 2]>,
    /// Correlated readout errors per qubit pair (not used by `mitigate` yet).
    pub correlated_errors: std::collections::HashMap<(usize, usize), Array2<f64>>,
}
1090
/// Solver used to invert the readout confusion model.
///
/// NOTE(review): `mitigate` currently performs the iterative diagonal
/// correction regardless of this setting.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum M3SolverMethod {
    /// Iterative Bayesian-style update.
    IterativeBayesian,
    /// Non-negative least squares.
    NonNegativeLeastSquares,
    /// Conjugate gradient solver.
    ConjugateGradient,
}
1101
1102impl Default for MatrixFreeMeasurementMitigation {
1103 fn default() -> Self {
1104 Self {
1105 calibration: M3Calibration::default(),
1106 max_iterations: 100,
1107 tolerance: 1e-6,
1108 solver_method: M3SolverMethod::IterativeBayesian,
1109 }
1110 }
1111}
1112
1113impl MatrixFreeMeasurementMitigation {
1114 pub fn new(calibration: M3Calibration) -> Self {
1116 Self {
1117 calibration,
1118 ..Default::default()
1119 }
1120 }
1121
1122 pub fn calibrate<F>(&mut self, executor: F, num_qubits: usize, shots: usize) -> Result<()>
1124 where
1125 F: Fn(&str, usize) -> Result<std::collections::HashMap<String, usize>>,
1126 {
1127 for qubit in 0..num_qubits {
1128 let counts_0 = executor(&format!("cal_0_{}", qubit), shots)?;
1130 let p00 = *counts_0.get("0").unwrap_or(&0) as f64 / shots as f64;
1131 let p10 = *counts_0.get("1").unwrap_or(&0) as f64 / shots as f64;
1132
1133 let counts_1 = executor(&format!("cal_1_{}", qubit), shots)?;
1135 let p01 = *counts_1.get("0").unwrap_or(&0) as f64 / shots as f64;
1136 let p11 = *counts_1.get("1").unwrap_or(&0) as f64 / shots as f64;
1137
1138 self.calibration
1140 .confusion_matrices
1141 .insert(qubit, [[p00, p01], [p10, p11]]);
1142 }
1143
1144 Ok(())
1145 }
1146
1147 pub fn mitigate(
1149 &self,
1150 counts: &std::collections::HashMap<String, usize>,
1151 num_qubits: usize,
1152 ) -> Result<M3Result> {
1153 let total_shots: usize = counts.values().sum();
1154
1155 let mut probs: std::collections::HashMap<String, f64> = counts
1157 .iter()
1158 .map(|(k, v)| (k.clone(), *v as f64 / total_shots as f64))
1159 .collect();
1160
1161 let mut converged = false;
1163 let mut num_iterations = 0;
1164 let mut residual_norm = f64::MAX;
1165
1166 for iter in 0..self.max_iterations {
1167 num_iterations = iter + 1;
1168
1169 let mut new_probs = std::collections::HashMap::new();
1171 let mut total = 0.0;
1172
1173 for (bitstring, &prob) in &probs {
1174 let mut correction = 1.0;
1176
1177 for (i, c) in bitstring.chars().rev().enumerate() {
1178 if let Some(confusion) = self.calibration.confusion_matrices.get(&i) {
1179 let measured = if c == '1' { 1 } else { 0 };
1180 let diag = confusion[measured][measured];
1182 if diag > 0.01 {
1183 correction /= diag;
1184 }
1185 }
1186 }
1187
1188 let corrected_prob = (prob * correction).max(0.0);
1189 new_probs.insert(bitstring.clone(), corrected_prob);
1190 total += corrected_prob;
1191 }
1192
1193 if total > 0.0 {
1195 for prob in new_probs.values_mut() {
1196 *prob /= total;
1197 }
1198 }
1199
1200 let mut diff = 0.0;
1202 for (k, &new_p) in &new_probs {
1203 let old_p = probs.get(k).unwrap_or(&0.0);
1204 diff += (new_p - old_p).abs();
1205 }
1206 residual_norm = diff;
1207
1208 probs = new_probs;
1209
1210 if diff < self.tolerance {
1211 converged = true;
1212 break;
1213 }
1214 }
1215
1216 Ok(M3Result {
1217 mitigated_probs: probs,
1218 original_counts: counts.clone(),
1219 num_iterations,
1220 converged,
1221 residual_norm,
1222 })
1223 }
1224
1225 pub fn expectation_value(&self, result: &M3Result, observable: &[i8]) -> f64 {
1227 let mut expectation = 0.0;
1228
1229 for (bitstring, &prob) in &result.mitigated_probs {
1230 let mut parity = 1;
1231 for (i, c) in bitstring.chars().rev().enumerate() {
1232 if i < observable.len() && observable[i] != 0 && c == '1' {
1233 parity *= -1;
1234 }
1235 }
1236 expectation += parity as f64 * prob;
1237 }
1238
1239 expectation
1240 }
1241}
1242
/// Composable collection of error-mitigation stages; `None` disables a stage.
#[derive(Debug, Clone)]
pub struct ErrorMitigationPipeline {
    /// Zero-noise extrapolation stage.
    pub zne: Option<ZeroNoiseExtrapolator>,
    /// Probabilistic error cancellation stage.
    pub pec: Option<ProbabilisticErrorCancellation>,
    /// Clifford data regression stage.
    pub cdr: Option<CliffordDataRegression>,
    /// Measurement (readout) mitigation stage.
    pub m3: Option<MatrixFreeMeasurementMitigation>,
}
1255
1256impl Default for ErrorMitigationPipeline {
1257 fn default() -> Self {
1258 Self {
1259 zne: Some(ZeroNoiseExtrapolator::default_config()),
1260 pec: None,
1261 cdr: None,
1262 m3: None,
1263 }
1264 }
1265}
1266
1267impl ErrorMitigationPipeline {
1268 pub fn full() -> Self {
1270 Self {
1271 zne: Some(ZeroNoiseExtrapolator::default_config()),
1272 pec: Some(ProbabilisticErrorCancellation::new(
1273 NoiseModel::default(),
1274 1000,
1275 )),
1276 cdr: Some(CliffordDataRegression::default()),
1277 m3: Some(MatrixFreeMeasurementMitigation::default()),
1278 }
1279 }
1280
1281 pub fn apply<F>(&self, executor: F, observable: &str) -> Result<f64>
1286 where
1287 F: Fn(f64) -> Result<f64> + Sync + Send,
1288 {
1289 let mut value = if let Some(zne) = &self.zne {
1291 zne.extrapolate(&executor, observable)?.zero_noise_value
1292 } else {
1293 executor(1.0)?
1295 };
1296
1297 Ok(value)
1301 }
1302
1303 pub fn apply_simple<F>(&self, executor: F, observable: &str) -> Result<f64>
1308 where
1309 F: Fn(&str) -> Result<f64>,
1310 {
1311 let value = executor(observable)?;
1314
1315 Ok(value)
1319 }
1320}
1321
#[cfg(test)]
mod tests {
    use super::*;

    // Linear ZNE on exactly linear data (1.0 - 0.1*x) must recover ~1.0 at x = 0.
    #[test]
    fn test_zne_linear_extrapolation() {
        let zne = ZeroNoiseExtrapolator::new(
            NoiseScalingMethod::UnitaryFolding,
            ExtrapolationMethod::Linear,
            vec![1.0, 2.0, 3.0],
            100,
        );

        let noise_factors = vec![1.0, 2.0, 3.0];
        let measured_values = vec![0.9, 0.8, 0.7];
        let uncertainties = vec![0.01, 0.01, 0.01];

        let (zero_noise, _error, _stats) = zne
            .linear_extrapolation(&noise_factors, &measured_values, &uncertainties)
            .expect("Failed to perform linear extrapolation");

        // Intercept of the fitted line at zero noise.
        assert!((zero_noise - 1.0).abs() < 0.1);
    }

    // Standard protocol: signed geometric mean stays inside (0, 1) for these inputs.
    #[test]
    fn test_virtual_distillation() {
        let vd = VirtualDistillation::new(3, DistillationProtocol::Standard);

        let measurements = vec![0.8, 0.7, 0.9];
        let result = vd
            .standard_distillation(&measurements)
            .expect("Failed to perform standard distillation");

        assert!(result > 0.0);
        assert!(result < 1.0);
    }

    // Parity check passes (|0.95 - 1.0| <= 0.1), so the raw value is kept.
    #[test]
    fn test_symmetry_verification() {
        let symmetries = vec![SymmetryOperation {
            name: "parity".to_string(),
            eigenvalue: Complex64::new(1.0, 0.0),
            tolerance: 0.1,
        }];

        let sv = SymmetryVerification::new(symmetries);

        // Executor answers both the main observable and the symmetry query.
        let executor = |obs: &str| -> Result<f64> {
            match obs {
                "Z0" => Ok(0.8),
                "parity" => Ok(0.95), _ => Ok(0.0),
            }
        };

        let result = sv
            .verify_and_correct(executor, "Z0")
            .expect("Failed to verify and correct");
        assert!((result.corrected_value - 0.8).abs() < 1e-10);
        assert!(result.post_selection_prob > 0.0);
    }

    // Richardson extrapolation on a decaying series yields a positive estimate.
    #[test]
    fn test_richardson_extrapolation() {
        let zne = ZeroNoiseExtrapolator::default_config();

        let noise_factors = vec![1.0, 2.0, 3.0];
        let measured_values = vec![1.0, 0.8, 0.6]; let (zero_noise, _error, _stats) = zne
            .richardson_extrapolation(&noise_factors, &measured_values)
            .expect("Failed to perform Richardson extrapolation");

        assert!(zero_noise > 0.0);
    }
}