//! Error mitigation techniques: zero-noise extrapolation (ZNE), virtual
//! distillation, and symmetry verification.

use scirs2_core::ndarray::Array2;
use scirs2_core::parallel_ops::*;
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};

use crate::error::{Result, SimulatorError};
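/// Result of a zero-noise extrapolation (ZNE) run.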
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZNEResult {
    pub zero_noise_value: f64,
    pub error_estimate: f64,
    pub noise_factors: Vec<f64>,
    pub measured_values: Vec<f64>,
    pub uncertainties: Vec<f64>,
    pub method: ExtrapolationMethod,
    pub confidence: f64,
    pub fit_stats: FitStatistics,
}
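/// Result of a virtual distillation run.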
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtualDistillationResult {
    pub mitigated_value: f64,
    pub overhead: usize,
    pub efficiency: f64,
    pub error_reduction: f64,
}
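/// Result of symmetry verification and post-selection.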
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SymmetryVerificationResult {
    pub corrected_value: f64,
    pub symmetry_violation: f64,
    pub post_selection_prob: f64,
    pub symmetries: Vec<String>,
}
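/// Model used to extrapolate the measured values to zero noise.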
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum ExtrapolationMethod {
    Linear,
    Exponential,
    Polynomial(usize),
    Richardson,
    Adaptive,
}
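/// Technique used to amplify the effective noise of a circuit.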
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum NoiseScalingMethod {
    UnitaryFolding,
    LocalFolding,
    ParameterScaling,
    IdentityInsertion,
    PauliTwirling,
}
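/// Goodness-of-fit statistics for an extrapolation fit.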
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct FitStatistics {
    pub r_squared: f64,
    pub chi_squared_reduced: f64,
    pub aic: f64,
    pub num_parameters: usize,
    pub residuals: Vec<f64>,
}
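/// Zero-noise extrapolation engine: measures an observable at several
/// amplified noise levels and extrapolates back to the zero-noise limit.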
pub struct ZeroNoiseExtrapolator {
    scaling_method: NoiseScalingMethod,
    extrapolation_method: ExtrapolationMethod,
    noise_factors: Vec<f64>,
    shots_per_level: usize,
    max_poly_order: usize,
}
impl ZeroNoiseExtrapolator {
    pub const fn new(
        scaling_method: NoiseScalingMethod,
        extrapolation_method: ExtrapolationMethod,
        noise_factors: Vec<f64>,
        shots_per_level: usize,
    ) -> Self {
        Self {
            scaling_method,
            extrapolation_method,
            noise_factors,
            shots_per_level,
            max_poly_order: 4,
        }
    }
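    /// Default configuration: unitary folding, linear extrapolation, noise
    /// factors from 1.0 to 3.0, and 1000 shots per noise level.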
    pub fn default_config() -> Self {
        Self::new(
            NoiseScalingMethod::UnitaryFolding,
            ExtrapolationMethod::Linear,
            vec![1.0, 1.5, 2.0, 2.5, 3.0],
            1000,
        )
    }
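    /// Run ZNE: evaluate the observable at each noise factor (in parallel)
    /// and extrapolate to zero noise with the configured method.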
    pub fn extrapolate<F>(&self, noisy_executor: F, _observable: &str) -> Result<ZNEResult>
    where
        F: Fn(f64) -> Result<f64> + Sync + Send,
    {
        // Measure the observable at each noise factor in parallel.
        let measurements: Result<Vec<(f64, f64, f64)>> = self
            .noise_factors
            .par_iter()
            .map(|&noise_factor| {
                let measured_value = noisy_executor(noise_factor)?;
                let uncertainty = self.estimate_measurement_uncertainty(measured_value);
                Ok((noise_factor, measured_value, uncertainty))
            })
            .collect();

        let measurements = measurements?;
        let (noise_factors, measured_values, uncertainties): (Vec<f64>, Vec<f64>, Vec<f64>) =
            measurements.unzip3();

        // Extrapolate to the zero-noise limit with the configured method.
        let (zero_noise_value, error_estimate, fit_stats) = match self.extrapolation_method {
            ExtrapolationMethod::Linear => {
                self.linear_extrapolation(&noise_factors, &measured_values, &uncertainties)?
            }
            ExtrapolationMethod::Exponential => {
                self.exponential_extrapolation(&noise_factors, &measured_values, &uncertainties)?
            }
            ExtrapolationMethod::Polynomial(order) => self.polynomial_extrapolation(
                &noise_factors,
                &measured_values,
                &uncertainties,
                order,
            )?,
            ExtrapolationMethod::Richardson => {
                self.richardson_extrapolation(&noise_factors, &measured_values)?
            }
            ExtrapolationMethod::Adaptive => {
                self.adaptive_extrapolation(&noise_factors, &measured_values, &uncertainties)?
            }
        };

        let confidence = self.calculate_confidence(&fit_stats);

        Ok(ZNEResult {
            zero_noise_value,
            error_estimate,
            noise_factors,
            measured_values,
            uncertainties,
            method: self.extrapolation_method,
            confidence,
            fit_stats,
        })
    }
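    /// Weighted linear fit `y = a + b * x`; the intercept `a` is the
    /// zero-noise estimate.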
    fn linear_extrapolation(
        &self,
        noise_factors: &[f64],
        measured_values: &[f64],
        uncertainties: &[f64],
    ) -> Result<(f64, f64, FitStatistics)> {
        if noise_factors.len() < 2 {
            return Err(SimulatorError::InvalidInput(
                "Need at least 2 data points for linear extrapolation".to_string(),
            ));
        }

        let (a, _b, fit_stats) =
            self.weighted_linear_fit(noise_factors, measured_values, uncertainties)?;

        // The intercept is the zero-noise estimate; use the mean absolute
        // residual as a rough error estimate.
        let zero_noise_value = a;
        let error_estimate = fit_stats.residuals.iter().map(|r| r.abs()).sum::<f64>()
            / fit_stats.residuals.len() as f64;

        Ok((zero_noise_value, error_estimate, fit_stats))
    }
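    /// Exponential fit `y = a * exp(b * x)`, linearized by fitting `ln(y)`
    /// against `x`; the prefactor `a` is the zero-noise estimate.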
    fn exponential_extrapolation(
        &self,
        noise_factors: &[f64],
        measured_values: &[f64],
        uncertainties: &[f64],
    ) -> Result<(f64, f64, FitStatistics)> {
        // Linearize y = a * exp(b * x) as ln(y) = ln(a) + b * x.
        let log_values: Vec<f64> = measured_values
            .iter()
            .map(|&y| if y > 0.0 { y.ln() } else { f64::NEG_INFINITY })
            .collect();

        if log_values.iter().any(|&x| x.is_infinite()) {
            return Err(SimulatorError::NumericalError(
                "Cannot take logarithm of non-positive values for exponential fit".to_string(),
            ));
        }

        // Propagate uncertainties into log space: sigma_ln(y) = sigma_y / |y|.
        let log_uncertainties: Vec<f64> = uncertainties
            .iter()
            .zip(measured_values.iter())
            .map(|(&u, &y)| u / y.abs())
            .collect();

        let (ln_a, b, mut fit_stats) =
            self.weighted_linear_fit(noise_factors, &log_values, &log_uncertainties)?;

        let a = ln_a.exp();
        let zero_noise_value = a;
        let error_estimate = a * fit_stats.residuals.iter().map(|r| r.abs()).sum::<f64>()
            / fit_stats.residuals.len() as f64;

        // Replace the log-space residuals with residuals on the original scale.
        fit_stats.residuals = noise_factors
            .iter()
            .zip(measured_values.iter())
            .map(|(&x, &y)| y - a * (b * x).exp())
            .collect();

        Ok((zero_noise_value, error_estimate, fit_stats))
    }
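    /// Polynomial fit of the given order; the constant term is the
    /// zero-noise estimate.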
    fn polynomial_extrapolation(
        &self,
        noise_factors: &[f64],
        measured_values: &[f64],
        uncertainties: &[f64],
        order: usize,
    ) -> Result<(f64, f64, FitStatistics)> {
        if noise_factors.len() <= order {
            return Err(SimulatorError::InvalidInput(format!(
                "Need more than {order} data points for order {order} polynomial"
            )));
        }

        // Build the Vandermonde design matrix: row i is [1, x_i, x_i^2, ...].
        let n = noise_factors.len();
        let mut design_matrix = Array2::zeros((n, order + 1));
        for (i, &x) in noise_factors.iter().enumerate() {
            for j in 0..=order {
                design_matrix[[i, j]] = x.powi(j as i32);
            }
        }

        let coefficients =
            self.solve_weighted_least_squares(&design_matrix, measured_values, uncertainties)?;

        // The constant coefficient is the value at zero noise.
        let zero_noise_value = coefficients[0];

        let mut residuals = Vec::with_capacity(n);
        for (i, &x) in noise_factors.iter().enumerate() {
            let predicted: f64 = coefficients
                .iter()
                .enumerate()
                .map(|(j, &coeff)| coeff * x.powi(j as i32))
                .sum();
            residuals.push(measured_values[i] - predicted);
        }

        let error_estimate =
            residuals.iter().map(|r| r.abs()).sum::<f64>() / residuals.len() as f64;

        // Only the residuals and parameter count are filled in; the remaining
        // statistics keep their default values.
        let fit_stats = FitStatistics {
            residuals,
            num_parameters: order + 1,
            ..Default::default()
        };

        Ok((zero_noise_value, error_estimate, fit_stats))
    }
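    /// Richardson-style extrapolation: fit a quadratic through the first
    /// three points and evaluate it at zero noise.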
    fn richardson_extrapolation(
        &self,
        noise_factors: &[f64],
        measured_values: &[f64],
    ) -> Result<(f64, f64, FitStatistics)> {
        if noise_factors.len() < 3 {
            return Err(SimulatorError::InvalidInput(
                "Richardson extrapolation requires at least 3 data points".to_string(),
            ));
        }

        let x1 = noise_factors[0];
        let x2 = noise_factors[1];
        let x3 = noise_factors[2];
        let y1 = measured_values[0];
        let y2 = measured_values[1];
        let y3 = measured_values[2];

        let denominator = (x1 - x2) * (x1 - x3) * (x2 - x3);
        if denominator.abs() < 1e-12 {
            return Err(SimulatorError::NumericalError(
                "Noise factors too close for Richardson extrapolation".to_string(),
            ));
        }

        // Lagrange interpolation through the three points, evaluated at zero.
        let a = (y3 * x1 * x2).mul_add(
            x1 - x2,
            (y1 * x2 * x3).mul_add(x2 - x3, y2 * x1 * x3 * (x3 - x1)),
        ) / denominator;

        let zero_noise_value = a;

        // Heuristic error estimate: 10% of the largest step between
        // consecutive measurements.
        let error_estimate = (y1 - y2).abs().max((y2 - y3).abs()) * 0.1;

        let fit_stats = FitStatistics {
            num_parameters: 3,
            ..Default::default()
        };

        Ok((zero_noise_value, error_estimate, fit_stats))
    }
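    /// Try several extrapolation models and keep the one with the lowest
    /// AIC.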
    fn adaptive_extrapolation(
        &self,
        noise_factors: &[f64],
        measured_values: &[f64],
        uncertainties: &[f64],
    ) -> Result<(f64, f64, FitStatistics)> {
        let mut best_result = None;
        let mut best_aic = f64::INFINITY;

        let methods = vec![
            ExtrapolationMethod::Linear,
            ExtrapolationMethod::Exponential,
            ExtrapolationMethod::Polynomial(2),
            ExtrapolationMethod::Polynomial(3),
        ];

        for method in methods {
            let result = match method {
                ExtrapolationMethod::Linear => {
                    self.linear_extrapolation(noise_factors, measured_values, uncertainties)
                }
                ExtrapolationMethod::Exponential => {
                    self.exponential_extrapolation(noise_factors, measured_values, uncertainties)
                }
                ExtrapolationMethod::Polynomial(order) => self.polynomial_extrapolation(
                    noise_factors,
                    measured_values,
                    uncertainties,
                    order,
                ),
                _ => continue,
            };

            if let Ok((value, error, stats)) = result {
                let aic = stats.aic;
                if aic < best_aic {
                    best_aic = aic;
                    best_result = Some((value, error, stats));
                }
            }
        }

        best_result.ok_or_else(|| {
            SimulatorError::ComputationError("No extrapolation method succeeded".to_string())
        })
    }
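    /// Weighted least-squares fit of `y = a + b * x` with weights
    /// `1 / sigma^2`; returns the intercept, slope, and fit statistics.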
    fn weighted_linear_fit(
        &self,
        x: &[f64],
        y: &[f64],
        weights: &[f64],
    ) -> Result<(f64, f64, FitStatistics)> {
        let n = x.len();
        if n < 2 {
            return Err(SimulatorError::InvalidInput(
                "Need at least 2 points for linear fit".to_string(),
            ));
        }

        // Convert uncertainties to weights w = 1 / sigma^2 (unit weight if
        // sigma is not positive).
        let w: Vec<f64> = weights
            .iter()
            .map(|&sigma| {
                if sigma > 0.0 {
                    1.0 / (sigma * sigma)
                } else {
                    1.0
                }
            })
            .collect();

        // Weighted normal-equation sums for the straight-line fit.
        let sum_w: f64 = w.iter().sum();
        let sum_wx: f64 = w.iter().zip(x.iter()).map(|(&wi, &xi)| wi * xi).sum();
        let sum_wy: f64 = w.iter().zip(y.iter()).map(|(&wi, &yi)| wi * yi).sum();
        let sum_wxx: f64 = w.iter().zip(x.iter()).map(|(&wi, &xi)| wi * xi * xi).sum();
        let sum_wxy: f64 = w
            .iter()
            .zip(x.iter())
            .zip(y.iter())
            .map(|((&wi, &xi), &yi)| wi * xi * yi)
            .sum();

        let delta = sum_w.mul_add(sum_wxx, -(sum_wx * sum_wx));
        if delta.abs() < 1e-12 {
            return Err(SimulatorError::NumericalError(
                "Singular matrix in linear fit".to_string(),
            ));
        }

        // Intercept a and slope b of the weighted least-squares line.
        let a = sum_wxx.mul_add(sum_wy, -(sum_wx * sum_wxy)) / delta;
        let b = sum_w.mul_add(sum_wxy, -(sum_wx * sum_wy)) / delta;

        let mut residuals = Vec::with_capacity(n);
        let mut ss_res = 0.0;
        let mut ss_tot = 0.0;
        let y_mean = y.iter().sum::<f64>() / n as f64;

        for i in 0..n {
            let predicted = b.mul_add(x[i], a);
            let residual = y[i] - predicted;
            residuals.push(residual);
            ss_res += residual * residual;
            ss_tot += (y[i] - y_mean) * (y[i] - y_mean);
        }

        let r_squared = if ss_tot > 0.0 {
            1.0 - ss_res / ss_tot
        } else {
            0.0
        };
        let chi_squared_reduced = if n > 2 {
            ss_res / (n - 2) as f64
        } else {
            0.0
        };

        // Akaike information criterion: AIC = 2k + n * ln(RSS / n) with k = 2
        // fitted parameters.
        let aic = (n as f64).mul_add((ss_res / n as f64).ln(), 2.0 * 2.0);

        let fit_stats = FitStatistics {
            r_squared,
            chi_squared_reduced,
            aic,
            num_parameters: 2,
            residuals,
        };

        Ok((a, b, fit_stats))
    }
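    /// Solve the weighted least-squares problem for the polynomial fit.
    /// Currently a simplified placeholder (see the note in the body).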
    fn solve_weighted_least_squares(
        &self,
        design_matrix: &Array2<f64>,
        y: &[f64],
        _weights: &[f64],
    ) -> Result<Vec<f64>> {
        // Simplified placeholder: instead of solving the weighted normal
        // equations, use the first measurement as the constant term and leave
        // the higher-order coefficients at zero.
        let n_params = design_matrix.ncols();
        let mut coefficients = vec![0.0; n_params];
        coefficients[0] = y[0];

        Ok(coefficients)
    }
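    /// Shot-noise estimate for an expectation value in [-1, 1], treating
    /// `p = (value + 1) / 2` as a binomial probability over
    /// `shots_per_level` shots.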
    fn estimate_measurement_uncertainty(&self, measured_value: f64) -> f64 {
        // Map the expectation value in [-1, 1] to a probability p in [0, 1].
        let p = f64::midpoint(measured_value, 1.0);
        let shot_noise = (p * (1.0 - p) / self.shots_per_level as f64).sqrt();
        // The factor 2 converts the binomial standard error back to the
        // expectation-value scale.
        shot_noise * 2.0
    }
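    /// Combine R^2 and the reduced chi-square into a heuristic confidence
    /// score in [0, 1].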
    fn calculate_confidence(&self, fit_stats: &FitStatistics) -> f64 {
        let r_sq_factor = fit_stats.r_squared.max(0.0).min(1.0);
        let chi_sq_factor = if fit_stats.chi_squared_reduced > 0.0 {
            1.0 / (1.0 + fit_stats.chi_squared_reduced)
        } else {
            0.5
        };

        (r_sq_factor * chi_sq_factor).sqrt()
    }
}
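/// Virtual distillation mitigation: combines measurements from several
/// circuit executions into a single mitigated value.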
pub struct VirtualDistillation {
    num_copies: usize,
    protocol: DistillationProtocol,
}
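/// Strategy used to combine the repeated measurements.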
#[derive(Debug, Clone, Copy)]
pub enum DistillationProtocol {
    Standard,
    Optimized,
    Adaptive,
}
impl VirtualDistillation {
    pub const fn new(num_copies: usize, protocol: DistillationProtocol) -> Self {
        Self {
            num_copies,
            protocol,
        }
    }
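    /// Execute the circuit `num_copies` times and combine the measurements
    /// according to the selected protocol.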
    pub fn distill<F>(
        &self,
        noisy_executor: F,
        _observable: &str,
    ) -> Result<VirtualDistillationResult>
    where
        F: Fn() -> Result<f64>,
    {
        let measurements: Result<Vec<f64>> =
            (0..self.num_copies).map(|_| noisy_executor()).collect();

        let measurements = measurements?;

        let mitigated_value = match self.protocol {
            DistillationProtocol::Standard => self.standard_distillation(&measurements)?,
            DistillationProtocol::Optimized => self.optimized_distillation(&measurements)?,
            DistillationProtocol::Adaptive => self.adaptive_distillation(&measurements)?,
        };

        // Compare against the plain average to quantify the relative change.
        let raw_value = measurements.iter().sum::<f64>() / measurements.len() as f64;
        let error_reduction = (raw_value - mitigated_value).abs() / raw_value.abs().max(1e-10);

        Ok(VirtualDistillationResult {
            mitigated_value,
            overhead: self.num_copies,
            efficiency: 1.0 / self.num_copies as f64,
            error_reduction,
        })
    }
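    /// Signed geometric mean of the measurements.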
    fn standard_distillation(&self, measurements: &[f64]) -> Result<f64> {
        let product: f64 = measurements.iter().product();
        let mitigated = if product >= 0.0 {
            product.powf(1.0 / self.num_copies as f64)
        } else {
            -(-product).powf(1.0 / self.num_copies as f64)
        };

        Ok(mitigated)
    }
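    /// Median of the measurements, which is robust to outliers.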
    fn optimized_distillation(&self, measurements: &[f64]) -> Result<f64> {
        let mut sorted = measurements.to_vec();
        sorted.sort_by(|a, b| a.total_cmp(b));
        let median = sorted[sorted.len() / 2];
        Ok(median)
    }
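    /// Choose between the standard and robust combiners based on the sample
    /// variance of the measurements.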
    fn adaptive_distillation(&self, measurements: &[f64]) -> Result<f64> {
        let mean = measurements.iter().sum::<f64>() / measurements.len() as f64;
        let variance = measurements.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
            / measurements.len() as f64;

        // Low spread: geometric-mean combiner; high spread: robust median.
        if variance < 0.1 {
            self.standard_distillation(measurements)
        } else {
            self.optimized_distillation(measurements)
        }
    }
}
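/// Symmetry-verification mitigation: measures known symmetries of the state
/// and post-selects on the expected eigenvalues.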
pub struct SymmetryVerification {
    symmetries: Vec<SymmetryOperation>,
}
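/// A symmetry observable together with its expected eigenvalue and the
/// tolerance used for post-selection.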
#[derive(Debug, Clone)]
pub struct SymmetryOperation {
    pub name: String,
    pub eigenvalue: Complex64,
    pub tolerance: f64,
}
impl SymmetryVerification {
    pub const fn new(symmetries: Vec<SymmetryOperation>) -> Self {
        Self { symmetries }
    }
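    /// Measure the observable and every registered symmetry, then return the
    /// value corrected by post-selection on the satisfied symmetries.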
    pub fn verify_and_correct<F>(
        &self,
        executor: F,
        observable: &str,
    ) -> Result<SymmetryVerificationResult>
    where
        F: Fn(&str) -> Result<f64>,
    {
        let main_value = executor(observable)?;

        let mut violations = Vec::new();
        let mut valid_measurements = Vec::new();

        // Measure each symmetry and keep the main value only when the
        // symmetry holds within tolerance.
        for symmetry in &self.symmetries {
            let symmetry_value = executor(&symmetry.name)?;
            let expected = symmetry.eigenvalue.re;
            let violation = (symmetry_value - expected).abs();

            violations.push(violation);

            if violation <= symmetry.tolerance {
                valid_measurements.push(main_value);
            }
        }

        // Fall back to the uncorrected value if every symmetry check failed.
        let corrected_value = if valid_measurements.is_empty() {
            main_value
        } else {
            valid_measurements.iter().sum::<f64>() / valid_measurements.len() as f64
        };

        let avg_violation = violations.iter().sum::<f64>() / violations.len() as f64;
        let post_selection_prob =
            valid_measurements.len() as f64 / (self.symmetries.len() + 1) as f64;

        Ok(SymmetryVerificationResult {
            corrected_value,
            symmetry_violation: avg_violation,
            post_selection_prob,
            symmetries: self.symmetries.iter().map(|s| s.name.clone()).collect(),
        })
    }
}
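/// Helper for splitting a vector of 3-tuples into three parallel vectors.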
trait Unzip3<A, B, C> {
    fn unzip3(self) -> (Vec<A>, Vec<B>, Vec<C>);
}

impl<A, B, C> Unzip3<A, B, C> for Vec<(A, B, C)> {
    fn unzip3(self) -> (Vec<A>, Vec<B>, Vec<C>) {
        let mut vec_a = Vec::with_capacity(self.len());
        let mut vec_b = Vec::with_capacity(self.len());
        let mut vec_c = Vec::with_capacity(self.len());

        for (a, b, c) in self {
            vec_a.push(a);
            vec_b.push(b);
            vec_c.push(c);
        }

        (vec_a, vec_b, vec_c)
    }
}
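/// Benchmark ZNE and virtual distillation against a synthetic exponentially
/// decaying noise model.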
pub fn benchmark_noise_extrapolation() -> Result<(ZNEResult, VirtualDistillationResult)> {
    // Synthetic executor: an ideal value of 1.0 decaying exponentially with
    // the noise factor.
    let noisy_executor = |noise_factor: f64| -> Result<f64> {
        let ideal_value = 1.0;
        let noise_rate = 0.1;
        Ok(ideal_value * (-noise_rate * noise_factor).exp())
    };

    let zne = ZeroNoiseExtrapolator::default_config();
    let zne_result = zne.extrapolate(noisy_executor, "Z0")?;

    let vd = VirtualDistillation::new(3, DistillationProtocol::Standard);
    let vd_result = vd.distill(|| Ok(0.8), "Z0")?;

    Ok((zne_result, vd_result))
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_zne_linear_extrapolation() {
        let zne = ZeroNoiseExtrapolator::new(
            NoiseScalingMethod::UnitaryFolding,
            ExtrapolationMethod::Linear,
            vec![1.0, 2.0, 3.0],
            100,
        );

        let noise_factors = vec![1.0, 2.0, 3.0];
        let measured_values = vec![0.9, 0.8, 0.7];
        let uncertainties = vec![0.01, 0.01, 0.01];

        let (zero_noise, _error, _stats) = zne
            .linear_extrapolation(&noise_factors, &measured_values, &uncertainties)
            .unwrap();

        assert!((zero_noise - 1.0).abs() < 0.1);
    }

    #[test]
    fn test_virtual_distillation() {
        let vd = VirtualDistillation::new(3, DistillationProtocol::Standard);

        let measurements = vec![0.8, 0.7, 0.9];
        let result = vd.standard_distillation(&measurements).unwrap();

        assert!(result > 0.0);
        assert!(result < 1.0);
    }

    #[test]
    fn test_symmetry_verification() {
        let symmetries = vec![SymmetryOperation {
            name: "parity".to_string(),
            eigenvalue: Complex64::new(1.0, 0.0),
            tolerance: 0.1,
        }];

        let sv = SymmetryVerification::new(symmetries);

        let executor = |obs: &str| -> Result<f64> {
            match obs {
                "Z0" => Ok(0.8),
                "parity" => Ok(0.95),
                _ => Ok(0.0),
            }
        };

        let result = sv.verify_and_correct(executor, "Z0").unwrap();
        assert!((result.corrected_value - 0.8).abs() < 1e-10);
        assert!(result.post_selection_prob > 0.0);
    }

    #[test]
    fn test_richardson_extrapolation() {
        let zne = ZeroNoiseExtrapolator::default_config();

        let noise_factors = vec![1.0, 2.0, 3.0];
        let measured_values = vec![1.0, 0.8, 0.6];

        let (zero_noise, _error, _stats) = zne
            .richardson_extrapolation(&noise_factors, &measured_values)
            .unwrap();

        assert!(zero_noise > 0.0);
    }
}