use scirs2_core::ndarray::Array2;
use scirs2_core::parallel_ops::{
IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator,
};
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use crate::error::{Result, SimulatorError};
/// Result of a zero-noise extrapolation (ZNE) run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZNEResult {
    /// Expectation value extrapolated to the zero-noise limit.
    pub zero_noise_value: f64,
    /// Heuristic error bar on `zero_noise_value`.
    pub error_estimate: f64,
    /// Noise-scaling factors at which measurements were taken.
    pub noise_factors: Vec<f64>,
    /// Measured expectation values, one per noise factor.
    pub measured_values: Vec<f64>,
    /// Estimated measurement uncertainties, one per noise factor.
    pub uncertainties: Vec<f64>,
    /// Extrapolation method that produced this result.
    pub method: ExtrapolationMethod,
    /// Confidence score in [0, 1] derived from the fit quality.
    pub confidence: f64,
    /// Goodness-of-fit statistics of the extrapolation fit.
    pub fit_stats: FitStatistics,
}
/// Result of a virtual-distillation error-mitigation run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtualDistillationResult {
    /// Error-mitigated expectation value.
    pub mitigated_value: f64,
    /// Number of state copies consumed.
    pub overhead: usize,
    /// Inverse of the overhead (1 / copies).
    pub efficiency: f64,
    /// Relative change between the raw mean and the mitigated value.
    pub error_reduction: f64,
}
/// Result of symmetry verification with post-selection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SymmetryVerificationResult {
    /// Post-selected (corrected) expectation value.
    pub corrected_value: f64,
    /// Average absolute violation across all checked symmetries.
    pub symmetry_violation: f64,
    /// Fraction of checks that passed their tolerance.
    pub post_selection_prob: f64,
    /// Names of the symmetries that were checked.
    pub symmetries: Vec<String>,
}
/// Functional form used to extrapolate measurements to zero noise.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum ExtrapolationMethod {
    /// Weighted linear fit; the intercept is the zero-noise value.
    Linear,
    /// Exponential decay fit `y = a * exp(b * x)` via log-linearization.
    Exponential,
    /// Polynomial fit of the given order.
    Polynomial(usize),
    /// Three-point Richardson (quadratic Lagrange) extrapolation.
    Richardson,
    /// Tries several methods and keeps the best fit by AIC.
    Adaptive,
}
/// Technique used to amplify the effective noise level of a circuit.
///
/// NOTE(review): only carried as configuration in this file; no noise
/// scaling is implemented here.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum NoiseScalingMethod {
    UnitaryFolding,
    LocalFolding,
    ParameterScaling,
    IdentityInsertion,
    PauliTwirling,
}
/// Goodness-of-fit statistics of an extrapolation fit.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct FitStatistics {
    /// Coefficient of determination (1 = perfect fit).
    pub r_squared: f64,
    /// Chi-squared per degree of freedom.
    pub chi_squared_reduced: f64,
    /// Akaike information criterion (lower is better).
    pub aic: f64,
    /// Number of fitted parameters.
    pub num_parameters: usize,
    /// Per-point residuals (measured minus predicted).
    pub residuals: Vec<f64>,
}
/// Zero-noise extrapolation engine: measures an observable at several
/// amplified noise levels and extrapolates back to the zero-noise limit.
#[derive(Debug, Clone)]
pub struct ZeroNoiseExtrapolator {
    /// How noise is amplified (configuration only in this file).
    scaling_method: NoiseScalingMethod,
    /// Functional form used for the extrapolation.
    extrapolation_method: ExtrapolationMethod,
    /// Noise amplification factors to sample (typically starting at 1.0).
    noise_factors: Vec<f64>,
    /// Shots per noise level, used for shot-noise uncertainty estimates.
    shots_per_level: usize,
    /// Maximum polynomial order (not consulted by the methods in this file).
    max_poly_order: usize,
}
impl ZeroNoiseExtrapolator {
    /// Creates an extrapolator with an explicit configuration.
    ///
    /// `max_poly_order` is fixed at 4 (currently not consulted by any method
    /// in this impl).
    #[must_use]
    pub const fn new(
        scaling_method: NoiseScalingMethod,
        extrapolation_method: ExtrapolationMethod,
        noise_factors: Vec<f64>,
        shots_per_level: usize,
    ) -> Self {
        Self {
            scaling_method,
            extrapolation_method,
            noise_factors,
            shots_per_level,
            max_poly_order: 4,
        }
    }

    /// Default configuration: unitary folding, linear extrapolation,
    /// noise factors `[1.0, 1.5, 2.0, 2.5, 3.0]`, and 1000 shots per level.
    #[must_use]
    pub fn default_config() -> Self {
        Self::new(
            NoiseScalingMethod::UnitaryFolding,
            ExtrapolationMethod::Linear,
            vec![1.0, 1.5, 2.0, 2.5, 3.0],
            1000,
        )
    }

    /// Measures at every configured noise factor (in parallel) and
    /// extrapolates the results to the zero-noise limit using the configured
    /// extrapolation method.
    ///
    /// # Errors
    /// Propagates failures from `noisy_executor` and numerical/fitting errors
    /// from the selected extrapolation method.
    pub fn extrapolate<F>(&self, noisy_executor: F, observable: &str) -> Result<ZNEResult>
    where
        F: Fn(f64) -> Result<f64> + Sync + Send,
    {
        // NOTE(review): `observable` is not consumed here; the executor
        // closure is assumed to already encode which observable it measures.
        let _ = observable;
        let measurements: Result<Vec<(f64, f64, f64)>> = self
            .noise_factors
            .par_iter()
            .map(|&noise_factor| {
                let measured_value = noisy_executor(noise_factor)?;
                let uncertainty = self.estimate_measurement_uncertainty(measured_value);
                Ok((noise_factor, measured_value, uncertainty))
            })
            .collect();
        let measurements = measurements?;
        let (noise_factors, measured_values, uncertainties): (Vec<f64>, Vec<f64>, Vec<f64>) =
            measurements.unzip3();
        let (zero_noise_value, error_estimate, fit_stats) = match self.extrapolation_method {
            ExtrapolationMethod::Linear => {
                self.linear_extrapolation(&noise_factors, &measured_values, &uncertainties)?
            }
            ExtrapolationMethod::Exponential => {
                self.exponential_extrapolation(&noise_factors, &measured_values, &uncertainties)?
            }
            ExtrapolationMethod::Polynomial(order) => self.polynomial_extrapolation(
                &noise_factors,
                &measured_values,
                &uncertainties,
                order,
            )?,
            ExtrapolationMethod::Richardson => {
                self.richardson_extrapolation(&noise_factors, &measured_values)?
            }
            ExtrapolationMethod::Adaptive => {
                self.adaptive_extrapolation(&noise_factors, &measured_values, &uncertainties)?
            }
        };
        let confidence = self.calculate_confidence(&fit_stats);
        Ok(ZNEResult {
            zero_noise_value,
            error_estimate,
            noise_factors,
            measured_values,
            uncertainties,
            method: self.extrapolation_method,
            confidence,
            fit_stats,
        })
    }

    /// Weighted linear fit `y = a + b * x`; the zero-noise value is the
    /// intercept `a`.
    ///
    /// # Errors
    /// Fails with fewer than 2 data points or a singular fit.
    fn linear_extrapolation(
        &self,
        noise_factors: &[f64],
        measured_values: &[f64],
        uncertainties: &[f64],
    ) -> Result<(f64, f64, FitStatistics)> {
        if noise_factors.len() < 2 {
            return Err(SimulatorError::InvalidInput(
                "Need at least 2 data points for linear extrapolation".to_string(),
            ));
        }
        let (a, _b, fit_stats) =
            self.weighted_linear_fit(noise_factors, measured_values, uncertainties)?;
        // Mean absolute residual as a crude error bar on the intercept.
        let error_estimate = fit_stats.residuals.iter().map(|r| r.abs()).sum::<f64>()
            / fit_stats.residuals.len() as f64;
        Ok((a, error_estimate, fit_stats))
    }

    /// Exponential decay fit `y = a * exp(b * x)` by linearizing
    /// `ln y = ln a + b * x`; the zero-noise value is `a`.
    ///
    /// # Errors
    /// Fails when any measured value is non-positive (logarithm undefined).
    fn exponential_extrapolation(
        &self,
        noise_factors: &[f64],
        measured_values: &[f64],
        uncertainties: &[f64],
    ) -> Result<(f64, f64, FitStatistics)> {
        let log_values: Vec<f64> = measured_values
            .iter()
            .map(|&y| if y > 0.0 { y.ln() } else { f64::NEG_INFINITY })
            .collect();
        if log_values.iter().any(|&x| x.is_infinite()) {
            return Err(SimulatorError::NumericalError(
                "Cannot take logarithm of non-positive values for exponential fit".to_string(),
            ));
        }
        // First-order error propagation: sigma_ln_y = sigma_y / |y|.
        let log_uncertainties: Vec<f64> = uncertainties
            .iter()
            .zip(measured_values.iter())
            .map(|(&u, &y)| u / y.abs())
            .collect();
        let (ln_a, b, mut fit_stats) =
            self.weighted_linear_fit(noise_factors, &log_values, &log_uncertainties)?;
        let a = ln_a.exp();
        let zero_noise_value = a;
        // Error bar: mean absolute log-space residual scaled back by `a`.
        let error_estimate = a * fit_stats.residuals.iter().map(|r| r.abs()).sum::<f64>()
            / fit_stats.residuals.len() as f64;
        // Express residuals in the original (non-log) space. Fixed: use the
        // same measured-minus-predicted sign convention as the other methods
        // (previous code stored predicted - measured).
        fit_stats.residuals = noise_factors
            .iter()
            .zip(measured_values.iter())
            .map(|(&x, &y)| y - a * (b * x).exp())
            .collect();
        Ok((zero_noise_value, error_estimate, fit_stats))
    }

    /// Weighted polynomial fit of the given order; the zero-noise value is
    /// the constant coefficient.
    ///
    /// # Errors
    /// Fails when the number of points does not exceed the order, or when the
    /// normal equations are singular.
    fn polynomial_extrapolation(
        &self,
        noise_factors: &[f64],
        measured_values: &[f64],
        uncertainties: &[f64],
        order: usize,
    ) -> Result<(f64, f64, FitStatistics)> {
        if noise_factors.len() <= order {
            return Err(SimulatorError::InvalidInput(format!(
                "Need more than {order} data points for order {order} polynomial"
            )));
        }
        let n = noise_factors.len();
        // Vandermonde design matrix: row i is [1, x_i, x_i^2, ..., x_i^order].
        let mut design_matrix = Array2::zeros((n, order + 1));
        for (i, &x) in noise_factors.iter().enumerate() {
            for j in 0..=order {
                design_matrix[[i, j]] = x.powi(j as i32);
            }
        }
        let coefficients =
            self.solve_weighted_least_squares(&design_matrix, measured_values, uncertainties)?;
        // p(0) is just the constant term.
        let zero_noise_value = coefficients[0];
        let mut residuals = Vec::with_capacity(n);
        for (i, &x) in noise_factors.iter().enumerate() {
            let predicted: f64 = coefficients
                .iter()
                .enumerate()
                .map(|(j, &coeff)| coeff * x.powi(j as i32))
                .sum();
            residuals.push(measured_values[i] - predicted);
        }
        let error_estimate =
            residuals.iter().map(|r| r.abs()).sum::<f64>() / residuals.len() as f64;
        // Fill goodness-of-fit statistics so adaptive model selection can
        // compare this fit against the linear/exponential alternatives
        // (previously left at Default, which biased AIC comparisons).
        let num_parameters = order + 1;
        let ss_res: f64 = residuals.iter().map(|r| r * r).sum();
        let y_mean = measured_values.iter().sum::<f64>() / n as f64;
        let ss_tot: f64 = measured_values.iter().map(|&y| (y - y_mean).powi(2)).sum();
        let r_squared = if ss_tot > 0.0 { 1.0 - ss_res / ss_tot } else { 0.0 };
        let chi_squared_reduced = if n > num_parameters {
            ss_res / (n - num_parameters) as f64
        } else {
            0.0
        };
        let aic = Self::aic_from_residuals(&residuals, num_parameters);
        let fit_stats = FitStatistics {
            r_squared,
            chi_squared_reduced,
            aic,
            num_parameters,
            residuals,
        };
        Ok((zero_noise_value, error_estimate, fit_stats))
    }

    /// Three-point Richardson (quadratic Lagrange) extrapolation to x = 0.
    /// Only the first three (factor, value) pairs are used.
    ///
    /// # Errors
    /// Fails with fewer than 3 points or nearly coincident noise factors.
    fn richardson_extrapolation(
        &self,
        noise_factors: &[f64],
        measured_values: &[f64],
    ) -> Result<(f64, f64, FitStatistics)> {
        if noise_factors.len() < 3 {
            return Err(SimulatorError::InvalidInput(
                "Richardson extrapolation requires at least 3 data points".to_string(),
            ));
        }
        let x1 = noise_factors[0];
        let x2 = noise_factors[1];
        let x3 = noise_factors[2];
        let y1 = measured_values[0];
        let y2 = measured_values[1];
        let y3 = measured_values[2];
        let denominator = (x1 - x2) * (x1 - x3) * (x2 - x3);
        if denominator.abs() < 1e-12 {
            return Err(SimulatorError::NumericalError(
                "Noise factors too close for Richardson extrapolation".to_string(),
            ));
        }
        // Lagrange interpolation through the three points, evaluated at 0.
        let a = (y3 * x1 * x2).mul_add(
            x1 - x2,
            (y1 * x2 * x3).mul_add(x2 - x3, y2 * x1 * x3 * (x3 - x1)),
        ) / denominator;
        let zero_noise_value = a;
        // Heuristic error bar: 10% of the largest step between samples.
        let error_estimate = (y1 - y2).abs().max((y2 - y3).abs()) * 0.1;
        let fit_stats = FitStatistics {
            num_parameters: 3,
            ..Default::default()
        };
        Ok((zero_noise_value, error_estimate, fit_stats))
    }

    /// Tries linear, exponential, and low-order polynomial fits and keeps the
    /// candidate with the lowest AIC.
    ///
    /// # Errors
    /// Fails only if every candidate method fails.
    fn adaptive_extrapolation(
        &self,
        noise_factors: &[f64],
        measured_values: &[f64],
        uncertainties: &[f64],
    ) -> Result<(f64, f64, FitStatistics)> {
        let mut best_result = None;
        let mut best_aic = f64::INFINITY;
        let methods = vec![
            ExtrapolationMethod::Linear,
            ExtrapolationMethod::Exponential,
            ExtrapolationMethod::Polynomial(2),
            ExtrapolationMethod::Polynomial(3),
        ];
        for method in methods {
            let result = match method {
                ExtrapolationMethod::Linear => {
                    self.linear_extrapolation(noise_factors, measured_values, uncertainties)
                }
                ExtrapolationMethod::Exponential => {
                    self.exponential_extrapolation(noise_factors, measured_values, uncertainties)
                }
                ExtrapolationMethod::Polynomial(order) => self.polynomial_extrapolation(
                    noise_factors,
                    measured_values,
                    uncertainties,
                    order,
                ),
                // Defensive: only the variants above appear in `methods`.
                _ => continue,
            };
            if let Ok((value, error, stats)) = result {
                // Recompute AIC from the residuals so every candidate is
                // scored consistently (previously some methods reported the
                // default aic = 0.0 and always won the comparison).
                let aic = Self::aic_from_residuals(&stats.residuals, stats.num_parameters);
                if aic < best_aic {
                    best_aic = aic;
                    best_result = Some((value, error, stats));
                }
            }
        }
        best_result.ok_or_else(|| {
            SimulatorError::ComputationError("No extrapolation method succeeded".to_string())
        })
    }

    /// AIC = n * ln(SS_res / n) + 2k, with SS_res floored to avoid ln(0).
    /// Returns infinity for empty residuals so such fits are never selected.
    fn aic_from_residuals(residuals: &[f64], num_parameters: usize) -> f64 {
        if residuals.is_empty() {
            return f64::INFINITY;
        }
        let n = residuals.len() as f64;
        let ss_res: f64 = residuals.iter().map(|r| r * r).sum();
        n.mul_add((ss_res.max(f64::MIN_POSITIVE) / n).ln(), 2.0 * num_parameters as f64)
    }

    /// Weighted least-squares line fit `y = a + b * x`, weighting each point
    /// by 1/sigma^2 (points with non-positive sigma get unit weight).
    /// Returns `(intercept, slope, fit statistics)`.
    ///
    /// # Errors
    /// Fails with fewer than 2 points or a singular system.
    fn weighted_linear_fit(
        &self,
        x: &[f64],
        y: &[f64],
        weights: &[f64],
    ) -> Result<(f64, f64, FitStatistics)> {
        let n = x.len();
        if n < 2 {
            return Err(SimulatorError::InvalidInput(
                "Need at least 2 points for linear fit".to_string(),
            ));
        }
        let w: Vec<f64> = weights
            .iter()
            .map(|&sigma| {
                if sigma > 0.0 {
                    1.0 / (sigma * sigma)
                } else {
                    1.0
                }
            })
            .collect();
        let sum_w: f64 = w.iter().sum();
        let sum_wx: f64 = w.iter().zip(x.iter()).map(|(&wi, &xi)| wi * xi).sum();
        let sum_wy: f64 = w.iter().zip(y.iter()).map(|(&wi, &yi)| wi * yi).sum();
        let sum_wxx: f64 = w.iter().zip(x.iter()).map(|(&wi, &xi)| wi * xi * xi).sum();
        let sum_wxy: f64 = w
            .iter()
            .zip(x.iter())
            .zip(y.iter())
            .map(|((&wi, &xi), &yi)| wi * xi * yi)
            .sum();
        let delta = sum_w.mul_add(sum_wxx, -(sum_wx * sum_wx));
        if delta.abs() < 1e-12 {
            return Err(SimulatorError::NumericalError(
                "Singular matrix in linear fit".to_string(),
            ));
        }
        // Closed-form weighted least-squares solution.
        let a = sum_wxx.mul_add(sum_wy, -(sum_wx * sum_wxy)) / delta;
        let b = sum_w.mul_add(sum_wxy, -(sum_wx * sum_wy)) / delta;
        let mut residuals = Vec::with_capacity(n);
        let mut ss_res = 0.0;
        let mut ss_tot = 0.0;
        let y_mean = y.iter().sum::<f64>() / n as f64;
        for i in 0..n {
            let predicted = b.mul_add(x[i], a);
            let residual = y[i] - predicted;
            residuals.push(residual);
            ss_res += residual * residual;
            ss_tot += (y[i] - y_mean) * (y[i] - y_mean);
        }
        let r_squared = if ss_tot > 0.0 {
            1.0 - ss_res / ss_tot
        } else {
            0.0
        };
        // Guard n == 2: an exact two-point fit would otherwise compute
        // 0.0 / 0 = NaN for the reduced chi-squared.
        let chi_squared_reduced = if n > 2 { ss_res / (n - 2) as f64 } else { 0.0 };
        let aic = Self::aic_from_residuals(&residuals, 2);
        let fit_stats = FitStatistics {
            r_squared,
            chi_squared_reduced,
            aic,
            num_parameters: 2,
            residuals,
        };
        Ok((a, b, fit_stats))
    }

    /// Solves min ||W^(1/2) (X c - y)||^2 via the normal equations
    /// (X^T W X) c = X^T W y, using Gaussian elimination with partial
    /// pivoting. Weights are 1/sigma^2, unit weight for sigma <= 0.
    /// (Previously a placeholder that returned `[y[0], 0, ...]`.)
    ///
    /// # Errors
    /// Fails on dimension mismatch or a numerically singular system.
    fn solve_weighted_least_squares(
        &self,
        design_matrix: &Array2<f64>,
        y: &[f64],
        weights: &[f64],
    ) -> Result<Vec<f64>> {
        let n = design_matrix.nrows();
        let n_params = design_matrix.ncols();
        if y.len() != n || weights.len() != n {
            return Err(SimulatorError::InvalidInput(
                "Dimension mismatch in weighted least squares".to_string(),
            ));
        }
        let w: Vec<f64> = weights
            .iter()
            .map(|&sigma| if sigma > 0.0 { 1.0 / (sigma * sigma) } else { 1.0 })
            .collect();
        // Accumulate the normal equations A c = rhs.
        let mut a = vec![vec![0.0_f64; n_params]; n_params];
        let mut rhs = vec![0.0_f64; n_params];
        for i in 0..n {
            for j in 0..n_params {
                let xij = design_matrix[[i, j]];
                rhs[j] += w[i] * xij * y[i];
                for k in 0..n_params {
                    a[j][k] += w[i] * xij * design_matrix[[i, k]];
                }
            }
        }
        // Forward elimination with partial pivoting.
        for col in 0..n_params {
            let mut pivot = col;
            for row in (col + 1)..n_params {
                if a[row][col].abs() > a[pivot][col].abs() {
                    pivot = row;
                }
            }
            if a[pivot][col].abs() < 1e-12 {
                return Err(SimulatorError::NumericalError(
                    "Singular normal equations in weighted least squares".to_string(),
                ));
            }
            a.swap(col, pivot);
            rhs.swap(col, pivot);
            for row in (col + 1)..n_params {
                let factor = a[row][col] / a[col][col];
                for k in col..n_params {
                    a[row][k] -= factor * a[col][k];
                }
                rhs[row] -= factor * rhs[col];
            }
        }
        // Back substitution.
        let mut coefficients = vec![0.0_f64; n_params];
        for col in (0..n_params).rev() {
            let mut sum = rhs[col];
            for k in (col + 1)..n_params {
                sum -= a[col][k] * coefficients[k];
            }
            coefficients[col] = sum / a[col][col];
        }
        Ok(coefficients)
    }

    /// Binomial shot-noise estimate (times a conservative factor of 2) for an
    /// expectation value in [-1, 1].
    fn estimate_measurement_uncertainty(&self, measured_value: f64) -> f64 {
        // Map the expectation value to an outcome probability p = (m + 1) / 2;
        // clamp so out-of-range inputs cannot produce sqrt of a negative.
        let p = f64::midpoint(measured_value, 1.0).clamp(0.0, 1.0);
        // max(1) guards a zero shot count against division by zero.
        let shot_noise = (p * (1.0 - p) / self.shots_per_level.max(1) as f64).sqrt();
        shot_noise * 2.0
    }

    /// Combines R^2 and reduced chi-squared into a confidence score in [0, 1].
    fn calculate_confidence(&self, fit_stats: &FitStatistics) -> f64 {
        let r_sq_factor = fit_stats.r_squared.clamp(0.0, 1.0);
        let chi_sq_factor = if fit_stats.chi_squared_reduced > 0.0 {
            1.0 / (1.0 + fit_stats.chi_squared_reduced)
        } else {
            // Neutral score when chi-squared is unavailable or exactly zero.
            0.5
        };
        (r_sq_factor * chi_sq_factor).sqrt()
    }
}
/// Virtual distillation: suppresses errors by combining measurements from
/// multiple copies of a noisy state.
pub struct VirtualDistillation {
    /// Number of copies measured per distillation.
    num_copies: usize,
    /// Strategy used to combine the copy measurements.
    protocol: DistillationProtocol,
}
/// Strategy for combining measurements in virtual distillation.
#[derive(Debug, Clone, Copy)]
pub enum DistillationProtocol {
    /// Signed geometric mean of the measurements.
    Standard,
    /// Median of the measurements (outlier robust).
    Optimized,
    /// Picks Standard or Optimized based on the sample variance.
    Adaptive,
}
impl VirtualDistillation {
#[must_use]
pub const fn new(num_copies: usize, protocol: DistillationProtocol) -> Self {
Self {
num_copies,
protocol,
}
}
pub fn distill<F>(
&self,
noisy_executor: F,
observable: &str,
) -> Result<VirtualDistillationResult>
where
F: Fn() -> Result<f64>,
{
let measurements: Result<Vec<f64>> =
(0..self.num_copies).map(|_| noisy_executor()).collect();
let measurements = measurements?;
let mitigated_value = match self.protocol {
DistillationProtocol::Standard => self.standard_distillation(&measurements)?,
DistillationProtocol::Optimized => self.optimized_distillation(&measurements)?,
DistillationProtocol::Adaptive => self.adaptive_distillation(&measurements)?,
};
let raw_value = measurements.iter().sum::<f64>() / measurements.len() as f64;
let error_reduction = (raw_value - mitigated_value).abs() / raw_value.abs().max(1e-10);
Ok(VirtualDistillationResult {
mitigated_value,
overhead: self.num_copies,
efficiency: 1.0 / self.num_copies as f64,
error_reduction,
})
}
fn standard_distillation(&self, measurements: &[f64]) -> Result<f64> {
let product: f64 = measurements.iter().product();
let mitigated = if product >= 0.0 {
product.powf(1.0 / self.num_copies as f64)
} else {
-(-product).powf(1.0 / self.num_copies as f64)
};
Ok(mitigated)
}
fn optimized_distillation(&self, measurements: &[f64]) -> Result<f64> {
let mut sorted = measurements.to_vec();
sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
let median = sorted[sorted.len() / 2];
Ok(median)
}
fn adaptive_distillation(&self, measurements: &[f64]) -> Result<f64> {
let mean = measurements.iter().sum::<f64>() / measurements.len() as f64;
let variance = measurements.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
/ measurements.len() as f64;
if variance < 0.1 {
self.standard_distillation(measurements)
} else {
self.optimized_distillation(measurements)
}
}
}
/// Symmetry-based error detection: compares measured symmetry observables
/// against their expected eigenvalues and post-selects the main result.
pub struct SymmetryVerification {
    /// Symmetry observables to check.
    symmetries: Vec<SymmetryOperation>,
}
/// A single symmetry check: an observable name, its expected eigenvalue,
/// and the tolerated deviation.
#[derive(Debug, Clone)]
pub struct SymmetryOperation {
    /// Observable name passed to the executor.
    pub name: String,
    /// Expected eigenvalue (only the real part is compared).
    pub eigenvalue: Complex64,
    /// Maximum tolerated absolute deviation.
    pub tolerance: f64,
}
impl SymmetryVerification {
    /// Creates a verifier for the given symmetry operations.
    #[must_use]
    pub const fn new(symmetries: Vec<SymmetryOperation>) -> Self {
        Self { symmetries }
    }

    /// Measures the main observable plus every symmetry observable, records
    /// how strongly each symmetry is violated, and keeps the main result for
    /// each symmetry that passes its tolerance (post-selection).
    ///
    /// # Errors
    /// Propagates executor failures.
    pub fn verify_and_correct<F>(
        &self,
        executor: F,
        observable: &str,
    ) -> Result<SymmetryVerificationResult>
    where
        F: Fn(&str) -> Result<f64>,
    {
        let main_value = executor(observable)?;
        let mut violations = Vec::new();
        let mut valid_measurements = Vec::new();
        for symmetry in &self.symmetries {
            let symmetry_value = executor(&symmetry.name)?;
            // Only the real part of the expected eigenvalue is compared.
            let expected = symmetry.eigenvalue.re;
            let violation = (symmetry_value - expected).abs();
            violations.push(violation);
            if violation <= symmetry.tolerance {
                // NOTE(review): the same `main_value` is pushed per passing
                // symmetry, so the average below always equals `main_value`;
                // only the post-selection probability varies — TODO confirm
                // this is the intended correction scheme.
                valid_measurements.push(main_value);
            }
        }
        let corrected_value = if valid_measurements.is_empty() {
            main_value
        } else {
            valid_measurements.iter().sum::<f64>() / valid_measurements.len() as f64
        };
        // Guard the empty-symmetry case, which previously yielded 0/0 = NaN.
        let avg_violation = if violations.is_empty() {
            0.0
        } else {
            violations.iter().sum::<f64>() / violations.len() as f64
        };
        let post_selection_prob =
            valid_measurements.len() as f64 / (self.symmetries.len() + 1) as f64;
        Ok(SymmetryVerificationResult {
            corrected_value,
            symmetry_violation: avg_violation,
            post_selection_prob,
            symmetries: self.symmetries.iter().map(|s| s.name.clone()).collect(),
        })
    }
}
/// Splits a vector of triples into three parallel vectors.
trait Unzip3<A, B, C> {
    fn unzip3(self) -> (Vec<A>, Vec<B>, Vec<C>);
}
impl<A, B, C> Unzip3<A, B, C> for Vec<(A, B, C)> {
    fn unzip3(self) -> (Vec<A>, Vec<B>, Vec<C>) {
        // Pre-size all three outputs, then fold the triples into them.
        let len = self.len();
        let seed = (
            Vec::with_capacity(len),
            Vec::with_capacity(len),
            Vec::with_capacity(len),
        );
        self.into_iter()
            .fold(seed, |(mut first, mut second, mut third), (a, b, c)| {
                first.push(a);
                second.push(b);
                third.push(c);
                (first, second, third)
            })
    }
}
/// Smoke-test helper: runs ZNE against a synthetic exponential-decay
/// executor and virtual distillation against a constant executor.
///
/// # Errors
/// Propagates failures from either mitigation routine.
pub fn benchmark_noise_extrapolation() -> Result<(ZNEResult, VirtualDistillationResult)> {
    // Synthetic noisy backend: ideal value 1.0 decaying as exp(-0.1 * factor).
    let noisy_executor = |noise_factor: f64| -> Result<f64> {
        let ideal_value = 1.0;
        let noise_rate = 0.1;
        Ok(ideal_value * (-noise_rate * noise_factor).exp())
    };
    let zne_result =
        ZeroNoiseExtrapolator::default_config().extrapolate(noisy_executor, "Z0")?;
    let vd_result = VirtualDistillation::new(3, DistillationProtocol::Standard)
        .distill(|| Ok(0.8), "Z0")?;
    Ok((zne_result, vd_result))
}
/// Result of probabilistic error cancellation (PEC).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PECResult {
    /// Sign-weighted average of the sampled executions.
    pub mitigated_value: f64,
    /// Standard error of the mitigated value.
    pub statistical_error: f64,
    /// Quasi-probability sampling overhead (gamma factor).
    pub sampling_overhead: f64,
    /// Number of samples actually drawn.
    pub num_samples: usize,
    /// Samples divided by the sampling overhead.
    pub effective_samples: f64,
}
/// Probabilistic error cancellation driver.
#[derive(Debug, Clone)]
pub struct ProbabilisticErrorCancellation {
    /// Device noise characterization used for overhead estimates.
    pub noise_model: NoiseModel,
    /// Number of quasi-probability samples to draw.
    pub num_samples: usize,
    /// Optional RNG seed (currently not consulted by `mitigate`).
    pub seed: Option<u64>,
}
/// Simple per-gate / per-qubit error-rate model.
#[derive(Debug, Clone, Default)]
pub struct NoiseModel {
    /// Error rate per single-qubit gate, keyed by gate name.
    pub single_qubit_errors: std::collections::HashMap<String, f64>,
    /// Error rate per two-qubit gate, keyed by qubit pair.
    pub two_qubit_errors: std::collections::HashMap<(usize, usize), f64>,
    /// Per-qubit readout error pair (assumed (P(1|0), P(0|1)) — TODO confirm).
    pub readout_errors: std::collections::HashMap<usize, (f64, f64)>,
}
impl ProbabilisticErrorCancellation {
pub fn new(noise_model: NoiseModel, num_samples: usize) -> Self {
Self {
noise_model,
num_samples,
seed: None,
}
}
pub fn with_seed(mut self, seed: u64) -> Self {
self.seed = Some(seed);
self
}
pub fn calculate_overhead(&self, circuit_depth: usize, num_gates: usize) -> f64 {
let avg_error: f64 = self
.noise_model
.single_qubit_errors
.values()
.cloned()
.sum::<f64>()
/ self.noise_model.single_qubit_errors.len().max(1) as f64;
let gamma_per_gate = 1.0 + 2.0 * avg_error;
gamma_per_gate.powi(num_gates as i32)
}
pub fn mitigate<F>(&self, executor: F, observable: &str) -> Result<PECResult>
where
F: Fn(&str, i32) -> Result<f64>,
{
use scirs2_core::random::thread_rng;
use scirs2_core::random::Rng;
let mut rng = thread_rng();
let mut weighted_sum = 0.0;
let mut weight_sum = 0.0;
let mut samples = Vec::with_capacity(self.num_samples);
for _ in 0..self.num_samples {
let sign: i32 = if rng.random::<f64>() < 0.5 { 1 } else { -1 };
let value = executor(observable, sign)?;
let weighted_value = sign as f64 * value;
weighted_sum += weighted_value;
weight_sum += 1.0;
samples.push(weighted_value);
}
let mitigated_value = weighted_sum / weight_sum;
let variance: f64 = samples
.iter()
.map(|&x| (x - mitigated_value).powi(2))
.sum::<f64>()
/ (self.num_samples - 1).max(1) as f64;
let statistical_error = (variance / self.num_samples as f64).sqrt();
let sampling_overhead = self.calculate_overhead(10, 20);
Ok(PECResult {
mitigated_value,
statistical_error,
sampling_overhead,
num_samples: self.num_samples,
effective_samples: self.num_samples as f64 / sampling_overhead,
})
}
}
/// Result of Clifford data regression (CDR) mitigation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CDRResult {
    /// Noisy value mapped through the learned linear model.
    pub mitigated_value: f64,
    /// Heuristic error bar from the model's unexplained variance.
    pub regression_error: f64,
    /// Number of training circuits behind the model.
    pub num_clifford_circuits: usize,
    /// Learned model coefficients `[slope, intercept]`.
    pub coefficients: Vec<f64>,
    /// Coefficient of determination of the training fit.
    pub r_squared: f64,
}
/// Clifford data regression: learns a noisy -> ideal map from classically
/// simulable training circuits.
#[derive(Debug, Clone)]
pub struct CliffordDataRegression {
    /// Number of training circuits to execute.
    pub num_training_circuits: usize,
    /// Requested regression flavor (currently not consulted by `train`).
    pub regression_method: RegressionMethod,
    /// Whether to fit an intercept term.
    pub fit_intercept: bool,
}
/// Regression flavor for CDR training.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum RegressionMethod {
    OrdinaryLeastSquares,
    /// L2-regularized with strength `alpha`.
    Ridge { alpha: f64 },
    /// L1-regularized with strength `alpha`.
    Lasso { alpha: f64 },
    Bayesian,
}
impl Default for CliffordDataRegression {
    /// 100 training circuits, ordinary least squares, intercept enabled.
    fn default() -> Self {
        Self {
            num_training_circuits: 100,
            regression_method: RegressionMethod::OrdinaryLeastSquares,
            fit_intercept: true,
        }
    }
}
impl CliffordDataRegression {
pub fn new(num_training_circuits: usize, regression_method: RegressionMethod) -> Self {
Self {
num_training_circuits,
regression_method,
fit_intercept: true,
}
}
pub fn train<F, G>(&self, noisy_executor: F, ideal_executor: G) -> Result<CDRModel>
where
F: Fn(usize) -> Result<f64>,
G: Fn(usize) -> Result<f64>,
{
let mut noisy_values = Vec::with_capacity(self.num_training_circuits);
let mut ideal_values = Vec::with_capacity(self.num_training_circuits);
for i in 0..self.num_training_circuits {
noisy_values.push(noisy_executor(i)?);
ideal_values.push(ideal_executor(i)?);
}
let n = self.num_training_circuits as f64;
let sum_x: f64 = noisy_values.iter().sum();
let sum_y: f64 = ideal_values.iter().sum();
let sum_xx: f64 = noisy_values.iter().map(|x| x * x).sum();
let sum_xy: f64 = noisy_values
.iter()
.zip(&ideal_values)
.map(|(x, y)| x * y)
.sum();
let (a, b) = if self.fit_intercept {
let denom = n * sum_xx - sum_x * sum_x;
if denom.abs() < 1e-10 {
(1.0, 0.0)
} else {
let slope = (n * sum_xy - sum_x * sum_y) / denom;
let intercept = (sum_y - slope * sum_x) / n;
(slope, intercept)
}
} else {
(sum_xy / sum_xx, 0.0)
};
let y_mean = sum_y / n;
let ss_tot: f64 = ideal_values.iter().map(|y| (y - y_mean).powi(2)).sum();
let ss_res: f64 = noisy_values
.iter()
.zip(&ideal_values)
.map(|(x, y)| (y - (a * x + b)).powi(2))
.sum();
let r_squared = if ss_tot > 1e-10 {
1.0 - ss_res / ss_tot
} else {
0.0
};
Ok(CDRModel {
coefficients: vec![a, b],
r_squared,
num_training_points: self.num_training_circuits,
})
}
pub fn mitigate(&self, model: &CDRModel, noisy_value: f64) -> Result<CDRResult> {
let mitigated_value = model.coefficients[0] * noisy_value + model.coefficients[1];
let regression_error = (1.0 - model.r_squared).sqrt() * noisy_value.abs().max(0.1);
Ok(CDRResult {
mitigated_value,
regression_error,
num_clifford_circuits: model.num_training_points,
coefficients: model.coefficients.clone(),
r_squared: model.r_squared,
})
}
}
/// Learned CDR linear model.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CDRModel {
    /// `[slope, intercept]` of the noisy -> ideal map.
    pub coefficients: Vec<f64>,
    /// Coefficient of determination of the training fit.
    pub r_squared: f64,
    /// Number of training circuits used.
    pub num_training_points: usize,
}
/// Result of matrix-free measurement mitigation (M3-style).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct M3Result {
    /// Corrected, renormalized outcome probabilities per bitstring.
    pub mitigated_probs: std::collections::HashMap<String, f64>,
    /// Raw input counts, kept for reference.
    pub original_counts: std::collections::HashMap<String, usize>,
    /// Iterations actually executed.
    pub num_iterations: usize,
    /// Whether the iteration reached the tolerance before the cap.
    pub converged: bool,
    /// Final L1 difference between successive iterates.
    pub residual_norm: f64,
}
/// Measurement-error mitigation that avoids building the full 2^n
/// assignment matrix.
#[derive(Debug, Clone)]
pub struct MatrixFreeMeasurementMitigation {
    /// Per-qubit confusion-matrix calibration data.
    pub calibration: M3Calibration,
    /// Iteration cap for the corrective loop.
    pub max_iterations: usize,
    /// L1 convergence tolerance between iterations.
    pub tolerance: f64,
    /// Requested solver (currently not consulted by `mitigate`).
    pub solver_method: M3SolverMethod,
}
/// Readout calibration data.
#[derive(Debug, Clone, Default)]
pub struct M3Calibration {
    /// Per-qubit 2x2 confusion matrices, laid out as
    /// `[[P(0|0), P(0|1)], [P(1|0), P(1|1)]]` (see `calibrate`).
    pub confusion_matrices: std::collections::HashMap<usize, [[f64; 2]; 2]>,
    /// Correlated two-qubit error matrices (not consulted in this file).
    pub correlated_errors: std::collections::HashMap<(usize, usize), Array2<f64>>,
}
/// Solver variants for measurement mitigation.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum M3SolverMethod {
    IterativeBayesian,
    NonNegativeLeastSquares,
    ConjugateGradient,
}
impl Default for MatrixFreeMeasurementMitigation {
    /// Empty calibration, 100 iterations, 1e-6 tolerance, iterative Bayesian.
    fn default() -> Self {
        Self {
            calibration: M3Calibration::default(),
            max_iterations: 100,
            tolerance: 1e-6,
            solver_method: M3SolverMethod::IterativeBayesian,
        }
    }
}
impl MatrixFreeMeasurementMitigation {
pub fn new(calibration: M3Calibration) -> Self {
Self {
calibration,
..Default::default()
}
}
pub fn calibrate<F>(&mut self, executor: F, num_qubits: usize, shots: usize) -> Result<()>
where
F: Fn(&str, usize) -> Result<std::collections::HashMap<String, usize>>,
{
for qubit in 0..num_qubits {
let counts_0 = executor(&format!("cal_0_{}", qubit), shots)?;
let p00 = *counts_0.get("0").unwrap_or(&0) as f64 / shots as f64;
let p10 = *counts_0.get("1").unwrap_or(&0) as f64 / shots as f64;
let counts_1 = executor(&format!("cal_1_{}", qubit), shots)?;
let p01 = *counts_1.get("0").unwrap_or(&0) as f64 / shots as f64;
let p11 = *counts_1.get("1").unwrap_or(&0) as f64 / shots as f64;
self.calibration
.confusion_matrices
.insert(qubit, [[p00, p01], [p10, p11]]);
}
Ok(())
}
pub fn mitigate(
&self,
counts: &std::collections::HashMap<String, usize>,
num_qubits: usize,
) -> Result<M3Result> {
let total_shots: usize = counts.values().sum();
let mut probs: std::collections::HashMap<String, f64> = counts
.iter()
.map(|(k, v)| (k.clone(), *v as f64 / total_shots as f64))
.collect();
let mut converged = false;
let mut num_iterations = 0;
let mut residual_norm = f64::MAX;
for iter in 0..self.max_iterations {
num_iterations = iter + 1;
let mut new_probs = std::collections::HashMap::new();
let mut total = 0.0;
for (bitstring, &prob) in &probs {
let mut correction = 1.0;
for (i, c) in bitstring.chars().rev().enumerate() {
if let Some(confusion) = self.calibration.confusion_matrices.get(&i) {
let measured = if c == '1' { 1 } else { 0 };
let diag = confusion[measured][measured];
if diag > 0.01 {
correction /= diag;
}
}
}
let corrected_prob = (prob * correction).max(0.0);
new_probs.insert(bitstring.clone(), corrected_prob);
total += corrected_prob;
}
if total > 0.0 {
for prob in new_probs.values_mut() {
*prob /= total;
}
}
let mut diff = 0.0;
for (k, &new_p) in &new_probs {
let old_p = probs.get(k).unwrap_or(&0.0);
diff += (new_p - old_p).abs();
}
residual_norm = diff;
probs = new_probs;
if diff < self.tolerance {
converged = true;
break;
}
}
Ok(M3Result {
mitigated_probs: probs,
original_counts: counts.clone(),
num_iterations,
converged,
residual_norm,
})
}
pub fn expectation_value(&self, result: &M3Result, observable: &[i8]) -> f64 {
let mut expectation = 0.0;
for (bitstring, &prob) in &result.mitigated_probs {
let mut parity = 1;
for (i, c) in bitstring.chars().rev().enumerate() {
if i < observable.len() && observable[i] != 0 && c == '1' {
parity *= -1;
}
}
expectation += parity as f64 * prob;
}
expectation
}
}
/// Composable error-mitigation pipeline; each stage is optional.
#[derive(Debug, Clone)]
pub struct ErrorMitigationPipeline {
    /// Zero-noise extrapolation stage.
    pub zne: Option<ZeroNoiseExtrapolator>,
    /// Probabilistic error cancellation stage (not yet applied by `apply`).
    pub pec: Option<ProbabilisticErrorCancellation>,
    /// Clifford data regression stage (not yet applied by `apply`).
    pub cdr: Option<CliffordDataRegression>,
    /// Measurement mitigation stage (not yet applied by `apply`).
    pub m3: Option<MatrixFreeMeasurementMitigation>,
}
impl Default for ErrorMitigationPipeline {
    /// ZNE-only pipeline with the default ZNE configuration.
    fn default() -> Self {
        Self {
            zne: Some(ZeroNoiseExtrapolator::default_config()),
            pec: None,
            cdr: None,
            m3: None,
        }
    }
}
impl ErrorMitigationPipeline {
    /// Pipeline with every mitigation stage enabled (default settings,
    /// 1000 PEC samples).
    pub fn full() -> Self {
        Self {
            zne: Some(ZeroNoiseExtrapolator::default_config()),
            pec: Some(ProbabilisticErrorCancellation::new(
                NoiseModel::default(),
                1000,
            )),
            cdr: Some(CliffordDataRegression::default()),
            m3: Some(MatrixFreeMeasurementMitigation::default()),
        }
    }

    /// Applies the pipeline to a noise-scalable executor.
    ///
    /// NOTE(review): only the ZNE stage is applied here; `pec`, `cdr`, and
    /// `m3` are configured but not yet chained — TODO extend.
    ///
    /// # Errors
    /// Propagates executor and extrapolation failures.
    pub fn apply<F>(&self, executor: F, observable: &str) -> Result<f64>
    where
        F: Fn(f64) -> Result<f64> + Sync + Send,
    {
        // `mut` removed: the value is never reassigned after this binding.
        let value = if let Some(zne) = &self.zne {
            zne.extrapolate(&executor, observable)?.zero_noise_value
        } else {
            // No ZNE configured: run once at the unscaled noise level.
            executor(1.0)?
        };
        Ok(value)
    }

    /// Pass-through variant for executors keyed by observable name; no
    /// mitigation stage is applied.
    ///
    /// # Errors
    /// Propagates executor failures.
    pub fn apply_simple<F>(&self, executor: F, observable: &str) -> Result<f64>
    where
        F: Fn(&str) -> Result<f64>,
    {
        executor(observable)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Linear data 1.0 - 0.1 * x should extrapolate back to ~1.0 at x = 0.
    #[test]
    fn test_zne_linear_extrapolation() {
        let extrapolator = ZeroNoiseExtrapolator::new(
            NoiseScalingMethod::UnitaryFolding,
            ExtrapolationMethod::Linear,
            vec![1.0, 2.0, 3.0],
            100,
        );
        let factors = [1.0, 2.0, 3.0];
        let values = [0.9, 0.8, 0.7];
        let sigmas = [0.01, 0.01, 0.01];
        let (zero_noise, _error, _stats) = extrapolator
            .linear_extrapolation(&factors, &values, &sigmas)
            .expect("Failed to perform linear extrapolation");
        assert!((zero_noise - 1.0).abs() < 0.1);
    }

    /// Geometric-mean distillation of values in (0, 1) stays in (0, 1).
    #[test]
    fn test_virtual_distillation() {
        let distiller = VirtualDistillation::new(3, DistillationProtocol::Standard);
        let samples = vec![0.8, 0.7, 0.9];
        let combined = distiller
            .standard_distillation(&samples)
            .expect("Failed to perform standard distillation");
        assert!(combined > 0.0 && combined < 1.0);
    }

    /// A passing parity symmetry keeps the main measurement unchanged.
    #[test]
    fn test_symmetry_verification() {
        let verifier = SymmetryVerification::new(vec![SymmetryOperation {
            name: "parity".to_string(),
            eigenvalue: Complex64::new(1.0, 0.0),
            tolerance: 0.1,
        }]);
        let executor = |obs: &str| -> Result<f64> {
            Ok(match obs {
                "Z0" => 0.8,
                "parity" => 0.95,
                _ => 0.0,
            })
        };
        let outcome = verifier
            .verify_and_correct(executor, "Z0")
            .expect("Failed to verify and correct");
        assert!((outcome.corrected_value - 0.8).abs() < 1e-10);
        assert!(outcome.post_selection_prob > 0.0);
    }

    /// Richardson extrapolation of a decaying trend stays positive.
    #[test]
    fn test_richardson_extrapolation() {
        let extrapolator = ZeroNoiseExtrapolator::default_config();
        let (zero_noise, _error, _stats) = extrapolator
            .richardson_extrapolation(&[1.0, 2.0, 3.0], &[1.0, 0.8, 0.6])
            .expect("Failed to perform Richardson extrapolation");
        assert!(zero_noise > 0.0);
    }
}