use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::prelude::*;
use scirs2_core::random::ChaCha8Rng;
use scirs2_core::random::{RngExt, SeedableRng};
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use crate::error::{Result, SimulatorError};
use crate::pauli::{PauliOperatorSum, PauliString};
use crate::statevector::StateVectorSimulator;
/// The outcome of a shot-sampling run: per-shot bitstring outcomes plus the
/// aggregated statistics and the configuration that produced them.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ShotResult {
    /// Individual measurement outcomes, one `BitString` per shot.
    pub outcomes: Vec<BitString>,
    /// Number of shots requested for this run.
    pub num_shots: usize,
    /// Aggregate statistics (zeroed placeholders when `compute_statistics`
    /// was disabled in the config).
    pub statistics: MeasurementStatistics,
    /// The sampling configuration used for this run.
    pub config: SamplingConfig,
}
/// A classical measurement outcome stored as a little-endian bit vector:
/// index 0 holds the least-significant bit (qubit 0).
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct BitString {
    /// Bit values; constructors only ever produce 0 or 1, but the field is
    /// public, so other values are representable — NOTE(review): confirm
    /// callers uphold the 0/1 invariant.
    pub bits: Vec<u8>,
}
impl BitString {
    /// Build a bit string from boolean values (`true` -> 1, `false` -> 0).
    #[must_use]
    pub fn from_bools(bools: &[bool]) -> Self {
        let bits = bools.iter().map(|&flag| u8::from(flag)).collect();
        Self { bits }
    }

    /// Convert back to booleans (exactly 1 -> `true`, anything else -> `false`).
    #[must_use]
    pub fn to_bools(&self) -> Vec<bool> {
        self.bits.iter().map(|&bit| bit == 1).collect()
    }

    /// Interpret the bits as a little-endian unsigned integer
    /// (bit 0 is the least-significant bit).
    #[must_use]
    pub fn to_int(&self) -> usize {
        let mut value = 0usize;
        for (position, &bit) in self.bits.iter().enumerate() {
            value += (bit as usize) << position;
        }
        value
    }

    /// Decompose `value` into its `num_bits` low-order bits, little-endian.
    /// Extra positions beyond the width of `value` are filled with zeros.
    #[must_use]
    pub fn from_int(value: usize, num_bits: usize) -> Self {
        let mut remaining = value;
        let bits = (0..num_bits)
            .map(|_| {
                let bit = (remaining & 1) as u8;
                remaining >>= 1;
                bit
            })
            .collect();
        Self { bits }
    }

    /// Number of bits stored.
    #[must_use]
    pub fn len(&self) -> usize {
        self.bits.len()
    }

    /// `true` when no bits are stored.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.bits.is_empty()
    }

    /// Hamming weight: the sum of all bit values.
    #[must_use]
    pub fn weight(&self) -> usize {
        self.bits.iter().map(|&bit| usize::from(bit)).sum()
    }

    /// Hamming distance to `other` (sum of per-position XORs).
    /// Returns `usize::MAX` as a sentinel when the lengths differ.
    #[must_use]
    pub fn distance(&self, other: &Self) -> usize {
        if self.len() != other.len() {
            return usize::MAX;
        }
        self.bits
            .iter()
            .zip(other.bits.iter())
            .map(|(&lhs, &rhs)| usize::from(lhs ^ rhs))
            .sum()
    }
}
impl std::fmt::Display for BitString {
    /// Render the bits as a run of digits in storage order (bit 0 first).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.bits.iter().try_for_each(|bit| write!(f, "{bit}"))
    }
}
/// Aggregate statistics computed from a set of measurement outcomes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MeasurementStatistics {
    /// Occurrence count per distinct outcome.
    pub counts: HashMap<BitString, usize>,
    /// The most frequent outcome (ties broken arbitrarily).
    pub mode: BitString,
    /// Empirical probability (count / total shots) per outcome.
    pub probabilities: HashMap<BitString, f64>,
    /// Variance of the empirical probabilities around the uniform mean.
    pub probability_variance: f64,
    /// Normal-approximation confidence interval (lo, hi) per outcome,
    /// clamped to [0, 1].
    pub confidence_intervals: HashMap<BitString, (f64, f64)>,
    /// Shannon entropy of the empirical distribution, in nats (natural log).
    pub entropy: f64,
    /// Sum of squared empirical probabilities (collision probability of the
    /// measured distribution).
    pub purity: f64,
}
/// Configuration for a shot-sampling run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SamplingConfig {
    /// Number of shots to draw.
    pub num_shots: usize,
    /// RNG seed; `None` seeds nondeterministically from the thread RNG.
    pub seed: Option<u64>,
    /// Confidence level for interval estimates (e.g. 0.95).
    pub confidence_level: f64,
    /// Whether to aggregate statistics after sampling.
    pub compute_statistics: bool,
    /// Whether convergence estimation is requested by callers.
    pub estimate_convergence: bool,
    /// Batch size (in shots) between convergence checks.
    pub convergence_check_interval: usize,
    /// Maximum difference between successive batch means to declare
    /// convergence.
    pub convergence_tolerance: f64,
    /// Total shot budget for convergence estimation.
    pub max_shots_for_convergence: usize,
}
impl Default for SamplingConfig {
    /// Defaults: 1024 shots, unseeded RNG, 95% confidence, statistics on,
    /// convergence estimation off.
    fn default() -> Self {
        Self {
            num_shots: 1024,
            seed: None, // nondeterministic unless explicitly seeded
            confidence_level: 0.95,
            compute_statistics: true,
            estimate_convergence: false,
            convergence_check_interval: 100, // shots per convergence batch
            convergence_tolerance: 0.01,
            max_shots_for_convergence: 10_000,
        }
    }
}
/// Shot sampler that draws measurement outcomes from state vectors using a
/// seedable ChaCha8 RNG (reproducible when `SamplingConfig::seed` is set).
pub struct QuantumSampler {
    // RNG used for all draws; deterministic when seeded.
    rng: ChaCha8Rng,
    // Run configuration (shots, seed, statistics/convergence options).
    config: SamplingConfig,
}
impl QuantumSampler {
    /// Create a sampler from `config`.
    ///
    /// When `config.seed` is `Some`, the RNG is seeded deterministically so
    /// runs are reproducible; otherwise it is seeded from the thread RNG.
    #[must_use]
    pub fn new(config: SamplingConfig) -> Self {
        let rng = if let Some(seed) = config.seed {
            ChaCha8Rng::seed_from_u64(seed)
        } else {
            ChaCha8Rng::from_rng(&mut thread_rng())
        };
        Self { rng, config }
    }

    /// Draw `config.num_shots` computational-basis samples from `state`
    /// according to the Born rule, optionally aggregating statistics.
    ///
    /// # Errors
    ///
    /// Returns `SimulatorError::InvalidInput` when the state dimension is not
    /// a power of two or the amplitudes are not normalized to 1.
    pub fn sample_state(&mut self, state: &Array1<Complex64>) -> Result<ShotResult> {
        // Infer the qubit count from the dimension; a non-power-of-two
        // dimension fails the round-trip check below.
        let num_qubits = (state.len() as f64).log2() as usize;
        if 1 << num_qubits != state.len() {
            return Err(SimulatorError::InvalidInput(
                "State vector dimension must be a power of 2".to_string(),
            ));
        }
        // Born-rule probabilities |amplitude|^2.
        let probabilities: Vec<f64> = state.iter().map(scirs2_core::Complex::norm_sqr).collect();
        let total_prob: f64 = probabilities.iter().sum();
        if (total_prob - 1.0).abs() > 1e-10 {
            return Err(SimulatorError::InvalidInput(format!(
                "State vector not normalized: total probability = {total_prob}"
            )));
        }
        let mut outcomes = Vec::with_capacity(self.config.num_shots);
        for _ in 0..self.config.num_shots {
            let sample = self.sample_from_distribution(&probabilities)?;
            outcomes.push(BitString::from_int(sample, num_qubits));
        }
        let statistics = if self.config.compute_statistics {
            self.compute_statistics(&outcomes)?
        } else {
            // Caller opted out of statistics; return an empty placeholder.
            MeasurementStatistics {
                counts: HashMap::new(),
                mode: BitString::from_int(0, num_qubits),
                probabilities: HashMap::new(),
                probability_variance: 0.0,
                confidence_intervals: HashMap::new(),
                entropy: 0.0,
                purity: 0.0,
            }
        };
        Ok(ShotResult {
            outcomes,
            num_shots: self.config.num_shots,
            statistics,
            config: self.config.clone(),
        })
    }

    /// Sample `state` after applying `noise_model`'s readout channel.
    ///
    /// # Errors
    ///
    /// Propagates errors from the noise model and from [`Self::sample_state`].
    pub fn sample_state_with_noise(
        &mut self,
        state: &Array1<Complex64>,
        noise_model: &dyn NoiseModel,
    ) -> Result<ShotResult> {
        let noisy_state = noise_model.apply_readout_noise(state)?;
        self.sample_state(&noisy_state)
    }

    /// Estimate the expectation value of `observable` on `state` by shot
    /// sampling each Pauli term independently.
    ///
    /// Only the real part of each term's coefficient is used — the observable
    /// is assumed Hermitian with real coefficients.
    ///
    /// # Errors
    ///
    /// Propagates errors from per-term sampling.
    pub fn sample_expectation(
        &mut self,
        state: &Array1<Complex64>,
        observable: &PauliOperatorSum,
    ) -> Result<ExpectationResult> {
        let mut expectation_values = Vec::new();
        let mut variances = Vec::new();
        for term in &observable.terms {
            let term_result = self.sample_pauli_expectation(state, term)?;
            expectation_values.push(term_result.expectation * term.coefficient.re);
            // Var(c * X) = c^2 * Var(X); terms are sampled independently,
            // so their variances add.
            variances.push(term_result.variance * term.coefficient.re.powi(2));
        }
        let total_expectation: f64 = expectation_values.iter().sum();
        let total_variance: f64 = variances.iter().sum();
        let standard_error = (total_variance / self.config.num_shots as f64).sqrt();
        let z_score = self.get_z_score(self.config.confidence_level);
        let confidence_interval = (
            z_score.mul_add(-standard_error, total_expectation),
            z_score.mul_add(standard_error, total_expectation),
        );
        Ok(ExpectationResult {
            expectation: total_expectation,
            variance: total_variance,
            standard_error,
            confidence_interval,
            num_shots: self.config.num_shots,
        })
    }

    /// Sample the expectation of a single Pauli string via repeated basis
    /// measurements, returning mean, (population) variance, standard error
    /// and a normal-approximation confidence interval.
    ///
    /// # Errors
    ///
    /// Propagates errors from [`Self::measure_pauli_basis`].
    fn sample_pauli_expectation(
        &mut self,
        state: &Array1<Complex64>,
        pauli_string: &PauliString,
    ) -> Result<ExpectationResult> {
        let mut measurements = Vec::with_capacity(self.config.num_shots);
        for _ in 0..self.config.num_shots {
            let outcome = self.measure_pauli_basis(state, pauli_string)?;
            measurements.push(outcome);
        }
        // Guard against zero shots so the divisions below cannot yield NaN.
        if measurements.is_empty() {
            return Ok(ExpectationResult {
                expectation: 0.0,
                variance: 0.0,
                standard_error: 0.0,
                confidence_interval: (0.0, 0.0),
                num_shots: 0,
            });
        }
        let mean = measurements.iter().sum::<f64>() / measurements.len() as f64;
        // Population (biased) variance of the +/-1 outcomes.
        let variance = measurements.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
            / measurements.len() as f64;
        let standard_error = (variance / measurements.len() as f64).sqrt();
        let z_score = self.get_z_score(self.config.confidence_level);
        let confidence_interval = (
            z_score.mul_add(-standard_error, mean),
            z_score.mul_add(standard_error, mean),
        );
        Ok(ExpectationResult {
            expectation: mean,
            variance,
            standard_error,
            confidence_interval,
            num_shots: measurements.len(),
        })
    }

    /// Measure a single shot of `pauli_string` on `state`.
    ///
    /// NOTE(review): placeholder implementation — it ignores both arguments
    /// and returns a fair +/-1 coin flip. A real implementation must rotate
    /// into the Pauli eigenbasis and sample the resulting distribution.
    fn measure_pauli_basis(
        &mut self,
        _state: &Array1<Complex64>,
        _pauli_string: &PauliString,
    ) -> Result<f64> {
        if self.rng.random::<f64>() < 0.5 {
            Ok(1.0)
        } else {
            Ok(-1.0)
        }
    }

    /// Inverse-CDF sample of an index from `probabilities` (assumed to sum to
    /// approximately 1). Falls back to the last index so floating-point
    /// rounding in the cumulative sum cannot leave the loop without a result.
    ///
    /// # Errors
    ///
    /// Returns `SimulatorError::InvalidInput` for an empty distribution
    /// (previously this panicked on the `len() - 1` fallback).
    fn sample_from_distribution(&mut self, probabilities: &[f64]) -> Result<usize> {
        if probabilities.is_empty() {
            return Err(SimulatorError::InvalidInput(
                "probability distribution must be non-empty".to_string(),
            ));
        }
        let random_value = self.rng.random::<f64>();
        let mut cumulative = 0.0;
        for (i, &prob) in probabilities.iter().enumerate() {
            cumulative += prob;
            if random_value <= cumulative {
                return Ok(i);
            }
        }
        Ok(probabilities.len() - 1)
    }

    /// Draw `n_samples` from a categorical distribution and return per-bin
    /// counts. `probabilities` need not be normalized; it is rescaled by its
    /// sum before sampling.
    ///
    /// # Errors
    ///
    /// Returns `SimulatorError::InvalidInput` when the distribution is empty
    /// or does not sum to a positive finite value.
    pub fn sample_multinomial(
        &mut self,
        probabilities: &[f64],
        n_samples: usize,
    ) -> Result<Vec<usize>> {
        if probabilities.is_empty() {
            return Err(SimulatorError::InvalidInput(
                "probability distribution must be non-empty".to_string(),
            ));
        }
        let total: f64 = probabilities.iter().sum();
        if total <= 0.0 || !total.is_finite() {
            return Err(SimulatorError::InvalidInput(format!(
                "probability distribution must sum to a positive finite value, got {total}"
            )));
        }
        let normalised: Vec<f64> = probabilities.iter().map(|p| p / total).collect();
        // Build the CDF once; each draw is then a binary search.
        let mut cdf = Vec::with_capacity(normalised.len());
        let mut running = 0.0;
        for &p in &normalised {
            running += p;
            cdf.push(running);
        }
        // Pin the final entry to exactly 1.0 so floating-point drift cannot
        // leave a uniform draw above the whole CDF.
        if let Some(last) = cdf.last_mut() {
            *last = 1.0;
        }
        let k = probabilities.len();
        let mut counts = vec![0usize; k];
        for _ in 0..n_samples {
            let u: f64 = self.rng.random();
            // First index whose cumulative probability reaches u.
            let idx = cdf.partition_point(|&c| c < u).min(k - 1);
            counts[idx] += 1;
        }
        Ok(counts)
    }

    /// Self-normalized importance sampling of `E_p[f]` using draws from the
    /// proposal `q`. Both distributions are normalized internally; returns
    /// the estimate, the effective sample size `1 / sum(w_i^2)` of the
    /// normalized weights, and the spread of per-sample weighted
    /// contributions as a variance proxy.
    ///
    /// # Errors
    ///
    /// Returns `SimulatorError::InvalidInput` for empty/mismatched arrays,
    /// non-positive or non-finite totals, or a proposal that fails to cover
    /// the target's support.
    pub fn importance_sampling(
        &mut self,
        target_probs: &[f64],
        proposal_probs: &[f64],
        f_values: &[f64],
        n_samples: usize,
    ) -> Result<ImportanceSamplingResult> {
        let k = target_probs.len();
        if k == 0 {
            return Err(SimulatorError::InvalidInput(
                "distributions must be non-empty".to_string(),
            ));
        }
        if proposal_probs.len() != k || f_values.len() != k {
            return Err(SimulatorError::InvalidInput(format!(
                "all arrays must have the same length (got {k}, {}, {})",
                proposal_probs.len(),
                f_values.len()
            )));
        }
        let p_total: f64 = target_probs.iter().sum();
        let q_total: f64 = proposal_probs.iter().sum();
        if p_total <= 0.0 || !p_total.is_finite() {
            return Err(SimulatorError::InvalidInput(
                "target distribution must sum to a positive finite value".to_string(),
            ));
        }
        if q_total <= 0.0 || !q_total.is_finite() {
            return Err(SimulatorError::InvalidInput(
                "proposal distribution must sum to a positive finite value".to_string(),
            ));
        }
        let p_norm: Vec<f64> = target_probs.iter().map(|p| p / p_total).collect();
        let q_norm: Vec<f64> = proposal_probs.iter().map(|q| q / q_total).collect();
        // Support check: the proposal must be non-zero wherever the target is.
        for i in 0..k {
            if p_norm[i] > 1e-15 && q_norm[i] < 1e-300 {
                return Err(SimulatorError::InvalidInput(format!(
                    "proposal probability at index {i} is zero where target is non-zero; \
                     importance sampling requires the proposal to cover the target's support"
                )));
            }
        }
        let samples: Vec<usize> = (0..n_samples)
            .map(|_| self.sample_from_distribution(&q_norm))
            .collect::<Result<Vec<_>>>()?;
        // Raw importance weights p(x)/q(x) for each drawn index.
        let mut raw_weights: Vec<f64> = samples
            .iter()
            .map(|&idx| {
                let q = q_norm[idx];
                if q < 1e-300 {
                    0.0
                } else {
                    p_norm[idx] / q
                }
            })
            .collect();
        let w_sum: f64 = raw_weights.iter().sum();
        if w_sum <= 0.0 {
            return Err(SimulatorError::InvalidInput(
                "all importance weights are zero; the proposal does not cover the target"
                    .to_string(),
            ));
        }
        // Self-normalize the weights so they sum to 1.
        for w in &mut raw_weights {
            *w /= w_sum;
        }
        let estimate: f64 = samples
            .iter()
            .zip(raw_weights.iter())
            .map(|(&idx, &w)| w * f_values[idx])
            .sum();
        // Kish effective sample size of the normalized weights.
        let w_sq_sum: f64 = raw_weights.iter().map(|w| w * w).sum();
        let effective_sample_size = if w_sq_sum > 0.0 { 1.0 / w_sq_sum } else { 0.0 };
        let weighted_f: Vec<f64> = samples
            .iter()
            .zip(raw_weights.iter())
            .map(|(&idx, &w)| w * f_values[idx])
            .collect();
        let mean_wf: f64 = weighted_f.iter().sum::<f64>() / n_samples as f64;
        let variance: f64 = weighted_f
            .iter()
            .map(|&wf| (wf - mean_wf).powi(2))
            .sum::<f64>()
            / (n_samples as f64 - 1.0).max(1.0);
        Ok(ImportanceSamplingResult {
            estimate,
            effective_sample_size,
            variance,
            n_samples,
        })
    }

    /// Aggregate raw outcomes into counts, empirical probabilities,
    /// normal-approximation confidence intervals, Shannon entropy (nats) and
    /// collision probability ("purity" of the measured distribution).
    fn compute_statistics(&self, outcomes: &[BitString]) -> Result<MeasurementStatistics> {
        // With zero shots there is nothing to aggregate; previously this
        // panicked indexing `outcomes[0]` while computing the mode.
        if outcomes.is_empty() {
            return Ok(MeasurementStatistics {
                counts: HashMap::new(),
                mode: BitString { bits: Vec::new() },
                probabilities: HashMap::new(),
                probability_variance: 0.0,
                confidence_intervals: HashMap::new(),
                entropy: 0.0,
                purity: 0.0,
            });
        }
        let mut counts = HashMap::new();
        let total_shots = outcomes.len() as f64;
        for outcome in outcomes {
            *counts.entry(outcome.clone()).or_insert(0) += 1;
        }
        // Most frequent outcome; ties broken by HashMap iteration order.
        let mode = counts
            .iter()
            .max_by_key(|(_, &count)| count)
            .map(|(outcome, _)| outcome.clone())
            .unwrap_or_else(|| BitString::from_int(0, outcomes[0].len()));
        let mut probabilities = HashMap::new();
        let mut confidence_intervals = HashMap::new();
        let z_score = self.get_z_score(self.config.confidence_level);
        for (outcome, &count) in &counts {
            let prob = count as f64 / total_shots;
            probabilities.insert(outcome.clone(), prob);
            // Wald interval for a binomial proportion, clamped to [0, 1].
            let std_error = (prob * (1.0 - prob) / total_shots).sqrt();
            let margin = z_score * std_error;
            confidence_intervals.insert(
                outcome.clone(),
                ((prob - margin).max(0.0), (prob + margin).min(1.0)),
            );
        }
        // Shannon entropy in nats, skipping zero-probability terms.
        let entropy = probabilities
            .values()
            .filter(|&&p| p > 0.0)
            .map(|&p| -p * p.ln())
            .sum::<f64>();
        let purity = probabilities.values().map(|&p| p * p).sum::<f64>();
        // Variance of the observed probabilities around the uniform mean
        // over observed outcomes (non-empty here by the guard above).
        let mean_prob = 1.0 / probabilities.len() as f64;
        let probability_variance = probabilities
            .values()
            .map(|&p| (p - mean_prob).powi(2))
            .sum::<f64>()
            / probabilities.len() as f64;
        Ok(MeasurementStatistics {
            counts,
            mode,
            probabilities,
            probability_variance,
            confidence_intervals,
            entropy,
            purity,
        })
    }

    /// Two-sided critical z value for common confidence levels (90/95/99%);
    /// any other level falls back to 1.96 (95%).
    fn get_z_score(&self, confidence_level: f64) -> f64 {
        match (confidence_level * 100.0) as i32 {
            90 => 1.645,
            95 => 1.96,
            99 => 2.576,
            _ => 1.96,
        }
    }

    /// Repeatedly sample `observable` in batches until the last three batch
    /// means pairwise agree within `convergence_tolerance`, or the budget
    /// `max_shots_for_convergence` is exhausted.
    ///
    /// # Errors
    ///
    /// Propagates errors from [`Self::sample_expectation`]; the configured
    /// `num_shots` is restored even on error.
    pub fn estimate_convergence(
        &mut self,
        state: &Array1<Complex64>,
        observable: &PauliOperatorSum,
    ) -> Result<ConvergenceResult> {
        let mut expectation_history = Vec::new();
        let mut variance_history = Vec::new();
        let mut shots_taken = 0;
        let mut converged = false;
        while shots_taken < self.config.max_shots_for_convergence && !converged {
            let batch_shots = self
                .config
                .convergence_check_interval
                .min(self.config.max_shots_for_convergence - shots_taken);
            if batch_shots == 0 {
                // A zero check interval would otherwise loop forever.
                break;
            }
            // Temporarily shrink num_shots to the batch size; restore it
            // before propagating any error so the config is never corrupted.
            let original_shots = self.config.num_shots;
            self.config.num_shots = batch_shots;
            let sampled = self.sample_expectation(state, observable);
            self.config.num_shots = original_shots;
            let result = sampled?;
            expectation_history.push(result.expectation);
            variance_history.push(result.variance);
            shots_taken += batch_shots;
            if expectation_history.len() >= 3 {
                // Largest successive difference among the last three means.
                let recent_values = &expectation_history[expectation_history.len() - 3..];
                let max_diff = recent_values
                    .iter()
                    .zip(recent_values.iter().skip(1))
                    .map(|(a, b)| (a - b).abs())
                    .fold(0.0, f64::max);
                if max_diff < self.config.convergence_tolerance {
                    converged = true;
                }
            }
        }
        let final_expectation = expectation_history.last().copied().unwrap_or(0.0);
        // Sample standard deviation of the batch means, reported as a crude
        // convergence-rate proxy.
        let expectation_std = if expectation_history.len() > 1 {
            let mean = expectation_history.iter().sum::<f64>() / expectation_history.len() as f64;
            (expectation_history
                .iter()
                .map(|x| (x - mean).powi(2))
                .sum::<f64>()
                / (expectation_history.len() - 1) as f64)
                .sqrt()
        } else {
            0.0
        };
        Ok(ConvergenceResult {
            converged,
            shots_taken,
            final_expectation,
            expectation_history,
            variance_history,
            convergence_rate: expectation_std,
        })
    }
}
/// Result of a sampled expectation-value estimate.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExpectationResult {
    /// Estimated expectation value.
    pub expectation: f64,
    /// Estimated variance of the sampled outcomes.
    pub variance: f64,
    /// Standard error of the mean (sqrt(variance / shots)).
    pub standard_error: f64,
    /// Normal-approximation confidence interval (lo, hi) around the mean.
    pub confidence_interval: (f64, f64),
    /// Number of shots used for the estimate.
    pub num_shots: usize,
}
/// Result of batched convergence estimation for an expectation value.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConvergenceResult {
    /// Whether the batch means stabilized within the configured tolerance.
    pub converged: bool,
    /// Total number of shots consumed across all batches.
    pub shots_taken: usize,
    /// Expectation value from the final batch (0.0 if no batch ran).
    pub final_expectation: f64,
    /// Per-batch expectation values, in order.
    pub expectation_history: Vec<f64>,
    /// Per-batch variances, in order.
    pub variance_history: Vec<f64>,
    /// Standard deviation of the batch means (a crude convergence proxy).
    pub convergence_rate: f64,
}
/// Readout-noise abstraction applied before sampling a state vector.
pub trait NoiseModel: Send + Sync {
    /// Return the state after applying the model's readout channel.
    fn apply_readout_noise(&self, state: &Array1<Complex64>) -> Result<Array1<Complex64>>;
    /// Readout error probability for the given qubit index.
    fn readout_error_probability(&self, qubit: usize) -> f64;
}
/// Minimal readout-noise model holding one error probability per qubit.
#[derive(Debug, Clone)]
pub struct SimpleReadoutNoise {
    /// Per-qubit readout error probabilities, indexed by qubit.
    pub error_probs: Vec<f64>,
}
impl SimpleReadoutNoise {
    /// Build a model where all `num_qubits` qubits share the same readout
    /// error probability `error_prob`.
    #[must_use]
    pub fn uniform(num_qubits: usize, error_prob: f64) -> Self {
        let error_probs = std::iter::repeat(error_prob).take(num_qubits).collect();
        Self { error_probs }
    }
}
impl NoiseModel for SimpleReadoutNoise {
    /// NOTE(review): placeholder — the amplitudes are returned unchanged
    /// (a copy of the input); no readout channel is actually applied here.
    fn apply_readout_noise(&self, state: &Array1<Complex64>) -> Result<Array1<Complex64>> {
        let unchanged = state.clone();
        Ok(unchanged)
    }

    /// Per-qubit readout error probability; 0.0 for out-of-range indices.
    fn readout_error_probability(&self, qubit: usize) -> f64 {
        match self.error_probs.get(qubit) {
            Some(&p) => p,
            None => 0.0,
        }
    }
}
/// Post-processing helpers for shot results: power analysis, shot-count
/// planning and a chi-square comparison of two measurement histograms.
pub mod analysis {
    use super::{ComparisonResult, ShotResult};

    /// Approximate power of a z-test to detect `effect_size` with
    /// `num_shots` samples at the given `significance_level`.
    ///
    /// NOTE(review): the standard error is taken as `1/sqrt(n)`, i.e. a
    /// unit-variance assumption — confirm this matches the intended estimator.
    #[must_use]
    pub fn statistical_power(effect_size: f64, num_shots: usize, significance_level: f64) -> f64 {
        let standard_error = 1.0 / (num_shots as f64).sqrt();
        // Two-sided critical z for common alpha levels (fallback: 5%).
        let z_critical = match (significance_level * 100.0) as i32 {
            1 => 2.576,
            5 => 1.96,
            10 => 1.645,
            _ => 1.96,
        };
        let z_beta = (effect_size / standard_error) - z_critical;
        normal_cdf(z_beta)
    }

    /// Shots needed so a binomial proportion estimate has half-width
    /// `desired_error` at `confidence_level`, using the worst case p = 0.5:
    /// n = z^2 / (4 * e^2).
    #[must_use]
    pub fn required_shots_for_precision(desired_error: f64, confidence_level: f64) -> usize {
        let z_score = match (confidence_level * 100.0) as i32 {
            90 => 1.645,
            95 => 1.96,
            99 => 2.576,
            _ => 1.96,
        };
        let n = (z_score * z_score) / (4.0 * desired_error * desired_error);
        n.ceil() as usize
    }

    /// Pooled chi-square comparison of two shot histograms.
    ///
    /// Cells whose pooled expected count is <= 5 are skipped (the standard
    /// chi-square validity rule). The returned `p_value` is a coarse
    /// placeholder (0.01 when above the critical value, 0.1 otherwise),
    /// not an exact tail probability.
    #[must_use]
    pub fn compare_shot_results(
        result1: &ShotResult,
        result2: &ShotResult,
        significance_level: f64,
    ) -> ComparisonResult {
        let mut chi_square = 0.0;
        let mut degrees_of_freedom: usize = 0;
        // Union of all outcomes observed in either result.
        let mut all_outcomes = std::collections::HashSet::new();
        all_outcomes.extend(result1.statistics.counts.keys());
        all_outcomes.extend(result2.statistics.counts.keys());
        for outcome in &all_outcomes {
            let count1 = result1.statistics.counts.get(outcome).copied().unwrap_or(0) as f64;
            let count2 = result2.statistics.counts.get(outcome).copied().unwrap_or(0) as f64;
            let total1 = result1.num_shots as f64;
            let total2 = result2.num_shots as f64;
            // Expected counts under the pooled (null) distribution.
            let expected1 = (count1 + count2) * total1 / (total1 + total2);
            let expected2 = (count1 + count2) * total2 / (total1 + total2);
            if expected1 > 5.0 && expected2 > 5.0 {
                chi_square += (count1 - expected1).powi(2) / expected1;
                chi_square += (count2 - expected2).powi(2) / expected2;
                degrees_of_freedom += 1;
            }
        }
        degrees_of_freedom = degrees_of_freedom.saturating_sub(1);
        // Chi-square critical values (df = 1) for common alpha levels.
        let critical_value = match (significance_level * 100.0) as i32 {
            1 => 6.635,
            5 => 3.841,
            10 => 2.706,
            _ => 3.841,
        };
        ComparisonResult {
            chi_square,
            degrees_of_freedom,
            p_value: if chi_square > critical_value {
                0.01
            } else {
                0.1
            },
            significant: chi_square > critical_value,
        }
    }

    /// Standard normal CDF via the error function.
    fn normal_cdf(x: f64) -> f64 {
        0.5 * (1.0 + erf(x / 2.0_f64.sqrt()))
    }

    /// Abramowitz & Stegun 7.1.26 rational approximation of erf
    /// (maximum absolute error ~1.5e-7).
    fn erf(x: f64) -> f64 {
        let a1 = 0.254_829_592;
        let a2 = -0.284_496_736;
        let a3 = 1.421_413_741;
        let a4 = -1.453_152_027;
        let a5 = 1.061_405_429;
        let p = 0.327_591_1;
        // erf is odd: evaluate for |x| and restore the sign at the end.
        let sign = if x < 0.0 { -1.0 } else { 1.0 };
        let x = x.abs();
        let t = 1.0 / (1.0 + p * x);
        // y = 1 - (a1 t + a2 t^2 + a3 t^3 + a4 t^4 + a5 t^5) * exp(-x^2).
        let y = ((a5 * t + a4).mul_add(t, a3).mul_add(t, a2).mul_add(t, a1) * t)
            .mul_add(-(-x * x).exp(), 1.0);
        sign * y
    }
}
/// Result of a self-normalized importance-sampling estimate.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportanceSamplingResult {
    /// Self-normalized estimate of the target expectation.
    pub estimate: f64,
    /// Kish effective sample size, 1 / sum of squared normalized weights.
    pub effective_sample_size: f64,
    /// Sample variance of the per-draw weighted contributions.
    pub variance: f64,
    /// Number of proposal draws used.
    pub n_samples: usize,
}
/// Result of comparing two shot histograms with a chi-square test.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComparisonResult {
    /// Chi-square statistic over the cells that passed the validity rule.
    pub chi_square: f64,
    /// Degrees of freedom (valid cells minus one).
    pub degrees_of_freedom: usize,
    /// Coarse placeholder p-value (0.01 or 0.1), not an exact tail value.
    pub p_value: f64,
    /// Whether the statistic exceeded the critical value.
    pub significant: bool,
}
#[cfg(test)]
#[allow(clippy::field_reassign_with_default)]
mod tests {
    use super::*;

    // from_int is little-endian: 5 = 0b0101 -> bits [1, 0, 1, 0].
    #[test]
    fn test_bit_string() {
        let bs = BitString::from_int(5, 4);
        assert_eq!(bs.bits, vec![1, 0, 1, 0]);
        assert_eq!(bs.to_int(), 5);
        assert_eq!(bs.weight(), 2);
    }

    // The sampler keeps the config it was constructed with.
    #[test]
    fn test_sampler_creation() {
        let config = SamplingConfig::default();
        let sampler = QuantumSampler::new(config);
        assert_eq!(sampler.config.num_shots, 1024);
    }

    // Sampling |+> with a fixed seed should hit both outcomes in 100 shots.
    #[test]
    fn test_uniform_state_sampling() {
        let mut config = SamplingConfig::default();
        config.num_shots = 100;
        config.seed = Some(42);
        let mut sampler = QuantumSampler::new(config);
        let state = Array1::from_vec(vec![
            Complex64::new(1.0 / 2.0_f64.sqrt(), 0.0),
            Complex64::new(1.0 / 2.0_f64.sqrt(), 0.0),
        ]);
        let result = sampler
            .sample_state(&state)
            .expect("Failed to sample state");
        assert_eq!(result.num_shots, 100);
        assert_eq!(result.outcomes.len(), 100);
        let has_zero = result.outcomes.iter().any(|bs| bs.to_int() == 0);
        let has_one = result.outcomes.iter().any(|bs| bs.to_int() == 1);
        assert!(has_zero && has_one);
    }

    // 1% half-width at 95% confidence: n = 1.96^2 / (4 * 0.0001) ~ 9604.
    #[test]
    fn test_required_shots_calculation() {
        let shots = analysis::required_shots_for_precision(0.01, 0.95);
        assert!(shots > 9000);
    }

    // A degenerate distribution should put every draw into the first bin.
    #[test]
    fn test_sample_multinomial_basic() {
        let mut config = SamplingConfig::default();
        config.seed = Some(123);
        let mut sampler = QuantumSampler::new(config);
        let probs = vec![1.0, 0.0, 0.0];
        let counts = sampler
            .sample_multinomial(&probs, 100)
            .expect("multinomial sampling should succeed");
        assert_eq!(counts.len(), 3);
        assert_eq!(counts[0], 100);
        assert_eq!(counts[1], 0);
        assert_eq!(counts[2], 0);
    }

    // A fair coin over 1000 draws should land near 50/50.
    #[test]
    fn test_sample_multinomial_uniform() {
        let mut config = SamplingConfig::default();
        config.seed = Some(42);
        let mut sampler = QuantumSampler::new(config);
        let probs = vec![0.5, 0.5];
        let counts = sampler
            .sample_multinomial(&probs, 1000)
            .expect("multinomial sampling should succeed");
        assert_eq!(counts.len(), 2);
        assert_eq!(counts[0] + counts[1], 1000);
        assert!(counts[0] > 400, "outcome 0 should appear >40% of the time");
        assert!(counts[1] > 400, "outcome 1 should appear >40% of the time");
    }

    // Bin counts must always partition the requested number of draws.
    #[test]
    fn test_sample_multinomial_counts_sum_to_n() {
        let mut config = SamplingConfig::default();
        config.seed = Some(7);
        let mut sampler = QuantumSampler::new(config);
        let probs = vec![0.1, 0.3, 0.2, 0.4];
        let n = 500;
        let counts = sampler
            .sample_multinomial(&probs, n)
            .expect("multinomial sampling should succeed");
        let total: usize = counts.iter().sum();
        assert_eq!(total, n, "all counts must sum to n_samples");
    }

    // With proposal == target, the IS estimate reduces to a plain mean of f
    // (true value 2.0 for uniform weights over [1, 2, 3]).
    #[test]
    fn test_importance_sampling_identity_proposal() {
        let mut config = SamplingConfig::default();
        config.seed = Some(99);
        let mut sampler = QuantumSampler::new(config);
        let probs = vec![1.0 / 3.0; 3];
        let f = vec![1.0, 2.0, 3.0];
        let result = sampler
            .importance_sampling(&probs, &probs, &f, 5000)
            .expect("importance sampling should succeed");
        assert!(
            (result.estimate - 2.0).abs() < 0.3,
            "IS estimate should be near 2.0, got {}",
            result.estimate
        );
        assert!(
            result.effective_sample_size > 100.0,
            "ESS should be large for matched proposal"
        );
    }

    // Reweighting a uniform proposal toward target [0.9, 0.1] should
    // recover E_p[f] = 0.1 for the indicator f = [0, 1].
    #[test]
    fn test_importance_sampling_reweighting() {
        let mut config = SamplingConfig::default();
        config.seed = Some(55);
        let mut sampler = QuantumSampler::new(config);
        let target = vec![0.9, 0.1];
        let proposal = vec![0.5, 0.5];
        let f = vec![0.0, 1.0];
        let result = sampler
            .importance_sampling(&target, &proposal, &f, 10_000)
            .expect("importance sampling should succeed");
        assert!(
            (result.estimate - 0.1).abs() < 0.05,
            "IS estimate should be near 0.1, got {}",
            result.estimate
        );
    }
}