1//! Shot-based sampling with statistical analysis for quantum simulation.
2//!
3//! This module implements comprehensive shot-based sampling methods for quantum
4//! circuits, including measurement statistics, error analysis, and convergence
5//! detection for realistic quantum device simulation.
6
7use scirs2_core::ndarray::{Array1, Array2};
8use scirs2_core::random::prelude::*;
9use scirs2_core::random::ChaCha8Rng;
10use scirs2_core::random::{RngExt, SeedableRng};
11use scirs2_core::Complex64;
12use serde::{Deserialize, Serialize};
13use std::collections::HashMap;
14
15use crate::error::{Result, SimulatorError};
16use crate::pauli::{PauliOperatorSum, PauliString};
17use crate::statevector::StateVectorSimulator;
18
/// Shot-based measurement result
///
/// Produced by `QuantumSampler::sample_state`; `outcomes` contains exactly
/// `num_shots` entries.  When the sampler was configured with
/// `compute_statistics = false`, `statistics` holds empty/zeroed placeholders.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ShotResult {
    /// Measurement outcomes for each shot (one bit string per shot)
    pub outcomes: Vec<BitString>,
    /// Total number of shots
    pub num_shots: usize,
    /// Measurement statistics
    pub statistics: MeasurementStatistics,
    /// Sampling configuration used
    pub config: SamplingConfig,
}
31
32/// Bit string representation of measurement outcome
33#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
34pub struct BitString {
35    /// Bit values (0 or 1)
36    pub bits: Vec<u8>,
37}
38
39impl BitString {
40    /// Create from vector of booleans
41    #[must_use]
42    pub fn from_bools(bools: &[bool]) -> Self {
43        Self {
44            bits: bools.iter().map(|&b| u8::from(b)).collect(),
45        }
46    }
47
48    /// Convert to vector of booleans
49    #[must_use]
50    pub fn to_bools(&self) -> Vec<bool> {
51        self.bits.iter().map(|&b| b == 1).collect()
52    }
53
54    /// Convert to integer (little-endian)
55    #[must_use]
56    pub fn to_int(&self) -> usize {
57        self.bits
58            .iter()
59            .enumerate()
60            .map(|(i, &bit)| (bit as usize) << i)
61            .sum()
62    }
63
64    /// Create from integer (little-endian)
65    #[must_use]
66    pub fn from_int(mut value: usize, num_bits: usize) -> Self {
67        let mut bits = Vec::with_capacity(num_bits);
68        for _ in 0..num_bits {
69            bits.push((value & 1) as u8);
70            value >>= 1;
71        }
72        Self { bits }
73    }
74
75    /// Number of bits
76    #[must_use]
77    pub fn len(&self) -> usize {
78        self.bits.len()
79    }
80
81    /// Check if empty
82    #[must_use]
83    pub fn is_empty(&self) -> bool {
84        self.bits.is_empty()
85    }
86
87    /// Hamming weight (number of 1s)
88    #[must_use]
89    pub fn weight(&self) -> usize {
90        self.bits.iter().map(|&b| b as usize).sum()
91    }
92
93    /// Hamming distance to another bit string
94    #[must_use]
95    pub fn distance(&self, other: &Self) -> usize {
96        if self.len() != other.len() {
97            return usize::MAX; // Invalid comparison
98        }
99        self.bits
100            .iter()
101            .zip(&other.bits)
102            .map(|(&a, &b)| (a ^ b) as usize)
103            .sum()
104    }
105}
106
107impl std::fmt::Display for BitString {
108    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
109        for &bit in &self.bits {
110            write!(f, "{bit}")?;
111        }
112        Ok(())
113    }
114}
115
/// Measurement statistics
///
/// Empirical summary of a batch of shot outcomes, built by
/// `QuantumSampler::compute_statistics`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MeasurementStatistics {
    /// Frequency count for each observed outcome
    pub counts: HashMap<BitString, usize>,
    /// Most frequent outcome
    pub mode: BitString,
    /// Relative-frequency probability estimate for each observed outcome
    pub probabilities: HashMap<BitString, f64>,
    /// Variance of the observed outcome probabilities around uniform (1/k)
    pub probability_variance: f64,
    /// Per-outcome confidence intervals (lower, upper), clamped to [0, 1]
    pub confidence_intervals: HashMap<BitString, (f64, f64)>,
    /// Shannon entropy (in nats) of the empirical distribution
    pub entropy: f64,
    /// Purity of the measurement distribution (sum of squared probabilities)
    pub purity: f64,
}
134
/// Sampling configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SamplingConfig {
    /// Number of shots to take (default: 1024)
    pub num_shots: usize,
    /// Random seed for reproducibility; `None` seeds from system entropy
    pub seed: Option<u64>,
    /// Confidence level for intervals, e.g. 0.95 for 95% (default: 0.95)
    pub confidence_level: f64,
    /// Whether to compute full statistics (default: true)
    pub compute_statistics: bool,
    /// Whether to estimate convergence (default: false)
    pub estimate_convergence: bool,
    /// Convergence check interval in shots (default: 100)
    pub convergence_check_interval: usize,
    /// Convergence tolerance on successive expectation estimates (default: 0.01)
    pub convergence_tolerance: f64,
    /// Maximum number of shots for convergence estimation (default: 10 000)
    pub max_shots_for_convergence: usize,
}

impl Default for SamplingConfig {
    fn default() -> Self {
        Self {
            num_shots: 1024,
            seed: None,
            confidence_level: 0.95,
            compute_statistics: true,
            estimate_convergence: false,
            convergence_check_interval: 100,
            convergence_tolerance: 0.01,
            max_shots_for_convergence: 10_000,
        }
    }
}
170
/// Shot-based quantum sampler
///
/// Owns a seedable ChaCha8 RNG so that sampling runs are reproducible when a
/// seed is supplied in the configuration.
pub struct QuantumSampler {
    /// Random number generator
    rng: ChaCha8Rng,
    /// Current configuration
    config: SamplingConfig,
}
178
179impl QuantumSampler {
180    /// Create new sampler with configuration
181    #[must_use]
182    pub fn new(config: SamplingConfig) -> Self {
183        let rng = if let Some(seed) = config.seed {
184            ChaCha8Rng::seed_from_u64(seed)
185        } else {
186            ChaCha8Rng::from_rng(&mut thread_rng())
187        };
188
189        Self { rng, config }
190    }
191
192    /// Sample measurements from a quantum state
193    pub fn sample_state(&mut self, state: &Array1<Complex64>) -> Result<ShotResult> {
194        let num_qubits = (state.len() as f64).log2() as usize;
195        if 1 << num_qubits != state.len() {
196            return Err(SimulatorError::InvalidInput(
197                "State vector dimension must be a power of 2".to_string(),
198            ));
199        }
200
201        // Compute probability distribution
202        let probabilities: Vec<f64> = state.iter().map(scirs2_core::Complex::norm_sqr).collect();
203
204        // Validate normalization
205        let total_prob: f64 = probabilities.iter().sum();
206        if (total_prob - 1.0).abs() > 1e-10 {
207            return Err(SimulatorError::InvalidInput(format!(
208                "State vector not normalized: total probability = {total_prob}"
209            )));
210        }
211
212        // Sample outcomes
213        let mut outcomes = Vec::with_capacity(self.config.num_shots);
214        for _ in 0..self.config.num_shots {
215            let sample = self.sample_from_distribution(&probabilities)?;
216            outcomes.push(BitString::from_int(sample, num_qubits));
217        }
218
219        // Compute statistics if requested
220        let statistics = if self.config.compute_statistics {
221            self.compute_statistics(&outcomes)?
222        } else {
223            MeasurementStatistics {
224                counts: HashMap::new(),
225                mode: BitString::from_int(0, num_qubits),
226                probabilities: HashMap::new(),
227                probability_variance: 0.0,
228                confidence_intervals: HashMap::new(),
229                entropy: 0.0,
230                purity: 0.0,
231            }
232        };
233
234        Ok(ShotResult {
235            outcomes,
236            num_shots: self.config.num_shots,
237            statistics,
238            config: self.config.clone(),
239        })
240    }
241
242    /// Sample measurements from a state with noise
243    pub fn sample_state_with_noise(
244        &mut self,
245        state: &Array1<Complex64>,
246        noise_model: &dyn NoiseModel,
247    ) -> Result<ShotResult> {
248        // Apply noise model to the state
249        let noisy_state = noise_model.apply_readout_noise(state)?;
250        self.sample_state(&noisy_state)
251    }
252
253    /// Sample expectation value of an observable
254    pub fn sample_expectation(
255        &mut self,
256        state: &Array1<Complex64>,
257        observable: &PauliOperatorSum,
258    ) -> Result<ExpectationResult> {
259        let mut expectation_values = Vec::new();
260        let mut variances = Vec::new();
261
262        // Sample each Pauli term separately
263        for term in &observable.terms {
264            let term_result = self.sample_pauli_expectation(state, term)?;
265            expectation_values.push(term_result.expectation * term.coefficient.re);
266            variances.push(term_result.variance * term.coefficient.re.powi(2));
267        }
268
269        // Combine results
270        let total_expectation: f64 = expectation_values.iter().sum();
271        let total_variance: f64 = variances.iter().sum();
272        let standard_error = (total_variance / self.config.num_shots as f64).sqrt();
273
274        // Confidence interval
275        let z_score = self.get_z_score(self.config.confidence_level);
276        let confidence_interval = (
277            z_score.mul_add(-standard_error, total_expectation),
278            z_score.mul_add(standard_error, total_expectation),
279        );
280
281        Ok(ExpectationResult {
282            expectation: total_expectation,
283            variance: total_variance,
284            standard_error,
285            confidence_interval,
286            num_shots: self.config.num_shots,
287        })
288    }
289
290    /// Sample expectation value of a single Pauli string
291    fn sample_pauli_expectation(
292        &mut self,
293        state: &Array1<Complex64>,
294        pauli_string: &PauliString,
295    ) -> Result<ExpectationResult> {
296        // For Pauli measurements, eigenvalues are ±1
297        // We need to measure in the appropriate basis
298
299        let num_qubits = pauli_string.num_qubits;
300        let mut measurements = Vec::with_capacity(self.config.num_shots);
301
302        for _ in 0..self.config.num_shots {
303            // Measure each qubit in the appropriate Pauli basis
304            let outcome = self.measure_pauli_basis(state, pauli_string)?;
305            measurements.push(outcome);
306        }
307
308        // Compute statistics
309        let mean = measurements.iter().sum::<f64>() / measurements.len() as f64;
310        let variance = measurements.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
311            / measurements.len() as f64;
312
313        let standard_error = (variance / measurements.len() as f64).sqrt();
314        let z_score = self.get_z_score(self.config.confidence_level);
315        let confidence_interval = (
316            z_score.mul_add(-standard_error, mean),
317            z_score.mul_add(standard_error, mean),
318        );
319
320        Ok(ExpectationResult {
321            expectation: mean,
322            variance,
323            standard_error,
324            confidence_interval,
325            num_shots: measurements.len(),
326        })
327    }
328
329    /// Measure in Pauli basis (simplified implementation)
330    fn measure_pauli_basis(
331        &mut self,
332        _state: &Array1<Complex64>,
333        _pauli_string: &PauliString,
334    ) -> Result<f64> {
335        // Simplified implementation - return random ±1
336        // In practice, would need to transform state to measurement basis
337        if self.rng.random::<f64>() < 0.5 {
338            Ok(1.0)
339        } else {
340            Ok(-1.0)
341        }
342    }
343
344    /// Sample from discrete probability distribution
345    fn sample_from_distribution(&mut self, probabilities: &[f64]) -> Result<usize> {
346        let random_value = self.rng.random::<f64>();
347        let mut cumulative = 0.0;
348
349        for (i, &prob) in probabilities.iter().enumerate() {
350            cumulative += prob;
351            if random_value <= cumulative {
352                return Ok(i);
353            }
354        }
355
356        // Handle numerical errors - return last index
357        Ok(probabilities.len() - 1)
358    }
359
    /// Multinomial sampling: draw `n_samples` i.i.d. samples from a discrete
    /// probability distribution.
    ///
    /// The result is a count vector of the same length as `probabilities`, where
    /// `counts[i]` is the number of times outcome `i` was drawn.  Total counts
    /// sum to `n_samples`.
    ///
    /// The implementation builds the CDF once (O(k)) and draws each sample via
    /// binary search on it, i.e. O(k + n_samples · log k) overall in the number
    /// of outcomes k.  It is deterministic given the sampler's current RNG
    /// state.
    ///
    /// # Errors
    ///
    /// Returns `SimulatorError::InvalidInput` if `probabilities` is empty or
    /// does not sum to a positive finite value.  The input is renormalised, so
    /// unnormalised weight vectors are accepted.
    pub fn sample_multinomial(
        &mut self,
        probabilities: &[f64],
        n_samples: usize,
    ) -> Result<Vec<usize>> {
        if probabilities.is_empty() {
            return Err(SimulatorError::InvalidInput(
                "probability distribution must be non-empty".to_string(),
            ));
        }

        // Validate and normalise the distribution
        let total: f64 = probabilities.iter().sum();
        if total <= 0.0 || !total.is_finite() {
            return Err(SimulatorError::InvalidInput(format!(
                "probability distribution must sum to a positive finite value, got {total}"
            )));
        }

        let normalised: Vec<f64> = probabilities.iter().map(|p| p / total).collect();

        // Build the CDF once for the entire batch
        let mut cdf = Vec::with_capacity(normalised.len());
        let mut running = 0.0;
        for &p in &normalised {
            running += p;
            cdf.push(running);
        }
        // Force the last entry to exactly 1.0 to avoid floating-point overshoot
        if let Some(last) = cdf.last_mut() {
            *last = 1.0;
        }

        let k = probabilities.len();
        let mut counts = vec![0usize; k];

        for _ in 0..n_samples {
            let u: f64 = self.rng.random();
            // Binary search on the CDF for O(log k) per sample
            let idx = cdf.partition_point(|&c| c < u).min(k - 1);
            counts[idx] += 1;
        }

        Ok(counts)
    }
415
416    /// Importance sampling: estimate the expectation E_p\[f\] using samples drawn
417    /// from a proposal distribution q.
418    ///
419    /// Given a target distribution `target_probs` (unnormalised is fine) and a
420    /// proposal distribution `proposal_probs` (must be strictly positive wherever
421    /// `target_probs` is non-zero), this method:
422    ///
423    /// 1. Draws `n_samples` indices from `proposal_probs`.
424    /// 2. Evaluates the function values `f_values[i]` at each outcome `i`.
425    /// 3. Computes the self-normalised importance-sampling estimator
426    ///    `Ê = Σ w̃_i f_i` where `w̃_i = w_i / Σ w_j` and `w_i = p_i / q_i`.
427    ///
428    /// Returns `(estimate, effective_sample_size)`.  The effective sample size
429    /// (ESS) measures how many i.i.d. samples from `target_probs` the weighted
430    /// sample is worth: `ESS = (Σ w̃_i)² / Σ w̃_i²`.
431    ///
432    /// # Errors
433    ///
434    /// Returns an error if the distributions have different lengths, if
435    /// `proposal_probs` has a zero entry where `target_probs` is non-zero, or
436    /// if either distribution is empty.
437    pub fn importance_sampling(
438        &mut self,
439        target_probs: &[f64],
440        proposal_probs: &[f64],
441        f_values: &[f64],
442        n_samples: usize,
443    ) -> Result<ImportanceSamplingResult> {
444        let k = target_probs.len();
445        if k == 0 {
446            return Err(SimulatorError::InvalidInput(
447                "distributions must be non-empty".to_string(),
448            ));
449        }
450        if proposal_probs.len() != k || f_values.len() != k {
451            return Err(SimulatorError::InvalidInput(format!(
452                "all arrays must have the same length (got {k}, {}, {})",
453                proposal_probs.len(),
454                f_values.len()
455            )));
456        }
457
458        // Normalise both distributions
459        let p_total: f64 = target_probs.iter().sum();
460        let q_total: f64 = proposal_probs.iter().sum();
461        if p_total <= 0.0 || !p_total.is_finite() {
462            return Err(SimulatorError::InvalidInput(
463                "target distribution must sum to a positive finite value".to_string(),
464            ));
465        }
466        if q_total <= 0.0 || !q_total.is_finite() {
467            return Err(SimulatorError::InvalidInput(
468                "proposal distribution must sum to a positive finite value".to_string(),
469            ));
470        }
471
472        let p_norm: Vec<f64> = target_probs.iter().map(|p| p / p_total).collect();
473        let q_norm: Vec<f64> = proposal_probs.iter().map(|q| q / q_total).collect();
474
475        // Check support condition: p(i) > 0 ⇒ q(i) > 0
476        for i in 0..k {
477            if p_norm[i] > 1e-15 && q_norm[i] < 1e-300 {
478                return Err(SimulatorError::InvalidInput(format!(
479                    "proposal probability at index {i} is zero where target is non-zero; \
480                     importance sampling requires the proposal to cover the target's support"
481                )));
482            }
483        }
484
485        // Draw n_samples from the proposal
486        let samples: Vec<usize> = (0..n_samples)
487            .map(|_| self.sample_from_distribution(&q_norm))
488            .collect::<Result<Vec<_>>>()?;
489
490        // Compute unnormalised importance weights w_i = p(x_i) / q(x_i)
491        let mut raw_weights: Vec<f64> = samples
492            .iter()
493            .map(|&idx| {
494                let q = q_norm[idx];
495                if q < 1e-300 {
496                    0.0
497                } else {
498                    p_norm[idx] / q
499                }
500            })
501            .collect();
502
503        // Self-normalise: w̃_i = w_i / Σ w_j
504        let w_sum: f64 = raw_weights.iter().sum();
505        if w_sum <= 0.0 {
506            return Err(SimulatorError::InvalidInput(
507                "all importance weights are zero; the proposal does not cover the target"
508                    .to_string(),
509            ));
510        }
511        for w in &mut raw_weights {
512            *w /= w_sum;
513        }
514
515        // Compute self-normalised IS estimator
516        let estimate: f64 = samples
517            .iter()
518            .zip(raw_weights.iter())
519            .map(|(&idx, &w)| w * f_values[idx])
520            .sum();
521
522        // Effective sample size = 1 / Σ w̃_i²
523        let w_sq_sum: f64 = raw_weights.iter().map(|w| w * w).sum();
524        let effective_sample_size = if w_sq_sum > 0.0 { 1.0 / w_sq_sum } else { 0.0 };
525
526        // Estimate variance of the estimator using the sample variance of w̃_i f(x_i)
527        let weighted_f: Vec<f64> = samples
528            .iter()
529            .zip(raw_weights.iter())
530            .map(|(&idx, &w)| w * f_values[idx])
531            .collect();
532        let mean_wf: f64 = weighted_f.iter().sum::<f64>() / n_samples as f64;
533        let variance: f64 = weighted_f
534            .iter()
535            .map(|&wf| (wf - mean_wf).powi(2))
536            .sum::<f64>()
537            / (n_samples as f64 - 1.0).max(1.0);
538
539        Ok(ImportanceSamplingResult {
540            estimate,
541            effective_sample_size,
542            variance,
543            n_samples,
544        })
545    }
546
547    /// Compute measurement statistics
548    fn compute_statistics(&self, outcomes: &[BitString]) -> Result<MeasurementStatistics> {
549        let mut counts = HashMap::new();
550        let total_shots = outcomes.len() as f64;
551
552        // Count frequencies
553        for outcome in outcomes {
554            *counts.entry(outcome.clone()).or_insert(0) += 1;
555        }
556
557        // Find mode
558        let mode = counts.iter().max_by_key(|(_, &count)| count).map_or_else(
559            || BitString::from_int(0, outcomes[0].len()),
560            |(outcome, _)| outcome.clone(),
561        );
562
563        // Compute probabilities
564        let mut probabilities = HashMap::new();
565        let mut confidence_intervals = HashMap::new();
566        let z_score = self.get_z_score(self.config.confidence_level);
567
568        for (outcome, &count) in &counts {
569            let prob = count as f64 / total_shots;
570            probabilities.insert(outcome.clone(), prob);
571
572            // Binomial confidence interval
573            let std_error = (prob * (1.0 - prob) / total_shots).sqrt();
574            let margin = z_score * std_error;
575            confidence_intervals.insert(
576                outcome.clone(),
577                ((prob - margin).max(0.0), (prob + margin).min(1.0)),
578            );
579        }
580
581        // Compute entropy
582        let entropy = probabilities
583            .values()
584            .filter(|&&p| p > 0.0)
585            .map(|&p| -p * p.ln())
586            .sum::<f64>();
587
588        // Compute purity (sum of squared probabilities)
589        let purity = probabilities.values().map(|&p| p * p).sum::<f64>();
590
591        // Compute overall probability variance
592        let mean_prob = 1.0 / probabilities.len() as f64;
593        let probability_variance = probabilities
594            .values()
595            .map(|&p| (p - mean_prob).powi(2))
596            .sum::<f64>()
597            / probabilities.len() as f64;
598
599        Ok(MeasurementStatistics {
600            counts,
601            mode,
602            probabilities,
603            probability_variance,
604            confidence_intervals,
605            entropy,
606            purity,
607        })
608    }
609
610    /// Get z-score for confidence level
611    fn get_z_score(&self, confidence_level: f64) -> f64 {
612        // Simplified - use common values
613        match (confidence_level * 100.0) as i32 {
614            90 => 1.645,
615            95 => 1.96,
616            99 => 2.576,
617            _ => 1.96, // Default to 95%
618        }
619    }
620
621    /// Estimate convergence of sampling
622    pub fn estimate_convergence(
623        &mut self,
624        state: &Array1<Complex64>,
625        observable: &PauliOperatorSum,
626    ) -> Result<ConvergenceResult> {
627        let mut expectation_history = Vec::new();
628        let mut variance_history = Vec::new();
629        let mut shots_taken = 0;
630        let mut converged = false;
631
632        while shots_taken < self.config.max_shots_for_convergence && !converged {
633            // Take a batch of measurements
634            let batch_shots = self
635                .config
636                .convergence_check_interval
637                .min(self.config.max_shots_for_convergence - shots_taken);
638
639            // Temporarily adjust shot count for this batch
640            let original_shots = self.config.num_shots;
641            self.config.num_shots = batch_shots;
642
643            let result = self.sample_expectation(state, observable)?;
644
645            // Restore original shot count
646            self.config.num_shots = original_shots;
647
648            expectation_history.push(result.expectation);
649            variance_history.push(result.variance);
650            shots_taken += batch_shots;
651
652            // Check convergence
653            if expectation_history.len() >= 3 {
654                let recent_values = &expectation_history[expectation_history.len() - 3..];
655                let max_diff = recent_values
656                    .iter()
657                    .zip(recent_values.iter().skip(1))
658                    .map(|(a, b)| (a - b).abs())
659                    .fold(0.0, f64::max);
660
661                if max_diff < self.config.convergence_tolerance {
662                    converged = true;
663                }
664            }
665        }
666
667        // Compute final estimates
668        let final_expectation = expectation_history.last().copied().unwrap_or(0.0);
669        let expectation_std = if expectation_history.len() > 1 {
670            let mean = expectation_history.iter().sum::<f64>() / expectation_history.len() as f64;
671            (expectation_history
672                .iter()
673                .map(|x| (x - mean).powi(2))
674                .sum::<f64>()
675                / (expectation_history.len() - 1) as f64)
676                .sqrt()
677        } else {
678            0.0
679        };
680
681        Ok(ConvergenceResult {
682            converged,
683            shots_taken,
684            final_expectation,
685            expectation_history,
686            variance_history,
687            convergence_rate: expectation_std,
688        })
689    }
690}
691
/// Result of expectation value sampling
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExpectationResult {
    /// Expectation value estimate (sample mean)
    pub expectation: f64,
    /// Variance estimate
    pub variance: f64,
    /// Standard error of the expectation estimate
    pub standard_error: f64,
    /// Confidence interval (lower, upper) at the configured confidence level
    pub confidence_interval: (f64, f64),
    /// Number of shots used
    pub num_shots: usize,
}
706
/// Result of convergence estimation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConvergenceResult {
    /// Whether convergence was achieved within the shot budget
    pub converged: bool,
    /// Total shots taken across all batches
    pub shots_taken: usize,
    /// Final expectation value (last batch estimate)
    pub final_expectation: f64,
    /// History of per-batch expectation values
    pub expectation_history: Vec<f64>,
    /// History of per-batch variances
    pub variance_history: Vec<f64>,
    /// Convergence rate (standard deviation of the batch estimates)
    pub convergence_rate: f64,
}
723
/// Noise model trait for realistic sampling
///
/// Implementors transform an ideal state before measurement and report
/// per-qubit readout error rates.
pub trait NoiseModel: Send + Sync {
    /// Apply readout noise to measurements
    fn apply_readout_noise(&self, state: &Array1<Complex64>) -> Result<Array1<Complex64>>;

    /// Get readout error probability for qubit
    fn readout_error_probability(&self, qubit: usize) -> f64;
}
732
/// Simple readout noise model
///
/// Stores one readout-error probability per qubit; qubits beyond the stored
/// range are treated as noiseless.
#[derive(Debug, Clone)]
pub struct SimpleReadoutNoise {
    /// Error probability for each qubit
    pub error_probs: Vec<f64>,
}

impl SimpleReadoutNoise {
    /// Create uniform readout noise: the same `error_prob` on all `num_qubits` qubits
    #[must_use]
    pub fn uniform(num_qubits: usize, error_prob: f64) -> Self {
        Self {
            error_probs: vec![error_prob; num_qubits],
        }
    }
}
749
750impl NoiseModel for SimpleReadoutNoise {
751    fn apply_readout_noise(&self, state: &Array1<Complex64>) -> Result<Array1<Complex64>> {
752        // Simplified implementation - in practice would need proper POVM modeling
753        Ok(state.clone())
754    }
755
756    fn readout_error_probability(&self, qubit: usize) -> f64 {
757        self.error_probs.get(qubit).copied().unwrap_or(0.0)
758    }
759}
760
761/// Utility functions for shot sampling analysis
762pub mod analysis {
763    use super::{ComparisonResult, ShotResult};
764
765    /// Compute statistical power for detecting effect
766    #[must_use]
767    pub fn statistical_power(effect_size: f64, num_shots: usize, significance_level: f64) -> f64 {
768        // Simplified power analysis
769        let standard_error = 1.0 / (num_shots as f64).sqrt();
770        let z_critical = match (significance_level * 100.0) as i32 {
771            1 => 2.576,
772            5 => 1.96,
773            10 => 1.645,
774            _ => 1.96,
775        };
776
777        let z_beta = (effect_size / standard_error) - z_critical;
778        normal_cdf(z_beta)
779    }
780
781    /// Estimate required shots for desired precision
782    #[must_use]
783    pub fn required_shots_for_precision(desired_error: f64, confidence_level: f64) -> usize {
784        let z_score = match (confidence_level * 100.0) as i32 {
785            90 => 1.645,
786            95 => 1.96,
787            99 => 2.576,
788            _ => 1.96,
789        };
790
791        // For binomial: n ≥ (z²/4ε²) for worst case p=0.5
792        let n = (z_score * z_score) / (4.0 * desired_error * desired_error);
793        n.ceil() as usize
794    }
795
    /// Compare two shot results statistically
    ///
    /// Performs a pooled two-sample chi-square test over the union of observed
    /// outcomes; outcomes whose pooled expected count is ≤ 5 in either result
    /// are skipped.
    ///
    /// NOTE(review): the returned `p_value` is a coarse placeholder (0.01 or
    /// 0.1), not a true chi-square tail probability, and the critical values
    /// are taken from the df = 1 table regardless of the actual degrees of
    /// freedom — confirm callers rely only on `significant`.
    #[must_use]
    pub fn compare_shot_results(
        result1: &ShotResult,
        result2: &ShotResult,
        significance_level: f64,
    ) -> ComparisonResult {
        // Chi-square test for distribution comparison
        let mut chi_square = 0.0;
        let mut degrees_of_freedom: usize = 0;

        // Get all unique outcomes observed in either result
        let mut all_outcomes = std::collections::HashSet::new();
        all_outcomes.extend(result1.statistics.counts.keys());
        all_outcomes.extend(result2.statistics.counts.keys());

        for outcome in &all_outcomes {
            let count1 = result1.statistics.counts.get(outcome).copied().unwrap_or(0) as f64;
            let count2 = result2.statistics.counts.get(outcome).copied().unwrap_or(0) as f64;

            let total1 = result1.num_shots as f64;
            let total2 = result2.num_shots as f64;

            // Expected counts under the pooled (null) distribution
            let expected1 = (count1 + count2) * total1 / (total1 + total2);
            let expected2 = (count1 + count2) * total2 / (total1 + total2);

            if expected1 > 5.0 && expected2 > 5.0 {
                chi_square += (count1 - expected1).powi(2) / expected1;
                chi_square += (count2 - expected2).powi(2) / expected2;
                degrees_of_freedom += 1;
            }
        }

        degrees_of_freedom = degrees_of_freedom.saturating_sub(1);

        // Critical value for given significance level (simplified)
        let critical_value = match (significance_level * 100.0) as i32 {
            1 => 6.635, // Very rough approximation
            5 => 3.841,
            10 => 2.706,
            _ => 3.841,
        };

        ComparisonResult {
            chi_square,
            degrees_of_freedom,
            p_value: if chi_square > critical_value {
                0.01
            } else {
                0.1
            }, // Rough
            significant: chi_square > critical_value,
        }
    }
850
851    /// Normal CDF approximation
852    fn normal_cdf(x: f64) -> f64 {
853        // Simplified approximation
854        0.5 * (1.0 + erf(x / 2.0_f64.sqrt()))
855    }
856
    /// Error function approximation
    ///
    /// Abramowitz & Stegun formula 7.1.26:
    /// erf(x) ≈ 1 − (a1·t + a2·t² + … + a5·t⁵)·e^(−x²), t = 1/(1 + p·x),
    /// with maximum absolute error ≈ 1.5e-7 per A&S.  Odd symmetry
    /// erf(−x) = −erf(x) is applied explicitly via `sign`.
    fn erf(x: f64) -> f64 {
        // Abramowitz and Stegun approximation
        let a1 = 0.254_829_592;
        let a2 = -0.284_496_736;
        let a3 = 1.421_413_741;
        let a4 = -1.453_152_027;
        let a5 = 1.061_405_429;
        let p = 0.327_591_1;

        // Work on |x| and restore the sign at the end (erf is odd)
        let sign = if x < 0.0 { -1.0 } else { 1.0 };
        let x = x.abs();

        let t = 1.0 / (1.0 + p * x);
        // Fused-multiply-add Horner evaluation of the degree-5 polynomial in t,
        // then y = 1 − poly(t)·exp(−x²)
        let y = ((a5 * t + a4).mul_add(t, a3).mul_add(t, a2).mul_add(t, a1) * t)
            .mul_add(-(-x * x).exp(), 1.0);

        sign * y
    }
876}
877
/// Result of an importance sampling estimation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportanceSamplingResult {
    /// Self-normalised importance-sampling estimator Ê = Σ w̃_i f(x_i)
    pub estimate: f64,
    /// Effective sample size: ESS = 1 / Σ w̃_i² (higher is better)
    pub effective_sample_size: f64,
    /// Sample variance of the weighted estimator values
    pub variance: f64,
    /// Number of samples drawn from the proposal distribution
    pub n_samples: usize,
}
890
/// Result of a chi-square statistical comparison between two
/// measurement-count distributions.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComparisonResult {
    /// Chi-square statistic, summed over bins with sufficient expected counts
    pub chi_square: f64,
    /// Degrees of freedom (number of contributing bins minus one)
    pub degrees_of_freedom: usize,
    /// P-value — NOTE: currently a coarse placeholder (0.01 or 0.1)
    /// derived from a table lookup, not an exact chi-square tail probability
    pub p_value: f64,
    /// Whether the difference is significant (chi_square exceeded the
    /// critical value at the requested significance level)
    pub significant: bool,
}
903
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_bit_string() {
        // 5 = 0b0101; bits are stored little-endian, so 4 bits give [1, 0, 1, 0].
        let bs = BitString::from_int(5, 4);
        assert_eq!(bs.bits, vec![1, 0, 1, 0]);
        assert_eq!(bs.to_int(), 5);
        assert_eq!(bs.weight(), 2); // two bits set
    }

    #[test]
    fn test_sampler_creation() {
        let sampler = QuantumSampler::new(SamplingConfig::default());
        assert_eq!(sampler.config.num_shots, 1024);
    }

    #[test]
    fn test_uniform_state_sampling() {
        // Struct-update syntax avoids field reassignment after Default.
        let config = SamplingConfig {
            num_shots: 100,
            seed: Some(42),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        // Uniform superposition |+> = (|0> + |1>)/√2.
        let amp = 1.0 / 2.0_f64.sqrt();
        let state = Array1::from_vec(vec![Complex64::new(amp, 0.0), Complex64::new(amp, 0.0)]);

        let result = sampler
            .sample_state(&state)
            .expect("Failed to sample state");
        assert_eq!(result.num_shots, 100);
        assert_eq!(result.outcomes.len(), 100);

        // With 100 shots at p = 0.5 each, both outcomes should appear.
        let has_zero = result.outcomes.iter().any(|bs| bs.to_int() == 0);
        let has_one = result.outcomes.iter().any(|bs| bs.to_int() == 1);
        assert!(has_zero && has_one);
    }

    #[test]
    fn test_required_shots_calculation() {
        let shots = analysis::required_shots_for_precision(0.01, 0.95);
        // 1% precision at 95% confidence requires on the order of 10^4 shots.
        assert!(shots > 9000);
    }

    #[test]
    fn test_sample_multinomial_basic() {
        let config = SamplingConfig {
            seed: Some(123),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        // Degenerate distribution: all probability mass on outcome 0.
        let probs = vec![1.0, 0.0, 0.0];
        let counts = sampler
            .sample_multinomial(&probs, 100)
            .expect("multinomial sampling should succeed");

        assert_eq!(counts.len(), 3);
        assert_eq!(counts[0], 100);
        assert_eq!(counts[1], 0);
        assert_eq!(counts[2], 0);
    }

    #[test]
    fn test_sample_multinomial_uniform() {
        let config = SamplingConfig {
            seed: Some(42),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        let probs = vec![0.5, 0.5];
        let counts = sampler
            .sample_multinomial(&probs, 1000)
            .expect("multinomial sampling should succeed");

        assert_eq!(counts.len(), 2);
        assert_eq!(counts[0] + counts[1], 1000);
        // Both outcomes should appear with roughly equal frequency.
        assert!(counts[0] > 400, "outcome 0 should appear >40% of the time");
        assert!(counts[1] > 400, "outcome 1 should appear >40% of the time");
    }

    #[test]
    fn test_sample_multinomial_counts_sum_to_n() {
        let config = SamplingConfig {
            seed: Some(7),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        let probs = vec![0.1, 0.3, 0.2, 0.4];
        let n = 500;
        let counts = sampler
            .sample_multinomial(&probs, n)
            .expect("multinomial sampling should succeed");

        let total: usize = counts.iter().sum();
        assert_eq!(total, n, "all counts must sum to n_samples");
    }

    #[test]
    fn test_importance_sampling_identity_proposal() {
        let config = SamplingConfig {
            seed: Some(99),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        // When target == proposal, the IS estimator is just the Monte Carlo average.
        // E[f] under uniform(3) with f = [1, 2, 3] should be ~2.0
        let probs = vec![1.0 / 3.0; 3];
        let f = vec![1.0, 2.0, 3.0];

        let result = sampler
            .importance_sampling(&probs, &probs, &f, 5000)
            .expect("importance sampling should succeed");

        // The estimate should be close to 2.0 (E[f] under uniform)
        assert!(
            (result.estimate - 2.0).abs() < 0.3,
            "IS estimate should be near 2.0, got {}",
            result.estimate
        );
        assert!(
            result.effective_sample_size > 100.0,
            "ESS should be large for matched proposal"
        );
    }

    #[test]
    fn test_importance_sampling_reweighting() {
        let config = SamplingConfig {
            seed: Some(55),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        // target = [0.9, 0.1], proposal = [0.5, 0.5], f = [0, 1]
        // E_target[f] = 0*0.9 + 1*0.1 = 0.1
        let target = vec![0.9, 0.1];
        let proposal = vec![0.5, 0.5];
        let f = vec![0.0, 1.0];

        let result = sampler
            .importance_sampling(&target, &proposal, &f, 10_000)
            .expect("importance sampling should succeed");

        assert!(
            (result.estimate - 0.1).abs() < 0.05,
            "IS estimate should be near 0.1, got {}",
            result.estimate
        );
    }
}