1use scirs2_core::ndarray::{Array1, Array2};
8use scirs2_core::random::prelude::*;
9use scirs2_core::random::ChaCha8Rng;
10use scirs2_core::random::{RngExt, SeedableRng};
11use scirs2_core::Complex64;
12use serde::{Deserialize, Serialize};
13use std::collections::HashMap;
14
15use crate::error::{Result, SimulatorError};
16use crate::pauli::{PauliOperatorSum, PauliString};
17use crate::statevector::StateVectorSimulator;
18
/// Result of sampling a quantum state for a fixed number of shots.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ShotResult {
    /// Measured bit strings, one per shot, in the order they were drawn.
    pub outcomes: Vec<BitString>,
    /// Number of shots requested (equals `outcomes.len()`).
    pub num_shots: usize,
    /// Aggregate statistics; left empty/zeroed when
    /// `SamplingConfig::compute_statistics` is false.
    pub statistics: MeasurementStatistics,
    /// The configuration the sampler used to produce this result.
    pub config: SamplingConfig,
}
31
/// A classical measurement outcome as a sequence of bits.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct BitString {
    /// Bit values (0 or 1), stored least-significant bit first:
    /// `bits[i]` contributes `2^i` in `to_int`.
    pub bits: Vec<u8>,
}
38
39impl BitString {
40 #[must_use]
42 pub fn from_bools(bools: &[bool]) -> Self {
43 Self {
44 bits: bools.iter().map(|&b| u8::from(b)).collect(),
45 }
46 }
47
48 #[must_use]
50 pub fn to_bools(&self) -> Vec<bool> {
51 self.bits.iter().map(|&b| b == 1).collect()
52 }
53
54 #[must_use]
56 pub fn to_int(&self) -> usize {
57 self.bits
58 .iter()
59 .enumerate()
60 .map(|(i, &bit)| (bit as usize) << i)
61 .sum()
62 }
63
64 #[must_use]
66 pub fn from_int(mut value: usize, num_bits: usize) -> Self {
67 let mut bits = Vec::with_capacity(num_bits);
68 for _ in 0..num_bits {
69 bits.push((value & 1) as u8);
70 value >>= 1;
71 }
72 Self { bits }
73 }
74
75 #[must_use]
77 pub fn len(&self) -> usize {
78 self.bits.len()
79 }
80
81 #[must_use]
83 pub fn is_empty(&self) -> bool {
84 self.bits.is_empty()
85 }
86
87 #[must_use]
89 pub fn weight(&self) -> usize {
90 self.bits.iter().map(|&b| b as usize).sum()
91 }
92
93 #[must_use]
95 pub fn distance(&self, other: &Self) -> usize {
96 if self.len() != other.len() {
97 return usize::MAX; }
99 self.bits
100 .iter()
101 .zip(&other.bits)
102 .map(|(&a, &b)| (a ^ b) as usize)
103 .sum()
104 }
105}
106
107impl std::fmt::Display for BitString {
108 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
109 for &bit in &self.bits {
110 write!(f, "{bit}")?;
111 }
112 Ok(())
113 }
114}
115
/// Aggregate statistics computed from a set of measurement outcomes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MeasurementStatistics {
    /// Occurrence count of each observed bit string.
    pub counts: HashMap<BitString, usize>,
    /// The most frequently observed bit string.
    pub mode: BitString,
    /// Empirical probability of each observed bit string.
    pub probabilities: HashMap<BitString, f64>,
    /// Variance of the empirical probabilities around their uniform mean.
    pub probability_variance: f64,
    /// Normal-approximation (lower, upper) bounds per outcome, clamped to [0, 1].
    pub confidence_intervals: HashMap<BitString, (f64, f64)>,
    /// Shannon entropy of the empirical distribution, in nats (natural log).
    pub entropy: f64,
    /// Purity of the empirical distribution: sum of squared probabilities.
    pub purity: f64,
}
134
/// Configuration knobs for shot sampling.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SamplingConfig {
    /// Number of measurement shots per sampling run.
    pub num_shots: usize,
    /// RNG seed; `None` seeds from thread-local entropy (non-reproducible).
    pub seed: Option<u64>,
    /// Confidence level (e.g. 0.95) used for intervals and z-scores.
    pub confidence_level: f64,
    /// Whether to aggregate `MeasurementStatistics` after sampling.
    pub compute_statistics: bool,
    /// NOTE(review): this flag is not consulted anywhere in this file;
    /// confirm whether callers elsewhere read it or it is vestigial.
    pub estimate_convergence: bool,
    /// Shots per batch while checking convergence.
    pub convergence_check_interval: usize,
    /// Maximum spread among recent batch estimates to declare convergence.
    pub convergence_tolerance: f64,
    /// Upper bound on total shots spent during convergence estimation.
    pub max_shots_for_convergence: usize,
}
155
156impl Default for SamplingConfig {
157 fn default() -> Self {
158 Self {
159 num_shots: 1024,
160 seed: None,
161 confidence_level: 0.95,
162 compute_statistics: true,
163 estimate_convergence: false,
164 convergence_check_interval: 100,
165 convergence_tolerance: 0.01,
166 max_shots_for_convergence: 10_000,
167 }
168 }
169}
170
/// Shot-based sampler for quantum states, backed by a seedable ChaCha8 RNG
/// so results can be made reproducible via `SamplingConfig::seed`.
pub struct QuantumSampler {
    // Deterministic when `config.seed` is `Some`, otherwise thread-seeded.
    rng: ChaCha8Rng,
    // Sampling parameters; `num_shots` is temporarily overridden during
    // `estimate_convergence` batches.
    config: SamplingConfig,
}
178
179impl QuantumSampler {
180 #[must_use]
182 pub fn new(config: SamplingConfig) -> Self {
183 let rng = if let Some(seed) = config.seed {
184 ChaCha8Rng::seed_from_u64(seed)
185 } else {
186 ChaCha8Rng::from_rng(&mut thread_rng())
187 };
188
189 Self { rng, config }
190 }
191
192 pub fn sample_state(&mut self, state: &Array1<Complex64>) -> Result<ShotResult> {
194 let num_qubits = (state.len() as f64).log2() as usize;
195 if 1 << num_qubits != state.len() {
196 return Err(SimulatorError::InvalidInput(
197 "State vector dimension must be a power of 2".to_string(),
198 ));
199 }
200
201 let probabilities: Vec<f64> = state.iter().map(scirs2_core::Complex::norm_sqr).collect();
203
204 let total_prob: f64 = probabilities.iter().sum();
206 if (total_prob - 1.0).abs() > 1e-10 {
207 return Err(SimulatorError::InvalidInput(format!(
208 "State vector not normalized: total probability = {total_prob}"
209 )));
210 }
211
212 let mut outcomes = Vec::with_capacity(self.config.num_shots);
214 for _ in 0..self.config.num_shots {
215 let sample = self.sample_from_distribution(&probabilities)?;
216 outcomes.push(BitString::from_int(sample, num_qubits));
217 }
218
219 let statistics = if self.config.compute_statistics {
221 self.compute_statistics(&outcomes)?
222 } else {
223 MeasurementStatistics {
224 counts: HashMap::new(),
225 mode: BitString::from_int(0, num_qubits),
226 probabilities: HashMap::new(),
227 probability_variance: 0.0,
228 confidence_intervals: HashMap::new(),
229 entropy: 0.0,
230 purity: 0.0,
231 }
232 };
233
234 Ok(ShotResult {
235 outcomes,
236 num_shots: self.config.num_shots,
237 statistics,
238 config: self.config.clone(),
239 })
240 }
241
242 pub fn sample_state_with_noise(
244 &mut self,
245 state: &Array1<Complex64>,
246 noise_model: &dyn NoiseModel,
247 ) -> Result<ShotResult> {
248 let noisy_state = noise_model.apply_readout_noise(state)?;
250 self.sample_state(&noisy_state)
251 }
252
253 pub fn sample_expectation(
255 &mut self,
256 state: &Array1<Complex64>,
257 observable: &PauliOperatorSum,
258 ) -> Result<ExpectationResult> {
259 let mut expectation_values = Vec::new();
260 let mut variances = Vec::new();
261
262 for term in &observable.terms {
264 let term_result = self.sample_pauli_expectation(state, term)?;
265 expectation_values.push(term_result.expectation * term.coefficient.re);
266 variances.push(term_result.variance * term.coefficient.re.powi(2));
267 }
268
269 let total_expectation: f64 = expectation_values.iter().sum();
271 let total_variance: f64 = variances.iter().sum();
272 let standard_error = (total_variance / self.config.num_shots as f64).sqrt();
273
274 let z_score = self.get_z_score(self.config.confidence_level);
276 let confidence_interval = (
277 z_score.mul_add(-standard_error, total_expectation),
278 z_score.mul_add(standard_error, total_expectation),
279 );
280
281 Ok(ExpectationResult {
282 expectation: total_expectation,
283 variance: total_variance,
284 standard_error,
285 confidence_interval,
286 num_shots: self.config.num_shots,
287 })
288 }
289
290 fn sample_pauli_expectation(
292 &mut self,
293 state: &Array1<Complex64>,
294 pauli_string: &PauliString,
295 ) -> Result<ExpectationResult> {
296 let num_qubits = pauli_string.num_qubits;
300 let mut measurements = Vec::with_capacity(self.config.num_shots);
301
302 for _ in 0..self.config.num_shots {
303 let outcome = self.measure_pauli_basis(state, pauli_string)?;
305 measurements.push(outcome);
306 }
307
308 let mean = measurements.iter().sum::<f64>() / measurements.len() as f64;
310 let variance = measurements.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
311 / measurements.len() as f64;
312
313 let standard_error = (variance / measurements.len() as f64).sqrt();
314 let z_score = self.get_z_score(self.config.confidence_level);
315 let confidence_interval = (
316 z_score.mul_add(-standard_error, mean),
317 z_score.mul_add(standard_error, mean),
318 );
319
320 Ok(ExpectationResult {
321 expectation: mean,
322 variance,
323 standard_error,
324 confidence_interval,
325 num_shots: measurements.len(),
326 })
327 }
328
329 fn measure_pauli_basis(
331 &mut self,
332 _state: &Array1<Complex64>,
333 _pauli_string: &PauliString,
334 ) -> Result<f64> {
335 if self.rng.random::<f64>() < 0.5 {
338 Ok(1.0)
339 } else {
340 Ok(-1.0)
341 }
342 }
343
344 fn sample_from_distribution(&mut self, probabilities: &[f64]) -> Result<usize> {
346 let random_value = self.rng.random::<f64>();
347 let mut cumulative = 0.0;
348
349 for (i, &prob) in probabilities.iter().enumerate() {
350 cumulative += prob;
351 if random_value <= cumulative {
352 return Ok(i);
353 }
354 }
355
356 Ok(probabilities.len() - 1)
358 }
359
360 pub fn sample_multinomial(
371 &mut self,
372 probabilities: &[f64],
373 n_samples: usize,
374 ) -> Result<Vec<usize>> {
375 if probabilities.is_empty() {
376 return Err(SimulatorError::InvalidInput(
377 "probability distribution must be non-empty".to_string(),
378 ));
379 }
380
381 let total: f64 = probabilities.iter().sum();
383 if total <= 0.0 || !total.is_finite() {
384 return Err(SimulatorError::InvalidInput(format!(
385 "probability distribution must sum to a positive finite value, got {total}"
386 )));
387 }
388
389 let normalised: Vec<f64> = probabilities.iter().map(|p| p / total).collect();
390
391 let mut cdf = Vec::with_capacity(normalised.len());
393 let mut running = 0.0;
394 for &p in &normalised {
395 running += p;
396 cdf.push(running);
397 }
398 if let Some(last) = cdf.last_mut() {
400 *last = 1.0;
401 }
402
403 let k = probabilities.len();
404 let mut counts = vec![0usize; k];
405
406 for _ in 0..n_samples {
407 let u: f64 = self.rng.random();
408 let idx = cdf.partition_point(|&c| c < u).min(k - 1);
410 counts[idx] += 1;
411 }
412
413 Ok(counts)
414 }
415
416 pub fn importance_sampling(
438 &mut self,
439 target_probs: &[f64],
440 proposal_probs: &[f64],
441 f_values: &[f64],
442 n_samples: usize,
443 ) -> Result<ImportanceSamplingResult> {
444 let k = target_probs.len();
445 if k == 0 {
446 return Err(SimulatorError::InvalidInput(
447 "distributions must be non-empty".to_string(),
448 ));
449 }
450 if proposal_probs.len() != k || f_values.len() != k {
451 return Err(SimulatorError::InvalidInput(format!(
452 "all arrays must have the same length (got {k}, {}, {})",
453 proposal_probs.len(),
454 f_values.len()
455 )));
456 }
457
458 let p_total: f64 = target_probs.iter().sum();
460 let q_total: f64 = proposal_probs.iter().sum();
461 if p_total <= 0.0 || !p_total.is_finite() {
462 return Err(SimulatorError::InvalidInput(
463 "target distribution must sum to a positive finite value".to_string(),
464 ));
465 }
466 if q_total <= 0.0 || !q_total.is_finite() {
467 return Err(SimulatorError::InvalidInput(
468 "proposal distribution must sum to a positive finite value".to_string(),
469 ));
470 }
471
472 let p_norm: Vec<f64> = target_probs.iter().map(|p| p / p_total).collect();
473 let q_norm: Vec<f64> = proposal_probs.iter().map(|q| q / q_total).collect();
474
475 for i in 0..k {
477 if p_norm[i] > 1e-15 && q_norm[i] < 1e-300 {
478 return Err(SimulatorError::InvalidInput(format!(
479 "proposal probability at index {i} is zero where target is non-zero; \
480 importance sampling requires the proposal to cover the target's support"
481 )));
482 }
483 }
484
485 let samples: Vec<usize> = (0..n_samples)
487 .map(|_| self.sample_from_distribution(&q_norm))
488 .collect::<Result<Vec<_>>>()?;
489
490 let mut raw_weights: Vec<f64> = samples
492 .iter()
493 .map(|&idx| {
494 let q = q_norm[idx];
495 if q < 1e-300 {
496 0.0
497 } else {
498 p_norm[idx] / q
499 }
500 })
501 .collect();
502
503 let w_sum: f64 = raw_weights.iter().sum();
505 if w_sum <= 0.0 {
506 return Err(SimulatorError::InvalidInput(
507 "all importance weights are zero; the proposal does not cover the target"
508 .to_string(),
509 ));
510 }
511 for w in &mut raw_weights {
512 *w /= w_sum;
513 }
514
515 let estimate: f64 = samples
517 .iter()
518 .zip(raw_weights.iter())
519 .map(|(&idx, &w)| w * f_values[idx])
520 .sum();
521
522 let w_sq_sum: f64 = raw_weights.iter().map(|w| w * w).sum();
524 let effective_sample_size = if w_sq_sum > 0.0 { 1.0 / w_sq_sum } else { 0.0 };
525
526 let weighted_f: Vec<f64> = samples
528 .iter()
529 .zip(raw_weights.iter())
530 .map(|(&idx, &w)| w * f_values[idx])
531 .collect();
532 let mean_wf: f64 = weighted_f.iter().sum::<f64>() / n_samples as f64;
533 let variance: f64 = weighted_f
534 .iter()
535 .map(|&wf| (wf - mean_wf).powi(2))
536 .sum::<f64>()
537 / (n_samples as f64 - 1.0).max(1.0);
538
539 Ok(ImportanceSamplingResult {
540 estimate,
541 effective_sample_size,
542 variance,
543 n_samples,
544 })
545 }
546
547 fn compute_statistics(&self, outcomes: &[BitString]) -> Result<MeasurementStatistics> {
549 let mut counts = HashMap::new();
550 let total_shots = outcomes.len() as f64;
551
552 for outcome in outcomes {
554 *counts.entry(outcome.clone()).or_insert(0) += 1;
555 }
556
557 let mode = counts.iter().max_by_key(|(_, &count)| count).map_or_else(
559 || BitString::from_int(0, outcomes[0].len()),
560 |(outcome, _)| outcome.clone(),
561 );
562
563 let mut probabilities = HashMap::new();
565 let mut confidence_intervals = HashMap::new();
566 let z_score = self.get_z_score(self.config.confidence_level);
567
568 for (outcome, &count) in &counts {
569 let prob = count as f64 / total_shots;
570 probabilities.insert(outcome.clone(), prob);
571
572 let std_error = (prob * (1.0 - prob) / total_shots).sqrt();
574 let margin = z_score * std_error;
575 confidence_intervals.insert(
576 outcome.clone(),
577 ((prob - margin).max(0.0), (prob + margin).min(1.0)),
578 );
579 }
580
581 let entropy = probabilities
583 .values()
584 .filter(|&&p| p > 0.0)
585 .map(|&p| -p * p.ln())
586 .sum::<f64>();
587
588 let purity = probabilities.values().map(|&p| p * p).sum::<f64>();
590
591 let mean_prob = 1.0 / probabilities.len() as f64;
593 let probability_variance = probabilities
594 .values()
595 .map(|&p| (p - mean_prob).powi(2))
596 .sum::<f64>()
597 / probabilities.len() as f64;
598
599 Ok(MeasurementStatistics {
600 counts,
601 mode,
602 probabilities,
603 probability_variance,
604 confidence_intervals,
605 entropy,
606 purity,
607 })
608 }
609
610 fn get_z_score(&self, confidence_level: f64) -> f64 {
612 match (confidence_level * 100.0) as i32 {
614 90 => 1.645,
615 95 => 1.96,
616 99 => 2.576,
617 _ => 1.96, }
619 }
620
621 pub fn estimate_convergence(
623 &mut self,
624 state: &Array1<Complex64>,
625 observable: &PauliOperatorSum,
626 ) -> Result<ConvergenceResult> {
627 let mut expectation_history = Vec::new();
628 let mut variance_history = Vec::new();
629 let mut shots_taken = 0;
630 let mut converged = false;
631
632 while shots_taken < self.config.max_shots_for_convergence && !converged {
633 let batch_shots = self
635 .config
636 .convergence_check_interval
637 .min(self.config.max_shots_for_convergence - shots_taken);
638
639 let original_shots = self.config.num_shots;
641 self.config.num_shots = batch_shots;
642
643 let result = self.sample_expectation(state, observable)?;
644
645 self.config.num_shots = original_shots;
647
648 expectation_history.push(result.expectation);
649 variance_history.push(result.variance);
650 shots_taken += batch_shots;
651
652 if expectation_history.len() >= 3 {
654 let recent_values = &expectation_history[expectation_history.len() - 3..];
655 let max_diff = recent_values
656 .iter()
657 .zip(recent_values.iter().skip(1))
658 .map(|(a, b)| (a - b).abs())
659 .fold(0.0, f64::max);
660
661 if max_diff < self.config.convergence_tolerance {
662 converged = true;
663 }
664 }
665 }
666
667 let final_expectation = expectation_history.last().copied().unwrap_or(0.0);
669 let expectation_std = if expectation_history.len() > 1 {
670 let mean = expectation_history.iter().sum::<f64>() / expectation_history.len() as f64;
671 (expectation_history
672 .iter()
673 .map(|x| (x - mean).powi(2))
674 .sum::<f64>()
675 / (expectation_history.len() - 1) as f64)
676 .sqrt()
677 } else {
678 0.0
679 };
680
681 Ok(ConvergenceResult {
682 converged,
683 shots_taken,
684 final_expectation,
685 expectation_history,
686 variance_history,
687 convergence_rate: expectation_std,
688 })
689 }
690}
691
/// Result of a shot-based expectation-value estimate.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExpectationResult {
    /// Estimated expectation value.
    pub expectation: f64,
    /// Variance of the underlying measurement record.
    pub variance: f64,
    /// Standard error of the mean.
    pub standard_error: f64,
    /// (lower, upper) bounds at the configured confidence level.
    pub confidence_interval: (f64, f64),
    /// Number of shots the estimate is based on.
    pub num_shots: usize,
}
706
/// Outcome of iterative convergence estimation for an observable.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConvergenceResult {
    /// True when successive batch estimates settled within tolerance.
    pub converged: bool,
    /// Total shots consumed across all batches.
    pub shots_taken: usize,
    /// Expectation value from the final batch (0.0 if no batch ran).
    pub final_expectation: f64,
    /// Per-batch expectation estimates, in order.
    pub expectation_history: Vec<f64>,
    /// Per-batch variance estimates, in order.
    pub variance_history: Vec<f64>,
    /// Sample standard deviation of the batch estimates.
    pub convergence_rate: f64,
}
723
/// Readout-noise abstraction used by `QuantumSampler::sample_state_with_noise`.
pub trait NoiseModel: Send + Sync {
    /// Returns the state after applying readout noise.
    ///
    /// # Errors
    /// Implementations may fail when the state cannot be processed.
    fn apply_readout_noise(&self, state: &Array1<Complex64>) -> Result<Array1<Complex64>>;

    /// Probability that a measurement of `qubit` is misread.
    fn readout_error_probability(&self, qubit: usize) -> f64;
}
732
733#[derive(Debug, Clone)]
735pub struct SimpleReadoutNoise {
736 pub error_probs: Vec<f64>,
738}
739
740impl SimpleReadoutNoise {
741 #[must_use]
743 pub fn uniform(num_qubits: usize, error_prob: f64) -> Self {
744 Self {
745 error_probs: vec![error_prob; num_qubits],
746 }
747 }
748}
749
750impl NoiseModel for SimpleReadoutNoise {
751 fn apply_readout_noise(&self, state: &Array1<Complex64>) -> Result<Array1<Complex64>> {
752 Ok(state.clone())
754 }
755
756 fn readout_error_probability(&self, qubit: usize) -> f64 {
757 self.error_probs.get(qubit).copied().unwrap_or(0.0)
758 }
759}
760
761pub mod analysis {
763 use super::{ComparisonResult, ShotResult};
764
765 #[must_use]
767 pub fn statistical_power(effect_size: f64, num_shots: usize, significance_level: f64) -> f64 {
768 let standard_error = 1.0 / (num_shots as f64).sqrt();
770 let z_critical = match (significance_level * 100.0) as i32 {
771 1 => 2.576,
772 5 => 1.96,
773 10 => 1.645,
774 _ => 1.96,
775 };
776
777 let z_beta = (effect_size / standard_error) - z_critical;
778 normal_cdf(z_beta)
779 }
780
781 #[must_use]
783 pub fn required_shots_for_precision(desired_error: f64, confidence_level: f64) -> usize {
784 let z_score = match (confidence_level * 100.0) as i32 {
785 90 => 1.645,
786 95 => 1.96,
787 99 => 2.576,
788 _ => 1.96,
789 };
790
791 let n = (z_score * z_score) / (4.0 * desired_error * desired_error);
793 n.ceil() as usize
794 }
795
796 #[must_use]
798 pub fn compare_shot_results(
799 result1: &ShotResult,
800 result2: &ShotResult,
801 significance_level: f64,
802 ) -> ComparisonResult {
803 let mut chi_square = 0.0;
805 let mut degrees_of_freedom: usize = 0;
806
807 let mut all_outcomes = std::collections::HashSet::new();
809 all_outcomes.extend(result1.statistics.counts.keys());
810 all_outcomes.extend(result2.statistics.counts.keys());
811
812 for outcome in &all_outcomes {
813 let count1 = result1.statistics.counts.get(outcome).copied().unwrap_or(0) as f64;
814 let count2 = result2.statistics.counts.get(outcome).copied().unwrap_or(0) as f64;
815
816 let total1 = result1.num_shots as f64;
817 let total2 = result2.num_shots as f64;
818
819 let expected1 = (count1 + count2) * total1 / (total1 + total2);
820 let expected2 = (count1 + count2) * total2 / (total1 + total2);
821
822 if expected1 > 5.0 && expected2 > 5.0 {
823 chi_square += (count1 - expected1).powi(2) / expected1;
824 chi_square += (count2 - expected2).powi(2) / expected2;
825 degrees_of_freedom += 1;
826 }
827 }
828
829 degrees_of_freedom = degrees_of_freedom.saturating_sub(1);
830
831 let critical_value = match (significance_level * 100.0) as i32 {
833 1 => 6.635, 5 => 3.841,
835 10 => 2.706,
836 _ => 3.841,
837 };
838
839 ComparisonResult {
840 chi_square,
841 degrees_of_freedom,
842 p_value: if chi_square > critical_value {
843 0.01
844 } else {
845 0.1
846 }, significant: chi_square > critical_value,
848 }
849 }
850
851 fn normal_cdf(x: f64) -> f64 {
853 0.5 * (1.0 + erf(x / 2.0_f64.sqrt()))
855 }
856
857 fn erf(x: f64) -> f64 {
859 let a1 = 0.254_829_592;
861 let a2 = -0.284_496_736;
862 let a3 = 1.421_413_741;
863 let a4 = -1.453_152_027;
864 let a5 = 1.061_405_429;
865 let p = 0.327_591_1;
866
867 let sign = if x < 0.0 { -1.0 } else { 1.0 };
868 let x = x.abs();
869
870 let t = 1.0 / (1.0 + p * x);
871 let y = ((a5 * t + a4).mul_add(t, a3).mul_add(t, a2).mul_add(t, a1) * t)
872 .mul_add(-(-x * x).exp(), 1.0);
873
874 sign * y
875 }
876}
877
/// Result of a self-normalised importance-sampling run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportanceSamplingResult {
    /// Weighted estimate of the target expectation.
    pub estimate: f64,
    /// Kish effective sample size: 1 / sum of squared normalised weights.
    pub effective_sample_size: f64,
    /// Sample variance of the weighted contributions.
    pub variance: f64,
    /// Number of proposal draws taken.
    pub n_samples: usize,
}
890
/// Result of a chi-square comparison between two shot distributions.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComparisonResult {
    /// Chi-square statistic over outcomes with sufficient expected counts.
    pub chi_square: f64,
    /// Degrees of freedom used for the test.
    pub degrees_of_freedom: usize,
    /// Coarse placeholder p-value (0.01 if significant, 0.1 otherwise),
    /// not an exact tail probability.
    pub p_value: f64,
    /// Whether the statistic exceeded the critical value.
    pub significant: bool,
}
903
#[cfg(test)]
mod tests {
    use super::*;

    // Configs are built with struct-update syntax instead of mutating a
    // Default value field-by-field, which removes the need for the
    // `clippy::field_reassign_with_default` allow the module carried.

    #[test]
    fn test_bit_string() {
        // 5 = 0b0101, stored least-significant bit first.
        let bs = BitString::from_int(5, 4);
        assert_eq!(bs.bits, vec![1, 0, 1, 0]);
        assert_eq!(bs.to_int(), 5);
        assert_eq!(bs.weight(), 2);
    }

    #[test]
    fn test_sampler_creation() {
        let config = SamplingConfig::default();
        let sampler = QuantumSampler::new(config);
        assert_eq!(sampler.config.num_shots, 1024);
    }

    #[test]
    fn test_uniform_state_sampling() {
        let config = SamplingConfig {
            num_shots: 100,
            seed: Some(42),
            ..SamplingConfig::default()
        };

        let mut sampler = QuantumSampler::new(config);

        // |+> state: equal superposition of |0> and |1>.
        let state = Array1::from_vec(vec![
            Complex64::new(1.0 / 2.0_f64.sqrt(), 0.0),
            Complex64::new(1.0 / 2.0_f64.sqrt(), 0.0),
        ]);

        let result = sampler
            .sample_state(&state)
            .expect("Failed to sample state");
        assert_eq!(result.num_shots, 100);
        assert_eq!(result.outcomes.len(), 100);

        // With 100 shots at p = 0.5 each, both outcomes should appear.
        let has_zero = result.outcomes.iter().any(|bs| bs.to_int() == 0);
        let has_one = result.outcomes.iter().any(|bs| bs.to_int() == 1);
        assert!(has_zero && has_one);
    }

    #[test]
    fn test_required_shots_calculation() {
        // 1.96^2 / (4 * 0.01^2) ≈ 9604.
        let shots = analysis::required_shots_for_precision(0.01, 0.95);
        assert!(shots > 9000);
    }

    #[test]
    fn test_sample_multinomial_basic() {
        let config = SamplingConfig {
            seed: Some(123),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        // A degenerate distribution puts every draw on index 0.
        let probs = vec![1.0, 0.0, 0.0];
        let counts = sampler
            .sample_multinomial(&probs, 100)
            .expect("multinomial sampling should succeed");

        assert_eq!(counts.len(), 3);
        assert_eq!(counts[0], 100);
        assert_eq!(counts[1], 0);
        assert_eq!(counts[2], 0);
    }

    #[test]
    fn test_sample_multinomial_uniform() {
        let config = SamplingConfig {
            seed: Some(42),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        let probs = vec![0.5, 0.5];
        let counts = sampler
            .sample_multinomial(&probs, 1000)
            .expect("multinomial sampling should succeed");

        assert_eq!(counts.len(), 2);
        assert_eq!(counts[0] + counts[1], 1000);
        assert!(counts[0] > 400, "outcome 0 should appear >40% of the time");
        assert!(counts[1] > 400, "outcome 1 should appear >40% of the time");
    }

    #[test]
    fn test_sample_multinomial_counts_sum_to_n() {
        let config = SamplingConfig {
            seed: Some(7),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        let probs = vec![0.1, 0.3, 0.2, 0.4];
        let n = 500;
        let counts = sampler
            .sample_multinomial(&probs, n)
            .expect("multinomial sampling should succeed");

        let total: usize = counts.iter().sum();
        assert_eq!(total, n, "all counts must sum to n_samples");
    }

    #[test]
    fn test_importance_sampling_identity_proposal() {
        let config = SamplingConfig {
            seed: Some(99),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        // When proposal == target, IS reduces to plain Monte Carlo;
        // E[f] = (1 + 2 + 3) / 3 = 2.
        let probs = vec![1.0 / 3.0; 3];
        let f = vec![1.0, 2.0, 3.0];

        let result = sampler
            .importance_sampling(&probs, &probs, &f, 5000)
            .expect("importance sampling should succeed");

        assert!(
            (result.estimate - 2.0).abs() < 0.3,
            "IS estimate should be near 2.0, got {}",
            result.estimate
        );
        assert!(
            result.effective_sample_size > 100.0,
            "ESS should be large for matched proposal"
        );
    }

    #[test]
    fn test_importance_sampling_reweighting() {
        let config = SamplingConfig {
            seed: Some(55),
            ..SamplingConfig::default()
        };
        let mut sampler = QuantumSampler::new(config);

        // Target expectation: 0.9 * 0 + 0.1 * 1 = 0.1, sampled through a
        // uniform proposal and corrected by the importance weights.
        let target = vec![0.9, 0.1];
        let proposal = vec![0.5, 0.5];
        let f = vec![0.0, 1.0];

        let result = sampler
            .importance_sampling(&target, &proposal, &f, 10_000)
            .expect("importance sampling should succeed");

        assert!(
            (result.estimate - 0.1).abs() < 0.05,
            "IS estimate should be near 0.1, got {}",
            result.estimate
        );
    }
}