1use std::collections::HashMap;
10
/// A two-sided confidence interval for a scalar statistic.
#[derive(Debug, Clone)]
pub struct ConfidenceInterval {
    /// Lower bound of the interval (clamped into the statistic's valid range).
    pub lower: f64,
    /// Upper bound of the interval (clamped into the statistic's valid range).
    pub upper: f64,
    /// The point estimate the interval surrounds (e.g. the sample proportion).
    pub point_estimate: f64,
    /// Requested confidence level, strictly between 0 and 1 (e.g. 0.95).
    pub confidence_level: f64,
    /// Name of the construction method, e.g. "wilson" or "clopper-pearson".
    pub method: &'static str,
}
29
/// Outcome of a Pearson chi-squared goodness-of-fit test.
#[derive(Debug, Clone)]
pub struct ChiSquaredResult {
    /// The chi-squared test statistic.
    pub statistic: f64,
    /// Degrees of freedom used when computing the p-value.
    pub degrees_of_freedom: usize,
    /// Approximate upper-tail probability of the statistic under the null.
    pub p_value: f64,
    /// True when `p_value` falls below the fixed 0.05 threshold.
    pub significant: bool,
}
42
/// Tracks a stream of scalar estimates and reports convergence once the most
/// recent `window_size` values agree to within a caller-supplied tolerance.
pub struct ConvergenceMonitor {
    /// Recorded estimates, oldest first.
    estimates: Vec<f64>,
    /// Number of trailing estimates examined by `has_converged`.
    window_size: usize,
}
48
/// Approximate two-sided z critical value for the given confidence level.
///
/// Uses the Abramowitz–Stegun rational approximation (formula 26.2.23) for
/// the inverse normal CDF; absolute error is roughly 4.5e-4.
///
/// # Panics
/// Panics unless `confidence` lies strictly between 0 and 1.
pub fn z_score(confidence: f64) -> f64 {
    assert!(
        confidence > 0.0 && confidence < 1.0,
        "confidence must be in (0, 1)"
    );

    // Upper-tail probability of a two-sided interval.
    let tail = (1.0 - confidence) / 2.0;

    // Intermediate variable of the rational approximation.
    let t = (-2.0_f64 * tail.ln()).sqrt();

    // Numerator and denominator polynomials in Horner form; the
    // coefficients are the published Abramowitz & Stegun constants.
    let numerator = 2.515517 + t * (0.802853 + t * 0.010328);
    let denominator = 1.0 + t * (1.432788 + t * (0.189269 + t * 0.001308));

    t - numerator / denominator
}
86
87pub fn wilson_interval(successes: usize, trials: usize, confidence: f64) -> ConfidenceInterval {
102 assert!(trials > 0, "trials must be > 0");
103 assert!(
104 confidence > 0.0 && confidence < 1.0,
105 "confidence must be in (0, 1)"
106 );
107
108 let n = trials as f64;
109 let p_hat = successes as f64 / n;
110 let z = z_score(confidence);
111 let z2 = z * z;
112
113 let denom = 1.0 + z2 / n;
114 let centre = (p_hat + z2 / (2.0 * n)) / denom;
115 let half_width = z * (p_hat * (1.0 - p_hat) / n + z2 / (4.0 * n * n)).sqrt() / denom;
116
117 let lower = (centre - half_width).max(0.0);
118 let upper = (centre + half_width).min(1.0);
119
120 ConfidenceInterval {
121 lower,
122 upper,
123 point_estimate: p_hat,
124 confidence_level: confidence,
125 method: "wilson",
126 }
127}
128
129pub fn clopper_pearson(successes: usize, trials: usize, confidence: f64) -> ConfidenceInterval {
145 assert!(trials > 0, "trials must be > 0");
146 assert!(
147 confidence > 0.0 && confidence < 1.0,
148 "confidence must be in (0, 1)"
149 );
150
151 let alpha = 1.0 - confidence;
152 let n = trials;
153 let k = successes;
154 let p_hat = k as f64 / n as f64;
155
156 let lower = if k == 0 {
159 0.0
160 } else {
161 bisect_binomial_cdf(n, k - 1, 1.0 - alpha / 2.0)
162 };
163
164 let upper = if k == n {
166 1.0
167 } else {
168 bisect_binomial_cdf(n, k, alpha / 2.0)
169 };
170
171 ConfidenceInterval {
172 lower,
173 upper,
174 point_estimate: p_hat,
175 confidence_level: confidence,
176 method: "clopper-pearson",
177 }
178}
179
180fn bisect_binomial_cdf(n: usize, k: usize, target: f64) -> f64 {
184 let mut lo = 0.0_f64;
185 let mut hi = 1.0_f64;
186
187 for _ in 0..200 {
188 let mid = (lo + hi) / 2.0;
189 let cdf = binomial_cdf(n, k, mid);
190 if cdf < target {
191 hi = mid;
197 } else {
198 lo = mid;
199 }
200
201 if (hi - lo) < 1e-15 {
202 break;
203 }
204 }
205 (lo + hi) / 2.0
206}
207
/// Cumulative distribution function of Binomial(n, p) at k, i.e.
/// P(X <= k), computed by summing PMF terms in log space for stability.
fn binomial_cdf(n: usize, k: usize, p: f64) -> f64 {
    // Degenerate success probabilities: all mass sits at 0 or at n.
    if p <= 0.0 {
        return 1.0;
    }
    if p >= 1.0 {
        return if k >= n { 1.0 } else { 0.0 };
    }
    // Summing the entire support always yields 1.
    if k >= n {
        return 1.0;
    }

    let ln_p = p.ln();
    let ln_q = (1.0 - p).ln();

    // i = 0 term: C(n, 0) = 1, so its log is 0.
    let mut ln_choose = 0.0_f64;
    let mut total = (ln_choose + ln_q * n as f64).exp();

    // Accumulate P(X = i) for i = 1..=k, updating ln C(n, i) incrementally
    // via C(n, i) = C(n, i-1) * (n - i + 1) / i.
    for i in 1..=k {
        ln_choose += ((n - i + 1) as f64).ln() - (i as f64).ln();
        total += (ln_choose + ln_p * i as f64 + ln_q * (n - i) as f64).exp();
    }

    // Guard against rounding drifting the sum outside [0, 1].
    total.min(1.0).max(0.0)
}
246
247pub fn expectation_confidence(
265 counts: &HashMap<Vec<bool>, usize>,
266 qubit: u32,
267 confidence: f64,
268) -> ConfidenceInterval {
269 assert!(
270 confidence > 0.0 && confidence < 1.0,
271 "confidence must be in (0, 1)"
272 );
273
274 let mut n_zero: usize = 0;
275 let mut n_one: usize = 0;
276
277 for (bits, &count) in counts {
278 if let Some(&b) = bits.get(qubit as usize) {
279 if b {
280 n_one += count;
281 } else {
282 n_zero += count;
283 }
284 }
285 }
286
287 let total = (n_zero + n_one) as f64;
288 assert!(total > 0.0, "no shots found for the given qubit");
289
290 let p0 = n_zero as f64 / total;
291 let p1 = n_one as f64 / total;
292 let exp_z = p0 - p1; let var_single = 1.0 - exp_z * exp_z;
297 let se = (var_single / total).sqrt();
298
299 let z = z_score(confidence);
300 let lower = (exp_z - z * se).max(-1.0);
301 let upper = (exp_z + z * se).min(1.0);
302
303 ConfidenceInterval {
304 lower,
305 upper,
306 point_estimate: exp_z,
307 confidence_level: confidence,
308 method: "expectation-z-se",
309 }
310}
311
/// Hoeffding bound: the number of shots needed to estimate a bounded [0, 1]
/// mean to within `epsilon` with probability at least `1 - delta`.
///
/// Solves n >= ln(2/delta) / (2 * epsilon^2), rounded up.
///
/// # Panics
/// Panics unless both `epsilon` and `delta` lie strictly in (0, 1).
pub fn required_shots(epsilon: f64, delta: f64) -> usize {
    assert!(
        epsilon > 0.0 && epsilon < 1.0,
        "epsilon must be in (0, 1)"
    );
    assert!(delta > 0.0 && delta < 1.0, "delta must be in (0, 1)");

    ((2.0_f64 / delta).ln() / (2.0 * epsilon * epsilon)).ceil() as usize
}
335
/// Total variation distance between two empirical count distributions:
/// TVD = (1/2) * sum over outcomes of |p_i - q_i|, after normalising each
/// count map by its own total. Ranges from 0 (identical) to 1 (disjoint).
///
/// A map whose counts sum to zero contributes probability 0 everywhere;
/// two empty maps are treated as identical.
pub fn total_variation_distance(
    p: &HashMap<Vec<bool>, usize>,
    q: &HashMap<Vec<bool>, usize>,
) -> f64 {
    let total_p: f64 = p.values().sum::<usize>() as f64;
    let total_q: f64 = q.values().sum::<usize>() as f64;

    if total_p == 0.0 && total_q == 0.0 {
        return 0.0;
    }

    // Probability of `key` under a count map, treating a zero total as the
    // all-zero measure.
    let prob = |m: &HashMap<Vec<bool>, usize>, total: f64, key: &Vec<bool>| -> f64 {
        if total > 0.0 {
            *m.get(key).unwrap_or(&0) as f64 / total
        } else {
            0.0
        }
    };

    // Union of every outcome seen in either distribution.
    let keys: std::collections::HashSet<&Vec<bool>> = p.keys().chain(q.keys()).collect();

    let abs_diff_sum: f64 = keys
        .into_iter()
        .map(|key| (prob(p, total_p, key) - prob(q, total_q, key)).abs())
        .sum();

    0.5 * abs_diff_sum
}
384
385pub fn chi_squared_test(
401 observed: &HashMap<Vec<bool>, usize>,
402 expected: &HashMap<Vec<bool>, usize>,
403) -> ChiSquaredResult {
404 let total_observed: f64 = observed.values().sum::<usize>() as f64;
405 let total_expected: f64 = expected.values().sum::<usize>() as f64;
406
407 assert!(
408 total_expected > 0.0,
409 "expected distribution must have nonzero total"
410 );
411
412 let mut all_keys: Vec<&Vec<bool>> = Vec::new();
414 for key in observed.keys() {
415 all_keys.push(key);
416 }
417 for key in expected.keys() {
418 if !observed.contains_key(key) {
419 all_keys.push(key);
420 }
421 }
422
423 let mut statistic = 0.0_f64;
424 let mut num_categories = 0_usize;
425
426 for key in &all_keys {
427 let o = *observed.get(*key).unwrap_or(&0) as f64;
428 let e_raw = *expected.get(*key).unwrap_or(&0) as f64;
430 let e = e_raw * total_observed / total_expected;
431
432 if e > 0.0 {
433 statistic += (o - e) * (o - e) / e;
434 num_categories += 1;
435 }
436 }
437
438 let df = if num_categories > 1 {
439 num_categories - 1
440 } else {
441 1
442 };
443
444 let p_value = chi_squared_survival(statistic, df);
445
446 ChiSquaredResult {
447 statistic,
448 degrees_of_freedom: df,
449 p_value,
450 significant: p_value < 0.05,
451 }
452}
453
454fn chi_squared_survival(x: f64, df: usize) -> f64 {
465 if df == 0 {
466 return if x > 0.0 { 0.0 } else { 1.0 };
467 }
468
469 if x <= 0.0 {
470 return 1.0;
471 }
472
473 let k = df as f64;
474 let term = 2.0 / (9.0 * k);
475 let cube_root = (x / k).powf(1.0 / 3.0);
476 let z = (cube_root - (1.0 - term)) / term.sqrt();
477
478 normal_cdf(-z)
480}
481
/// Standard normal CDF via the Abramowitz–Stegun polynomial approximation
/// (formula 26.2.17); absolute error is below ~7.5e-8.
fn normal_cdf(x: f64) -> f64 {
    let magnitude = x.abs();

    // Rational tail approximation evaluated at |x| (Horner form).
    let t = 1.0 / (1.0 + 0.2316419 * magnitude);
    let density = 0.3989422804014327 * (-magnitude * magnitude / 2.0).exp();
    let poly = t
        * (0.319381530
            + t * (-0.356563782
                + t * (1.781477937 + t * (-1.821255978 + t * 1.330274429))));
    let upper_tail = density * poly;

    // Symmetry: Phi(x) = 1 - Phi(-x).
    if x < 0.0 {
        upper_tail
    } else {
        1.0 - upper_tail
    }
}
505
506impl ConvergenceMonitor {
511 pub fn new(window_size: usize) -> Self {
516 assert!(window_size > 0, "window_size must be > 0");
517 Self {
518 estimates: Vec::new(),
519 window_size,
520 }
521 }
522
523 pub fn add_estimate(&mut self, value: f64) {
525 self.estimates.push(value);
526 }
527
528 pub fn has_converged(&self, epsilon: f64) -> bool {
531 if self.estimates.len() < self.window_size {
532 return false;
533 }
534
535 let window = &self.estimates[self.estimates.len() - self.window_size..];
536 let min = window
537 .iter()
538 .copied()
539 .fold(f64::INFINITY, f64::min);
540 let max = window
541 .iter()
542 .copied()
543 .fold(f64::NEG_INFINITY, f64::max);
544
545 (max - min) < epsilon
546 }
547
548 pub fn current_estimate(&self) -> Option<f64> {
551 self.estimates.last().copied()
552 }
553}
554
#[cfg(test)]
mod tests {
    use super::*;

    // ---------- z_score: check against known standard normal quantiles ----------

    #[test]
    fn z_score_95() {
        let z = z_score(0.95);
        assert!(
            (z - 1.96).abs() < 0.01,
            "z_score(0.95) = {z}, expected ~1.96"
        );
    }

    #[test]
    fn z_score_99() {
        let z = z_score(0.99);
        assert!(
            (z - 2.576).abs() < 0.02,
            "z_score(0.99) = {z}, expected ~2.576"
        );
    }

    #[test]
    fn z_score_90() {
        let z = z_score(0.90);
        assert!(
            (z - 1.645).abs() < 0.01,
            "z_score(0.90) = {z}, expected ~1.645"
        );
    }

    // ---------- wilson_interval ----------

    #[test]
    fn wilson_contains_true_proportion() {
        let ci = wilson_interval(50, 100, 0.95);
        assert!(ci.lower < 0.5 && ci.upper > 0.5, "Wilson CI should contain 0.5: {ci:?}");
        assert_eq!(ci.method, "wilson");
        assert!((ci.point_estimate - 0.5).abs() < 1e-12);
    }

    #[test]
    fn wilson_asymmetric() {
        // Near the 0 boundary the interval should straddle p_hat = 0.01
        // without leaving [0, 1].
        let ci = wilson_interval(1, 100, 0.95);
        assert!(ci.lower >= 0.0);
        assert!(ci.upper <= 1.0);
        assert!(ci.lower < 0.01);
        assert!(ci.upper > 0.01);
    }

    #[test]
    fn wilson_zero_successes() {
        let ci = wilson_interval(0, 100, 0.95);
        assert_eq!(ci.lower, 0.0);
        assert!(ci.upper > 0.0);
        assert!((ci.point_estimate - 0.0).abs() < 1e-12);
    }

    // ---------- clopper_pearson ----------

    #[test]
    fn clopper_pearson_contains_true_proportion() {
        let ci = clopper_pearson(50, 100, 0.95);
        assert!(
            ci.lower < 0.5 && ci.upper > 0.5,
            "Clopper-Pearson CI should contain 0.5: {ci:?}"
        );
        assert_eq!(ci.method, "clopper-pearson");
    }

    #[test]
    fn clopper_pearson_is_conservative() {
        // The exact interval should be at least as wide as Wilson's.
        let cp = clopper_pearson(50, 100, 0.95);
        let w = wilson_interval(50, 100, 0.95);

        let cp_width = cp.upper - cp.lower;
        let w_width = w.upper - w.lower;

        assert!(
            cp_width >= w_width - 1e-10,
            "Clopper-Pearson width ({cp_width}) should be >= Wilson width ({w_width})"
        );
    }

    #[test]
    fn clopper_pearson_edge_zero() {
        let ci = clopper_pearson(0, 100, 0.95);
        assert_eq!(ci.lower, 0.0);
        assert!(ci.upper > 0.0);
    }

    #[test]
    fn clopper_pearson_edge_all() {
        let ci = clopper_pearson(100, 100, 0.95);
        assert_eq!(ci.upper, 1.0);
        assert!(ci.lower < 1.0);
    }

    // ---------- expectation_confidence ----------

    #[test]
    fn expectation_all_zero() {
        // Every shot reads false => point estimate is +1.
        let mut counts = HashMap::new();
        counts.insert(vec![false], 1000);
        let ci = expectation_confidence(&counts, 0, 0.95);
        assert!((ci.point_estimate - 1.0).abs() < 1e-12);
        assert!(ci.lower <= 1.0);
        assert!(ci.upper >= 1.0 - 1e-6);
    }

    #[test]
    fn expectation_all_one() {
        // Every shot reads true => point estimate is -1.
        let mut counts = HashMap::new();
        counts.insert(vec![true], 1000);
        let ci = expectation_confidence(&counts, 0, 0.95);
        assert!((ci.point_estimate - (-1.0)).abs() < 1e-12);
    }

    #[test]
    fn expectation_balanced() {
        let mut counts = HashMap::new();
        counts.insert(vec![false], 500);
        counts.insert(vec![true], 500);
        let ci = expectation_confidence(&counts, 0, 0.95);
        assert!(
            ci.point_estimate.abs() < 1e-12,
            "expected 0.0, got {}",
            ci.point_estimate
        );
        assert!(ci.lower < 0.0);
        assert!(ci.upper > 0.0);
    }

    #[test]
    fn expectation_multi_qubit() {
        // The same bitstring yields opposite estimates per position.
        let mut counts = HashMap::new();
        counts.insert(vec![false, true], 1000);
        let ci0 = expectation_confidence(&counts, 0, 0.95);
        let ci1 = expectation_confidence(&counts, 1, 0.95);
        assert!((ci0.point_estimate - 1.0).abs() < 1e-12);
        assert!((ci1.point_estimate - (-1.0)).abs() < 1e-12);
    }

    // ---------- required_shots (Hoeffding bound) ----------

    #[test]
    fn required_shots_standard() {
        // ln(2/0.05) / (2 * 0.01^2) = 18444.4 -> 18445 after ceil.
        let n = required_shots(0.01, 0.05);
        assert!(
            (n as i64 - 18445).abs() <= 1,
            "required_shots(0.01, 0.05) = {n}, expected ~18445"
        );
    }

    #[test]
    fn required_shots_loose() {
        let n = required_shots(0.1, 0.1);
        assert!(n >= 149 && n <= 151, "expected ~150, got {n}");
    }

    // ---------- total_variation_distance ----------

    #[test]
    fn tvd_identical() {
        let mut p = HashMap::new();
        p.insert(vec![false, false], 250);
        p.insert(vec![false, true], 250);
        p.insert(vec![true, false], 250);
        p.insert(vec![true, true], 250);

        let tvd = total_variation_distance(&p, &p);
        assert!(tvd.abs() < 1e-12, "TVD of identical distributions should be 0, got {tvd}");
    }

    #[test]
    fn tvd_completely_different() {
        // Disjoint supports => maximal distance of 1.
        let mut p = HashMap::new();
        p.insert(vec![false], 1000);

        let mut q = HashMap::new();
        q.insert(vec![true], 1000);

        let tvd = total_variation_distance(&p, &q);
        assert!(
            (tvd - 1.0).abs() < 1e-12,
            "TVD of completely different distributions should be 1.0, got {tvd}"
        );
    }

    #[test]
    fn tvd_partial_overlap() {
        // |0.6-0.4|/2 + |0.4-0.6|/2 = 0.2.
        let mut p = HashMap::new();
        p.insert(vec![false], 600);
        p.insert(vec![true], 400);

        let mut q = HashMap::new();
        q.insert(vec![false], 400);
        q.insert(vec![true], 600);

        let tvd = total_variation_distance(&p, &q);
        assert!(
            (tvd - 0.2).abs() < 1e-12,
            "expected 0.2, got {tvd}"
        );
    }

    #[test]
    fn tvd_empty() {
        let p: HashMap<Vec<bool>, usize> = HashMap::new();
        let q: HashMap<Vec<bool>, usize> = HashMap::new();
        let tvd = total_variation_distance(&p, &q);
        assert!(tvd.abs() < 1e-12);
    }

    // ---------- chi_squared_test ----------

    #[test]
    fn chi_squared_matching() {
        let mut obs = HashMap::new();
        obs.insert(vec![false, false], 250);
        obs.insert(vec![false, true], 250);
        obs.insert(vec![true, false], 250);
        obs.insert(vec![true, true], 250);

        let result = chi_squared_test(&obs, &obs);
        assert!(
            result.statistic < 1e-12,
            "statistic should be ~0 for identical distributions, got {}",
            result.statistic
        );
        assert!(
            result.p_value > 0.05,
            "p-value should be high for matching distributions, got {}",
            result.p_value
        );
        assert!(!result.significant);
    }

    #[test]
    fn chi_squared_very_different() {
        let mut obs = HashMap::new();
        obs.insert(vec![false], 1000);
        obs.insert(vec![true], 0);

        let mut exp = HashMap::new();
        exp.insert(vec![false], 500);
        exp.insert(vec![true], 500);

        let result = chi_squared_test(&obs, &exp);
        assert!(result.statistic > 100.0, "statistic should be large");
        assert!(result.p_value < 0.05, "p-value should be small: {}", result.p_value);
        assert!(result.significant);
    }

    #[test]
    fn chi_squared_degrees_of_freedom() {
        // Four contributing categories => df = 4 - 1 = 3.
        let mut obs = HashMap::new();
        obs.insert(vec![false, false], 100);
        obs.insert(vec![false, true], 100);
        obs.insert(vec![true, false], 100);
        obs.insert(vec![true, true], 100);

        let result = chi_squared_test(&obs, &obs);
        assert_eq!(result.degrees_of_freedom, 3);
    }

    // ---------- ConvergenceMonitor ----------

    #[test]
    fn convergence_detects_stable() {
        let mut monitor = ConvergenceMonitor::new(5);
        for &v in &[0.5, 0.52, 0.49, 0.501, 0.499, 0.5001, 0.4999, 0.5002, 0.4998, 0.5001] {
            monitor.add_estimate(v);
        }
        assert!(
            monitor.has_converged(0.01),
            "should have converged: last 5 values are within 0.01"
        );
    }

    #[test]
    fn convergence_rejects_unstable() {
        let mut monitor = ConvergenceMonitor::new(5);
        for &v in &[0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9] {
            monitor.add_estimate(v);
        }
        assert!(
            !monitor.has_converged(0.01),
            "should NOT have converged: values oscillate widely"
        );
    }

    #[test]
    fn convergence_insufficient_data() {
        let mut monitor = ConvergenceMonitor::new(10);
        monitor.add_estimate(1.0);
        monitor.add_estimate(1.0);
        assert!(
            !monitor.has_converged(0.1),
            "not enough data for window_size=10"
        );
    }

    #[test]
    fn convergence_current_estimate() {
        let mut monitor = ConvergenceMonitor::new(3);
        assert_eq!(monitor.current_estimate(), None);
        monitor.add_estimate(42.0);
        assert_eq!(monitor.current_estimate(), Some(42.0));
        monitor.add_estimate(43.0);
        assert_eq!(monitor.current_estimate(), Some(43.0));
    }

    // ---------- internal numeric helpers ----------

    #[test]
    fn binomial_cdf_edge_cases() {
        // k = n covers the whole support.
        let c = binomial_cdf(10, 10, 0.5);
        assert!((c - 1.0).abs() < 1e-12);

        // P(X <= 0) = 0.5^10 = 0.0009765625.
        let c = binomial_cdf(10, 0, 0.5);
        assert!((c - 0.0009765625).abs() < 1e-8);
    }

    #[test]
    fn normal_cdf_values() {
        assert!((normal_cdf(0.0) - 0.5).abs() < 1e-6);

        assert!((normal_cdf(1.96) - 0.975).abs() < 0.002);

        assert!((normal_cdf(-1.96) - 0.025).abs() < 0.002);
    }
}