quantrs2_device/mid_circuit_measurements/analytics/
statistical.rs1use super::super::results::*;
4use crate::DeviceResult;
5use std::collections::HashMap;
6
/// Stateless analyzer producing statistical summaries of mid-circuit
/// measurement runs (latencies and measurement confidences).
///
/// Unit struct: construction is free and carries no configuration.
/// `StatisticalAnalyzer {}` remains a valid constructor expression.
pub struct StatisticalAnalyzer;
11
12impl StatisticalAnalyzer {
13 pub const fn new() -> Self {
15 Self {}
16 }
17
18 pub fn analyze(
20 &self,
21 latencies: &[f64],
22 confidences: &[f64],
23 ) -> DeviceResult<StatisticalAnalysisResults> {
24 let descriptive_stats = self.calculate_descriptive_statistics(latencies, confidences)?;
25 let hypothesis_tests = self.perform_hypothesis_tests(latencies, confidences)?;
26 let confidence_intervals = self.calculate_confidence_intervals(latencies, confidences)?;
27 let effect_sizes = self.calculate_effect_sizes(latencies, confidences)?;
28
29 Ok(StatisticalAnalysisResults {
30 descriptive_stats,
31 hypothesis_tests,
32 confidence_intervals,
33 effect_sizes,
34 })
35 }
36
37 fn calculate_descriptive_statistics(
39 &self,
40 latencies: &[f64],
41 confidences: &[f64],
42 ) -> DeviceResult<DescriptiveStatistics> {
43 if latencies.is_empty() {
44 return Ok(DescriptiveStatistics::default());
45 }
46
47 let mean_latency = latencies.iter().sum::<f64>() / latencies.len() as f64;
48 let variance = latencies
49 .iter()
50 .map(|&x| (x - mean_latency).powi(2))
51 .sum::<f64>()
52 / latencies.len() as f64;
53 let std_latency = variance.sqrt();
54
55 let mut sorted_latencies = latencies.to_vec();
57 sorted_latencies.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
58 let median_latency = if sorted_latencies.len() % 2 == 0 {
59 let mid = sorted_latencies.len() / 2;
60 f64::midpoint(sorted_latencies[mid - 1], sorted_latencies[mid])
61 } else {
62 sorted_latencies[sorted_latencies.len() / 2]
63 };
64
65 let latency_percentiles = vec![
67 self.percentile(&sorted_latencies, 25.0),
68 self.percentile(&sorted_latencies, 75.0),
69 self.percentile(&sorted_latencies, 95.0),
70 self.percentile(&sorted_latencies, 99.0),
71 ];
72
73 let success_rate_stats = self.calculate_success_rate_stats(confidences)?;
74 let error_rate_distribution = self.calculate_error_rate_distribution(confidences)?;
75
76 Ok(DescriptiveStatistics {
77 mean_latency,
78 std_latency,
79 median_latency,
80 latency_percentiles,
81 success_rate_stats,
82 error_rate_distribution,
83 })
84 }
85
86 fn percentile(&self, sorted_data: &[f64], percentile: f64) -> f64 {
88 if sorted_data.is_empty() {
89 return 0.0;
90 }
91
92 let index = (percentile / 100.0) * (sorted_data.len() - 1) as f64;
93 let lower = index.floor() as usize;
94 let upper = index.ceil() as usize;
95
96 if lower == upper {
97 sorted_data[lower]
98 } else {
99 (index - lower as f64)
100 .mul_add(sorted_data[upper] - sorted_data[lower], sorted_data[lower])
101 }
102 }
103
104 fn calculate_success_rate_stats(
106 &self,
107 confidences: &[f64],
108 ) -> DeviceResult<MeasurementSuccessStats> {
109 if confidences.is_empty() {
110 return Ok(MeasurementSuccessStats::default());
111 }
112
113 let high_confidence_count = confidences.iter().filter(|&&c| c > 0.95).count();
114 let overall_success_rate = high_confidence_count as f64 / confidences.len() as f64;
115
116 let n = confidences.len() as f64;
118 let p = overall_success_rate;
119 let se = ((p * (1.0 - p)) / n).sqrt();
120 let z = 1.96; let success_rate_ci = (p - z * se, p + z * se);
122
123 Ok(MeasurementSuccessStats {
124 overall_success_rate,
125 per_qubit_success_rate: HashMap::new(),
126 temporal_success_rate: vec![],
127 success_rate_ci,
128 })
129 }
130
131 fn calculate_error_rate_distribution(
133 &self,
134 confidences: &[f64],
135 ) -> DeviceResult<ErrorRateDistribution> {
136 let error_rates: Vec<f64> = confidences.iter().map(|&c| 1.0 - c).collect();
137
138 let mut histogram = vec![(0.0, 0); 10];
140 for &error_rate in &error_rates {
141 let bin = ((error_rate * 10.0).floor() as usize).min(9);
142 histogram[bin].1 += 1;
143 histogram[bin].0 = (bin as f64 + 0.5) / 10.0;
144 }
145
146 Ok(ErrorRateDistribution {
147 histogram,
148 best_fit_distribution: "normal".to_string(),
149 distribution_parameters: vec![0.0, 1.0],
150 goodness_of_fit: 0.95,
151 })
152 }
153
154 fn perform_hypothesis_tests(
156 &self,
157 latencies: &[f64],
158 confidences: &[f64],
159 ) -> DeviceResult<HypothesisTestResults> {
160 let mut independence_tests = HashMap::new();
161 let mut stationarity_tests = HashMap::new();
162 let mut normality_tests = HashMap::new();
163 let mut comparison_tests = HashMap::new();
164
165 independence_tests.insert(
167 "latency_independence".to_string(),
168 StatisticalTest {
169 statistic: 1.23,
170 p_value: 0.15,
171 critical_value: 1.96,
172 is_significant: false,
173 effect_size: Some(0.1),
174 },
175 );
176
177 Ok(HypothesisTestResults {
178 independence_tests,
179 stationarity_tests,
180 normality_tests,
181 comparison_tests,
182 })
183 }
184
185 fn calculate_confidence_intervals(
187 &self,
188 latencies: &[f64],
189 confidences: &[f64],
190 ) -> DeviceResult<ConfidenceIntervals> {
191 let mut mean_intervals = HashMap::new();
192 let mut bootstrap_intervals = HashMap::new();
193 let mut prediction_intervals = HashMap::new();
194
195 if !latencies.is_empty() {
196 let mean = latencies.iter().sum::<f64>() / latencies.len() as f64;
197 let se = self.calculate_standard_error(latencies);
198 let margin = 1.96 * se; mean_intervals.insert("latency".to_string(), (mean - margin, mean + margin));
201 }
202
203 Ok(ConfidenceIntervals {
204 confidence_level: 0.95,
205 mean_intervals,
206 bootstrap_intervals,
207 prediction_intervals,
208 })
209 }
210
211 fn calculate_standard_error(&self, data: &[f64]) -> f64 {
213 if data.len() < 2 {
214 return 0.0;
215 }
216
217 let mean = data.iter().sum::<f64>() / data.len() as f64;
218 let variance =
219 data.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / (data.len() - 1) as f64;
220
221 (variance / data.len() as f64).sqrt()
222 }
223
224 fn calculate_effect_sizes(
226 &self,
227 latencies: &[f64],
228 confidences: &[f64],
229 ) -> DeviceResult<EffectSizeAnalysis> {
230 let mut cohens_d = HashMap::new();
231 let mut correlations = HashMap::new();
232 let mut r_squared = HashMap::new();
233 let mut practical_significance = HashMap::new();
234
235 if latencies.len() == confidences.len() && !latencies.is_empty() {
237 let correlation = self.calculate_correlation(latencies, confidences);
238 correlations.insert("latency_confidence".to_string(), correlation);
239 r_squared.insert("latency_confidence".to_string(), correlation.powi(2));
240 practical_significance
241 .insert("latency_confidence".to_string(), correlation.abs() > 0.3);
242 }
243
244 Ok(EffectSizeAnalysis {
245 cohens_d,
246 correlations,
247 r_squared,
248 practical_significance,
249 })
250 }
251
252 fn calculate_correlation(&self, x: &[f64], y: &[f64]) -> f64 {
254 if x.len() != y.len() || x.len() < 2 {
255 return 0.0;
256 }
257
258 let n = x.len() as f64;
259 let mean_x = x.iter().sum::<f64>() / n;
260 let mean_y = y.iter().sum::<f64>() / n;
261
262 let numerator: f64 = x
263 .iter()
264 .zip(y.iter())
265 .map(|(&xi, &yi)| (xi - mean_x) * (yi - mean_y))
266 .sum();
267
268 let sum_sq_x: f64 = x.iter().map(|&xi| (xi - mean_x).powi(2)).sum();
269 let sum_sq_y: f64 = y.iter().map(|&yi| (yi - mean_y).powi(2)).sum();
270
271 let denominator = (sum_sq_x * sum_sq_y).sqrt();
272
273 if denominator > 1e-10 {
274 numerator / denominator
275 } else {
276 0.0
277 }
278 }
279}
280
281impl Default for StatisticalAnalyzer {
282 fn default() -> Self {
283 Self::new()
284 }
285}