// quantrs2_device/dynamical_decoupling/fallback_scirs2.rs
use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2};
use std::collections::HashMap;

/// Result of a `minimize` call, mirroring the field names of SciPy's
/// `OptimizeResult` so callers written against SciPy conventions port easily.
#[derive(Debug, Clone)]
pub struct OptimizeResult {
    /// Best parameter vector found.
    pub x: Array1<f64>,
    /// Objective value at `x`.
    pub fun: f64,
    /// Iteration count reported by the optimizer.
    pub nit: usize,
    /// Number of objective-function evaluations.
    pub nfev: usize,
    /// Whether the optimizer reports success.
    pub success: bool,
    /// Human-readable status message.
    pub message: String,
}
22
23pub fn minimize<F>(
25 objective: F,
26 initial: &Array1<f64>,
27 _method: &str,
28) -> Result<OptimizeResult, String>
29where
30 F: Fn(&Array1<f64>) -> f64,
31{
32 let mut current_x = initial.clone();
34 let mut current_f = objective(¤t_x);
35 let n_params = initial.len();
36
37 let mut step_size = 0.1;
38 let max_iterations = 1000;
39 let tolerance = 1e-6;
40
41 let mut nfev = 1;
42
43 for _iteration in 0..max_iterations {
44 let mut improved = false;
45
46 for i in 0..n_params {
48 let mut test_x = current_x.clone();
50 test_x[i] += step_size;
51 let test_f = objective(&test_x);
52 nfev += 1;
53
54 if test_f < current_f {
55 current_x = test_x;
56 current_f = test_f;
57 improved = true;
58 continue;
59 }
60
61 let mut test_x = current_x.clone();
63 test_x[i] -= step_size;
64 let test_f = objective(&test_x);
65 nfev += 1;
66
67 if test_f < current_f {
68 current_x = test_x;
69 current_f = test_f;
70 improved = true;
71 }
72 }
73
74 if !improved {
75 step_size *= 0.5;
77 if step_size < tolerance {
78 break;
79 }
80 }
81 }
82
83 Ok(OptimizeResult {
84 x: current_x,
85 fun: current_f,
86 nit: max_iterations,
87 nfev,
88 success: true,
89 message: "Optimization completed".to_string(),
90 })
91}
92
93pub fn mean(data: &ArrayView1<f64>) -> Result<f64, String> {
95 if data.is_empty() {
96 return Err("Cannot compute mean of empty array".to_string());
97 }
98 Ok(data.sum() / data.len() as f64)
99}
100
101pub fn std(data: &ArrayView1<f64>, ddof: i32, _workers: Option<usize>) -> Result<f64, String> {
102 if data.len() <= ddof as usize {
103 return Err("Insufficient data for standard deviation calculation".to_string());
104 }
105
106 let mean_val = mean(data)?;
107 let variance = data.iter().map(|x| (x - mean_val).powi(2)).sum::<f64>()
108 / (data.len() as f64 - ddof as f64);
109
110 Ok(variance.sqrt())
111}
112
113pub fn var(data: &ArrayView1<f64>, ddof: i32, _workers: Option<usize>) -> Result<f64, String> {
114 if data.len() <= ddof as usize {
115 return Err("Insufficient data for variance calculation".to_string());
116 }
117
118 let mean_val = mean(data)?;
119 let variance = data.iter().map(|x| (x - mean_val).powi(2)).sum::<f64>()
120 / (data.len() as f64 - ddof as f64);
121
122 Ok(variance)
123}
124
125pub fn pearsonr(x: &ArrayView1<f64>, y: &ArrayView1<f64>) -> Result<f64, String> {
126 if x.len() != y.len() {
127 return Err("Arrays must have same length".to_string());
128 }
129
130 if x.len() < 2 {
131 return Err("Need at least 2 data points".to_string());
132 }
133
134 let mean_x = mean(x)?;
135 let mean_y = mean(y)?;
136
137 let numerator: f64 = x
138 .iter()
139 .zip(y.iter())
140 .map(|(&xi, &yi)| (xi - mean_x) * (yi - mean_y))
141 .sum();
142
143 let sum_sq_x: f64 = x.iter().map(|&xi| (xi - mean_x).powi(2)).sum();
144 let sum_sq_y: f64 = y.iter().map(|&yi| (yi - mean_y).powi(2)).sum();
145
146 let denominator = (sum_sq_x * sum_sq_y).sqrt();
147
148 if denominator == 0.0 {
149 Ok(0.0)
150 } else {
151 Ok(numerator / denominator)
152 }
153}
154
155pub fn spearmanr(x: &ArrayView1<f64>, y: &ArrayView1<f64>) -> Result<f64, String> {
156 if x.len() != y.len() {
157 return Err("Arrays must have same length".to_string());
158 }
159
160 let x_ranks = rank_array(x);
162 let y_ranks = rank_array(y);
163
164 let x_ranks_view = x_ranks.view();
165 let y_ranks_view = y_ranks.view();
166
167 pearsonr(&x_ranks_view, &y_ranks_view)
168}
169
170fn rank_array(data: &ArrayView1<f64>) -> Array1<f64> {
171 let mut indexed_data: Vec<(usize, f64)> =
172 data.iter().enumerate().map(|(i, &x)| (i, x)).collect();
173 indexed_data.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
174
175 let mut ranks = Array1::zeros(data.len());
176 for (rank, &(index, _)) in indexed_data.iter().enumerate() {
177 ranks[index] = rank as f64 + 1.0;
178 }
179
180 ranks
181}
182
183pub fn ttest_1samp(data: &ArrayView1<f64>, pop_mean: f64) -> Result<(f64, f64), String> {
184 if data.len() < 2 {
185 return Err("Need at least 2 data points for t-test".to_string());
186 }
187
188 let sample_mean = mean(data)?;
189 let sample_std = std(data, 1, None)?;
190 let n = data.len() as f64;
191
192 let t_statistic = (sample_mean - pop_mean) / (sample_std / n.sqrt());
193
194 let df = n - 1.0;
196 let p_value = 2.0 * (1.0 - normal_cdf(t_statistic.abs()));
197
198 Ok((t_statistic, p_value))
199}
200
201pub fn ks_2samp(x: &ArrayView1<f64>, y: &ArrayView1<f64>) -> Result<(f64, f64), String> {
202 if x.is_empty() || y.is_empty() {
203 return Err("Both samples must be non-empty".to_string());
204 }
205
206 let mut x_sorted = x.to_vec();
207 let mut y_sorted = y.to_vec();
208 x_sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
209 y_sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
210
211 let mut all_values = x_sorted.clone();
212 all_values.extend_from_slice(&y_sorted);
213 all_values.sort_by(|a, b| a.partial_cmp(b).unwrap());
214 all_values.dedup();
215
216 let mut max_diff = 0.0f64;
217
218 for &value in &all_values {
219 let cdf_x = x_sorted.iter().filter(|&&x| x <= value).count() as f64 / x_sorted.len() as f64;
220 let cdf_y = y_sorted.iter().filter(|&&y| y <= value).count() as f64 / y_sorted.len() as f64;
221 let diff = (cdf_x - cdf_y).abs();
222 max_diff = max_diff.max(diff);
223 }
224
225 let n_x = x.len() as f64;
227 let n_y = y.len() as f64;
228 let sqrt_term = ((n_x + n_y) / (n_x * n_y)).sqrt();
229 let ks_statistic = max_diff;
230 let p_value = 2.0f64 * (-2.0f64 * ks_statistic.powi(2) / sqrt_term.powi(2)).exp();
231
232 Ok((ks_statistic, p_value.min(1.0)))
233}
234
/// Approximate normality test returning `(w_statistic, p_value)`.
///
/// NOTE(review): despite the name, this is NOT the real Shapiro-Wilk test.
/// It is a crude moment-based heuristic: W is derived from sample skewness
/// and excess kurtosis, and the p-value is a coarse three-bucket lookup.
/// Treat the result as a rough screening value only.
///
/// # Errors
/// Returns an error when `data` has fewer than 3 or more than 5000
/// observations (matching the classical test's supported range).
pub fn shapiro_wilk(data: &ArrayView1<f64>) -> Result<(f64, f64), String> {
    if data.len() < 3 || data.len() > 5000 {
        return Err("Shapiro-Wilk test requires 3-5000 observations".to_string());
    }

    let mean_val = mean(data)?;
    let std_val = std(data, 1, None)?;

    let n = data.len() as f64;
    // Sample skewness: mean of standardized cubes.
    let skewness = data
        .iter()
        .map(|&x| ((x - mean_val) / std_val).powi(3))
        .sum::<f64>()
        / n;

    // Excess kurtosis: mean of standardized fourth powers minus 3.
    let kurtosis = data
        .iter()
        .map(|&x| ((x - mean_val) / std_val).powi(4))
        .sum::<f64>()
        / n
        - 3.0;

    // Heuristic: penalize W by the combined magnitude of both moments,
    // then clamp into [0, 1]. The /10.0 scale is ad hoc.
    let w_statistic = 1.0 - (skewness.abs() + kurtosis.abs()) / 10.0;
    let w_statistic = w_statistic.max(0.0).min(1.0);

    // Coarse bucketed p-value, not a calibrated distribution lookup.
    let p_value = if w_statistic > 0.95 {
        0.5
    } else if w_statistic > 0.9 {
        0.1
    } else {
        0.01
    };

    Ok((w_statistic, p_value))
}
274
275pub fn percentile(data: &ArrayView1<f64>, percentile: f64) -> Result<f64, String> {
276 if data.is_empty() {
277 return Err("Cannot compute percentile of empty array".to_string());
278 }
279
280 if percentile < 0.0 || percentile > 100.0 {
281 return Err("Percentile must be between 0 and 100".to_string());
282 }
283
284 let mut sorted_data = data.to_vec();
285 sorted_data.sort_by(|a, b| a.partial_cmp(b).unwrap());
286
287 let index = (percentile / 100.0) * (sorted_data.len() - 1) as f64;
288 let lower_index = index.floor() as usize;
289 let upper_index = index.ceil() as usize;
290
291 if lower_index == upper_index {
292 Ok(sorted_data[lower_index])
293 } else {
294 let weight = index - lower_index as f64;
295 Ok(sorted_data[lower_index] * (1.0 - weight) + sorted_data[upper_index] * weight)
296 }
297}
298
299pub fn trace(matrix: &ArrayView2<f64>) -> Result<f64, String> {
300 let (rows, cols) = matrix.dim();
301 if rows != cols {
302 return Err("Matrix must be square for trace calculation".to_string());
303 }
304
305 let mut trace_val = 0.0;
306 for i in 0..rows {
307 trace_val += matrix[(i, i)];
308 }
309
310 Ok(trace_val)
311}
312
313pub fn inv(matrix: &ArrayView2<f64>) -> Result<Array2<f64>, String> {
314 let (rows, cols) = matrix.dim();
315 if rows != cols {
316 return Err("Matrix must be square for inversion".to_string());
317 }
318
319 Ok(Array2::eye(rows))
322}
323
/// Standard normal CDF via the identity Φ(x) = (1 + erf(x/√2)) / 2,
/// inheriting the accuracy of the `erf` approximation below.
fn normal_cdf(x: f64) -> f64 {
    0.5 * (1.0 + erf(x / 2.0_f64.sqrt()))
}
329
/// Error function approximated with Abramowitz & Stegun formula 7.1.26
/// (maximum absolute error about 1.5e-7). Odd symmetry is applied
/// explicitly: erf(-x) = -erf(x).
fn erf(x: f64) -> f64 {
    const A1: f64 = 0.254829592;
    const A2: f64 = -0.284496736;
    const A3: f64 = 1.421413741;
    const A4: f64 = -1.453152027;
    const A5: f64 = 1.061405429;
    const P: f64 = 0.3275911;

    let sign = if x < 0.0 { -1.0 } else { 1.0 };
    let x = x.abs();

    // Rational substitution t = 1/(1 + p·x), then a degree-5 polynomial
    // in t damped by exp(-x²).
    let t = 1.0 / (1.0 + P * x);
    let poly = (((((A5 * t + A4) * t) + A3) * t + A2) * t + A1) * t;
    let y = 1.0 - poly * (-x * x).exp();

    sign * y
}
348
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::Array1;

    // Mean of 1..=5 is exactly 3.
    #[test]
    fn test_mean() {
        let data = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        let result = mean(&data.view()).unwrap();
        assert!((result - 3.0).abs() < 1e-10);
    }

    // Sample std (ddof = 1) of 1..=5 is sqrt(2.5) ≈ 1.5811.
    #[test]
    fn test_std() {
        let data = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        let result = std(&data.view(), 1, None).unwrap();
        assert!((result - 1.5811388300841898).abs() < 1e-10);
    }

    // Perfectly linear data (y = 2x) has Pearson correlation exactly 1.
    #[test]
    fn test_pearsonr() {
        let x = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        let y = Array1::from_vec(vec![2.0, 4.0, 6.0, 8.0, 10.0]);
        let result = pearsonr(&x.view(), &y.view()).unwrap();
        assert!((result - 1.0).abs() < 1e-10);
    }

    // Coordinate descent should approach the quadratic's minimum at x = 2
    // from a start of 0 (loose 0.5 tolerance: the optimizer is coarse).
    #[test]
    fn test_minimize() {
        let initial = Array1::from_vec(vec![0.0]);
        let objective = |x: &Array1<f64>| (x[0] - 2.0).powi(2);

        let result = minimize(objective, &initial, "nelder-mead").unwrap();

        assert!((result.x[0] - 2.0).abs() < 0.5);
        assert!(result.success);
    }
}
389}