1use crate::fdata::mean_1d;
15use crate::iter_maybe_parallel;
16use crate::matrix::FdMatrix;
17use crate::regression::fdata_to_pc_1d;
18use crate::smoothing::local_polynomial;
19use rand::prelude::*;
20use rand_distr::StandardNormal;
21#[cfg(feature = "parallel")]
22use rayon::iter::ParallelIterator;
23
/// A functional band on a common evaluation grid: a center curve with
/// lower and upper envelopes.
#[derive(Debug, Clone)]
pub struct ToleranceBand {
    /// Lower envelope at each grid point.
    pub lower: Vec<f64>,
    /// Upper envelope at each grid point.
    pub upper: Vec<f64>,
    /// Central curve the band surrounds.
    pub center: Vec<f64>,
    /// Per-point half-width. For bands built symmetrically this equals
    /// `upper - center`; after a nonlinear inverse-link transform it is
    /// recomputed as `(upper - lower) / 2`.
    pub half_width: Vec<f64>,
}
38
/// Which coverage guarantee a band targets.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BandType {
    /// Coverage holds at each grid point separately.
    Pointwise,
    /// Coverage holds for the whole curve simultaneously.
    Simultaneous,
}
47
/// Non-conformity score used by split conformal prediction.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NonConformityScore {
    /// Maximum absolute deviation from the center curve over the grid.
    SupNorm,
    /// Euclidean (L2) norm of the deviation over the grid.
    L2,
}
56
/// Distribution of the i.i.d. weights in a multiplier (wild) bootstrap.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MultiplierDistribution {
    /// Standard normal weights.
    Gaussian,
    /// Weights of +1 or -1 with equal probability.
    Rademacher,
}
65
/// Bootstrap scheme used to calibrate an equivalence test.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EquivalenceBootstrap {
    /// Multiplier bootstrap with the given weight distribution;
    /// statistics are standardized by the (pooled) standard error.
    Multiplier(MultiplierDistribution),
    /// Nonparametric percentile bootstrap (resampling rows with
    /// replacement); statistics stay on the raw scale.
    Percentile,
}
74
/// Outcome of a functional equivalence test.
#[derive(Debug, Clone)]
pub struct EquivalenceTestResult {
    /// Sup-norm of the estimated mean difference.
    pub test_statistic: f64,
    /// Bootstrap p-value for the equivalence hypothesis.
    pub p_value: f64,
    /// Bootstrap critical value used to build the SCB half-width.
    pub critical_value: f64,
    /// Simultaneous confidence band for the mean difference.
    pub scb: ToleranceBand,
    /// True when the whole SCB lies strictly inside `(-delta, delta)`.
    pub equivalent: bool,
    /// Equivalence margin supplied by the caller.
    pub delta: f64,
    /// Significance level supplied by the caller.
    pub alpha: f64,
}
93
/// Exponential-family distributions with their canonical links
/// (identity, logit, log) used by `exponential_family_tolerance_band`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExponentialFamily {
    /// Identity link.
    Gaussian,
    /// Logit link; data treated as probabilities in (0, 1).
    Binomial,
    /// Log link; data treated as positive rates.
    Poisson,
}
104
105fn pointwise_mean_std(data: &FdMatrix) -> (Vec<f64>, Vec<f64>) {
109 let (n, m) = data.shape();
110 let nf = n as f64;
111 let mut means = vec![0.0; m];
112 let mut stds = vec![0.0; m];
113
114 for j in 0..m {
115 let col = data.column(j);
116 let mean = col.iter().sum::<f64>() / nf;
117 means[j] = mean;
118 let var = col.iter().map(|&v| (v - mean) * (v - mean)).sum::<f64>() / (nf - 1.0);
119 stds[j] = var.sqrt();
120 }
121 (means, stds)
122}
123
/// Standard-normal quantile via the Abramowitz-Stegun 26.2.23 rational
/// approximation (absolute error below ~4.5e-4).
///
/// Returns `NaN` for `p` outside the open interval (0, 1) and exactly
/// `0.0` when `p` is (numerically) 0.5.
fn normal_quantile(p: f64) -> f64 {
    if p <= 0.0 || p >= 1.0 {
        return f64::NAN;
    }
    if (p - 0.5).abs() < 1e-15 {
        return 0.0;
    }

    // Work in the upper tail and restore the sign at the end.
    let (sign, q) = if p < 0.5 { (-1.0, 1.0 - p) } else { (1.0, p) };
    let t = (-2.0 * (1.0 - q).ln()).sqrt();

    // Rational-approximation coefficients (Abramowitz & Stegun 26.2.23).
    const A0: f64 = 2.515517;
    const A1: f64 = 0.802853;
    const A2: f64 = 0.010328;
    const B1: f64 = 1.432788;
    const B2: f64 = 0.189269;
    const B3: f64 = 0.001308;

    let num = A0 + A1 * t + A2 * t * t;
    let den = 1.0 + B1 * t + B2 * t * t + B3 * t * t * t;

    sign * (t - num / den)
}
151
152fn build_band(center: Vec<f64>, half_width: Vec<f64>) -> ToleranceBand {
154 let lower: Vec<f64> = center
155 .iter()
156 .zip(half_width.iter())
157 .map(|(&c, &h)| c - h)
158 .collect();
159 let upper: Vec<f64> = center
160 .iter()
161 .zip(half_width.iter())
162 .map(|(&c, &h)| c + h)
163 .collect();
164 ToleranceBand {
165 lower,
166 upper,
167 center,
168 half_width,
169 }
170}
171
/// Empirical quantile of `sorted` at level `p` using the "higher" rule
/// (index `ceil(len * p) - 1`). Despite the name, the slice is sorted
/// in place here; NaNs compare as equal and keep their relative order.
///
/// Returns `NaN` for an empty slice. `p` outside (0, 1] is clamped to a
/// valid index instead of panicking on an index underflow/overflow
/// (the previous `min(len) - 1` wrapped for `p <= 0` or an empty slice).
fn percentile_sorted(sorted: &mut [f64], p: f64) -> f64 {
    if sorted.is_empty() {
        return f64::NAN;
    }
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    // `as usize` saturates negative ceilings to 0; clamp keeps the
    // 1-based rank inside [1, len] before converting to a 0-based index.
    let idx = ((sorted.len() as f64 * p).ceil() as usize).clamp(1, sorted.len()) - 1;
    sorted[idx]
}
178
/// Shared sanity checks for band construction: at least three curves, a
/// non-empty grid, positive component and bootstrap counts, and a
/// coverage level strictly inside (0, 1).
fn valid_band_params(n: usize, m: usize, ncomp: usize, nb: usize, coverage: f64) -> bool {
    let counts_ok = n >= 3 && m > 0 && ncomp > 0 && nb > 0;
    let coverage_ok = coverage > 0.0 && coverage < 1.0;
    counts_ok && coverage_ok
}
183
/// Per-component mean and standard deviation of FPCA scores, used to
/// parameterize the Gaussian score-resampling bootstrap.
struct ScoreStats {
    /// Mean score for each principal component.
    means: Vec<f64>,
    /// Sample std (ddof = 1) per component, floored at 1e-15.
    stds: Vec<f64>,
}
191
192fn compute_score_stats(scores: &FdMatrix, n: usize) -> ScoreStats {
194 let ncomp = scores.ncols();
195 let mut means = vec![0.0; ncomp];
196 let mut stds = vec![0.0; ncomp];
197 for k in 0..ncomp {
198 let col = scores.column(k);
199 let mean = col.iter().sum::<f64>() / n as f64;
200 means[k] = mean;
201 let var = col.iter().map(|&v| (v - mean) * (v - mean)).sum::<f64>() / (n as f64 - 1.0);
202 stds[k] = var.sqrt().max(1e-15);
203 }
204 ScoreStats { means, stds }
205}
206
207fn sample_bootstrap_curve(
209 rng: &mut StdRng,
210 mean: &[f64],
211 rotation: &FdMatrix,
212 stats: &ScoreStats,
213 curve: &mut [f64],
214) {
215 let m = mean.len();
216 let ncomp = stats.means.len();
217 curve[..m].copy_from_slice(&mean[..m]);
218 for k in 0..ncomp {
219 let z: f64 = rng.sample(StandardNormal);
220 let score = stats.means[k] + stats.stds[k] * z;
221 for j in 0..m {
222 curve[j] += score * rotation[(j, k)];
223 }
224 }
225}
226
/// Pointwise FPCA bootstrap band.
///
/// Each replicate resamples `n` curves from the fitted FPCA model
/// (Gaussian scores) and records their pointwise standard deviation; the
/// half-width at each grid point is the two-sided normal quantile times
/// the `coverage`-percentile of those bootstrap standard deviations.
///
/// NOTE(review): `coverage` enters twice (the z-quantile AND the
/// percentile over bootstrap stds), which makes the band conservative —
/// confirm this is intentional.
fn fpca_pointwise_boot(
    fpca: &crate::regression::FpcaResult,
    stats: &ScoreStats,
    n: usize,
    m: usize,
    nb: usize,
    coverage: f64,
    seed: u64,
) -> ToleranceBand {
    // One independent, reproducible RNG stream per bootstrap replicate.
    let boot_stds: Vec<Vec<f64>> = iter_maybe_parallel!(0..nb)
        .map(|b| {
            let mut rng = StdRng::seed_from_u64(seed + b as u64);
            let mut curve = vec![0.0; m];
            let mut sum = vec![0.0; m];
            let mut sum_sq = vec![0.0; m];
            for _ in 0..n {
                sample_bootstrap_curve(&mut rng, &fpca.mean, &fpca.rotation, stats, &mut curve);
                for j in 0..m {
                    sum[j] += curve[j];
                    sum_sq[j] += curve[j] * curve[j];
                }
            }
            let nf = n as f64;
            // Population (ddof = 0) std per grid point; `.max(0.0)`
            // absorbs tiny negative variances from cancellation.
            (0..m)
                .map(|j| (sum_sq[j] / nf - (sum[j] / nf).powi(2)).max(0.0).sqrt())
                .collect::<Vec<f64>>()
        })
        .collect();

    let z = normal_quantile((1.0 + coverage) / 2.0);
    let center = fpca.mean.clone();
    let mut half_width = vec![0.0; m];
    for j in 0..m {
        // Percentile across replicates of the bootstrap std at point j.
        let mut stds_j: Vec<f64> = boot_stds.iter().map(|s| s[j]).collect();
        let pct = percentile_sorted(&mut stds_j, coverage);
        half_width[j] = z * pct;
    }
    build_band(center, half_width)
}
267
/// Simultaneous FPCA bootstrap band.
///
/// Centered at the empirical pointwise mean of `data`. Each replicate
/// resamples `n` curves from the FPCA model and records the sup over the
/// grid of the std-standardized deviation from that center; the
/// `coverage`-percentile of these sup norms scales the pointwise data
/// std into a simultaneous half-width.
fn fpca_simultaneous_boot(
    data: &FdMatrix,
    fpca: &crate::regression::FpcaResult,
    stats: &ScoreStats,
    n: usize,
    m: usize,
    nb: usize,
    coverage: f64,
    seed: u64,
) -> ToleranceBand {
    let (center, data_std) = pointwise_mean_std(data);

    let mut sup_norms: Vec<f64> = iter_maybe_parallel!(0..nb)
        .map(|b| {
            // Reproducible per-replicate RNG stream.
            let mut rng = StdRng::seed_from_u64(seed + b as u64);
            let mut max_dev = 0.0_f64;
            let mut curve = vec![0.0; m];
            for _ in 0..n {
                sample_bootstrap_curve(&mut rng, &fpca.mean, &fpca.rotation, stats, &mut curve);
                // Sup over the grid of |deviation| / std, with the std
                // floored to avoid division by ~0.
                let dev = (0..m)
                    .map(|j| (curve[j] - center[j]).abs() / data_std[j].max(1e-15))
                    .fold(0.0_f64, f64::max);
                max_dev = max_dev.max(dev);
            }
            max_dev
        })
        .collect();

    let k_factor = percentile_sorted(&mut sup_norms, coverage);
    let half_width: Vec<f64> = data_std.iter().map(|&s| k_factor * s).collect();
    build_band(center, half_width)
}
301
302pub fn fpca_tolerance_band(
332 data: &FdMatrix,
333 ncomp: usize,
334 nb: usize,
335 coverage: f64,
336 band_type: BandType,
337 seed: u64,
338) -> Option<ToleranceBand> {
339 let (n, m) = data.shape();
340 if !valid_band_params(n, m, ncomp, nb, coverage) {
341 return None;
342 }
343
344 let fpca = fdata_to_pc_1d(data, ncomp)?;
345 let stats = compute_score_stats(&fpca.scores, n);
346
347 Some(match band_type {
348 BandType::Pointwise => fpca_pointwise_boot(&fpca, &stats, n, m, nb, coverage, seed),
349 BandType::Simultaneous => {
350 fpca_simultaneous_boot(data, &fpca, &stats, n, m, nb, coverage, seed)
351 }
352 })
353}
354
355fn nonconformity_score(
359 data: &FdMatrix,
360 i: usize,
361 center: &[f64],
362 m: usize,
363 score_type: NonConformityScore,
364) -> f64 {
365 match score_type {
366 NonConformityScore::SupNorm => (0..m)
367 .map(|j| (data[(i, j)] - center[j]).abs())
368 .fold(0.0_f64, f64::max),
369 NonConformityScore::L2 => {
370 let ss: f64 = (0..m).map(|j| (data[(i, j)] - center[j]).powi(2)).sum();
371 ss.sqrt()
372 }
373 }
374}
375
376fn subset_rows(data: &FdMatrix, indices: &[usize], m: usize) -> FdMatrix {
378 let n_sub = indices.len();
379 let mut sub = FdMatrix::zeros(n_sub, m);
380 for (new_i, &old_i) in indices.iter().enumerate() {
381 for j in 0..m {
382 sub[(new_i, j)] = data[(old_i, j)];
383 }
384 }
385 sub
386}
387
/// Split conformal prediction band.
///
/// Rows are shuffled with a seeded RNG and split into a training part
/// (used only to estimate the mean curve) and a calibration part; the
/// band half-width comes from the finite-sample conformal quantile of
/// the calibration non-conformity scores. `SupNorm` yields a
/// constant-width band directly; for `L2` the scalar score is spread
/// uniformly over the grid (divided by sqrt(m)).
///
/// Returns `None` for fewer than 4 curves, an empty grid, or
/// `cal_fraction`/`coverage` outside (0, 1).
pub fn conformal_prediction_band(
    data: &FdMatrix,
    cal_fraction: f64,
    coverage: f64,
    score_type: NonConformityScore,
    seed: u64,
) -> Option<ToleranceBand> {
    let (n, m) = data.shape();
    if n < 4
        || m == 0
        || cal_fraction <= 0.0
        || cal_fraction >= 1.0
        || coverage <= 0.0
        || coverage >= 1.0
    {
        return None;
    }

    // Calibration size kept in [1, n - 2] so training retains >= 2 rows.
    let n_cal = ((n as f64) * cal_fraction).max(1.0).min((n - 2) as f64) as usize;
    let n_train = n - n_cal;

    // Seeded shuffle -> deterministic train/calibration split.
    let mut indices: Vec<usize> = (0..n).collect();
    let mut rng = StdRng::seed_from_u64(seed);
    indices.shuffle(&mut rng);

    let train_data = subset_rows(data, &indices[..n_train], m);
    let center = mean_1d(&train_data);

    let cal_idx = &indices[n_train..];
    let mut scores: Vec<f64> = cal_idx
        .iter()
        .map(|&i| nonconformity_score(data, i, &center, m, score_type))
        .collect();

    // Finite-sample conformal level: ceil((n_cal + 1) * coverage) / n_cal.
    let level = (((n_cal + 1) as f64 * coverage).ceil() / n_cal as f64).min(1.0);
    let q = percentile_sorted(&mut scores, level);

    let half_width = match score_type {
        NonConformityScore::SupNorm => vec![q; m],
        NonConformityScore::L2 => vec![q / (m as f64).sqrt(); m],
    };

    Some(build_band(center, half_width))
}
466
467fn residual_sigma(data: &FdMatrix, center: &[f64], n: usize, m: usize) -> Vec<f64> {
471 let nf_minus1 = (n as f64 - 1.0).max(1.0);
472 (0..m)
473 .map(|j| {
474 let var: f64 = (0..n).map(|i| (data[(i, j)] - center[j]).powi(2)).sum();
475 (var / nf_minus1).sqrt().max(1e-15)
476 })
477 .collect()
478}
479
480fn generate_multiplier_weights(
482 rng: &mut StdRng,
483 n: usize,
484 multiplier: MultiplierDistribution,
485) -> Vec<f64> {
486 (0..n)
487 .map(|_| match multiplier {
488 MultiplierDistribution::Gaussian => rng.sample::<f64, _>(StandardNormal),
489 MultiplierDistribution::Rademacher => {
490 if rng.gen_bool(0.5) {
491 1.0
492 } else {
493 -1.0
494 }
495 }
496 })
497 .collect()
498}
499
/// Simultaneous confidence band for the mean function (Degras-style):
/// the mean is smoothed with a degree-1 local polynomial, and the band
/// width is calibrated by a multiplier bootstrap of the standardized
/// sup statistic.
///
/// Returns `None` for fewer than 3 curves, empty/mismatched grids,
/// non-positive bandwidth, zero replicates, or confidence outside (0, 1).
///
/// NOTE(review): the bootstrap seed is hard-coded (`42 + replicate`), so
/// results are deterministic but not caller-seedable — confirm intended.
pub fn scb_mean_degras(
    data: &FdMatrix,
    argvals: &[f64],
    bandwidth: f64,
    nb: usize,
    confidence: f64,
    multiplier: MultiplierDistribution,
) -> Option<ToleranceBand> {
    let (n, m) = data.shape();
    if n < 3
        || m == 0
        || argvals.len() != m
        || bandwidth <= 0.0
        || nb == 0
        || confidence <= 0.0
        || confidence >= 1.0
    {
        return None;
    }

    // Smooth the raw pointwise mean with an Epanechnikov local line.
    let raw_mean = mean_1d(data);
    let center = local_polynomial(argvals, &raw_mean, argvals, bandwidth, 1, "epanechnikov");
    let sigma_hat = residual_sigma(data, &center, n, m);

    let sqrt_n = (n as f64).sqrt();
    let mut sup_stats: Vec<f64> = iter_maybe_parallel!(0..nb)
        .map(|b| {
            let mut rng = StdRng::seed_from_u64(42 + b as u64);
            let weights = generate_multiplier_weights(&mut rng, n, multiplier);
            // Sup over the grid of the standardized multiplier process.
            (0..m)
                .map(|j| {
                    let z: f64 = (0..n)
                        .map(|i| weights[i] * (data[(i, j)] - center[j]))
                        .sum::<f64>()
                        / (sqrt_n * sigma_hat[j]);
                    z.abs()
                })
                .fold(0.0_f64, f64::max)
        })
        .collect();

    let c = percentile_sorted(&mut sup_stats, confidence);
    // Half-width: critical value times the pointwise standard error.
    let half_width: Vec<f64> = sigma_hat.iter().map(|&s| c * s / sqrt_n).collect();

    Some(build_band(center, half_width))
}
575
576fn apply_link(value: f64, family: ExponentialFamily) -> f64 {
580 match family {
581 ExponentialFamily::Gaussian => value,
582 ExponentialFamily::Binomial => {
583 let p = value.clamp(1e-10, 1.0 - 1e-10);
585 (p / (1.0 - p)).ln()
586 }
587 ExponentialFamily::Poisson => {
588 value.max(1e-10).ln()
590 }
591 }
592}
593
594fn apply_inverse_link(value: f64, family: ExponentialFamily) -> f64 {
596 match family {
597 ExponentialFamily::Gaussian => value,
598 ExponentialFamily::Binomial => {
599 1.0 / (1.0 + (-value).exp())
601 }
602 ExponentialFamily::Poisson => {
603 value.exp()
605 }
606 }
607}
608
609fn transform_data(data: &FdMatrix, family: ExponentialFamily) -> FdMatrix {
611 let (n, m) = data.shape();
612 let mut out = FdMatrix::zeros(n, m);
613 for j in 0..m {
614 for i in 0..n {
615 out[(i, j)] = apply_link(data[(i, j)], family);
616 }
617 }
618 out
619}
620
621fn inverse_link_band(band: ToleranceBand, family: ExponentialFamily) -> ToleranceBand {
623 let lower: Vec<f64> = band
624 .lower
625 .iter()
626 .map(|&v| apply_inverse_link(v, family))
627 .collect();
628 let upper: Vec<f64> = band
629 .upper
630 .iter()
631 .map(|&v| apply_inverse_link(v, family))
632 .collect();
633 let center: Vec<f64> = band
634 .center
635 .iter()
636 .map(|&v| apply_inverse_link(v, family))
637 .collect();
638 let half_width: Vec<f64> = upper
639 .iter()
640 .zip(lower.iter())
641 .map(|(&u, &l)| (u - l) / 2.0)
642 .collect();
643 ToleranceBand {
644 lower,
645 upper,
646 center,
647 half_width,
648 }
649}
650
651pub fn exponential_family_tolerance_band(
691 data: &FdMatrix,
692 family: ExponentialFamily,
693 ncomp: usize,
694 nb: usize,
695 coverage: f64,
696 seed: u64,
697) -> Option<ToleranceBand> {
698 let (n, m) = data.shape();
699 if !valid_band_params(n, m, ncomp, nb, coverage) {
700 return None;
701 }
702
703 let transformed = transform_data(data, family);
704 let band = fpca_tolerance_band(
705 &transformed,
706 ncomp,
707 nb,
708 coverage,
709 BandType::Simultaneous,
710 seed,
711 )?;
712 Some(inverse_link_band(band, family))
713}
714
715pub fn elastic_tolerance_band(
750 data: &FdMatrix,
751 argvals: &[f64],
752 ncomp: usize,
753 nb: usize,
754 coverage: f64,
755 band_type: BandType,
756 max_iter: usize,
757 seed: u64,
758) -> Option<ToleranceBand> {
759 let (n, m) = data.shape();
760 if !valid_band_params(n, m, ncomp, nb, coverage) || argvals.len() != m || max_iter == 0 {
761 return None;
762 }
763
764 let karcher = crate::alignment::karcher_mean(data, argvals, max_iter, 1e-4, 0.0);
766
767 fpca_tolerance_band(&karcher.aligned_data, ncomp, nb, coverage, band_type, seed)
769}
770
771fn equivalence_multiplier_sup_stats(
775 centered1: &FdMatrix,
776 centered2: &FdMatrix,
777 pooled_se: &[f64],
778 n1: usize,
779 n2: usize,
780 m: usize,
781 nb: usize,
782 multiplier: MultiplierDistribution,
783 seed: u64,
784) -> Vec<f64> {
785 let n1f = n1 as f64;
786 let n2f = n2 as f64;
787 iter_maybe_parallel!(0..nb)
788 .map(|b| {
789 let mut rng = StdRng::seed_from_u64(seed + b as u64);
790 let g1 = generate_multiplier_weights(&mut rng, n1, multiplier);
791 let g2 = generate_multiplier_weights(&mut rng, n2, multiplier);
792 (0..m)
793 .map(|j| {
794 let s1: f64 = (0..n1).map(|i| g1[i] * centered1[(i, j)]).sum::<f64>() / n1f;
795 let s2: f64 = (0..n2).map(|i| g2[i] * centered2[(i, j)]).sum::<f64>() / n2f;
796 ((s1 - s2) / pooled_se[j]).abs()
797 })
798 .fold(0.0_f64, f64::max)
799 })
800 .collect()
801}
802
803fn equivalence_percentile_sup_stats(
805 data1: &FdMatrix,
806 data2: &FdMatrix,
807 d_hat: &[f64],
808 n1: usize,
809 n2: usize,
810 m: usize,
811 nb: usize,
812 seed: u64,
813) -> Vec<f64> {
814 iter_maybe_parallel!(0..nb)
815 .map(|b| {
816 let mut rng = StdRng::seed_from_u64(seed + b as u64);
817 let idx1: Vec<usize> = (0..n1).map(|_| rng.gen_range(0..n1)).collect();
818 let idx2: Vec<usize> = (0..n2).map(|_| rng.gen_range(0..n2)).collect();
819 (0..m)
820 .map(|j| {
821 let m1: f64 = idx1.iter().map(|&i| data1[(i, j)]).sum::<f64>() / n1 as f64;
822 let m2: f64 = idx2.iter().map(|&i| data2[(i, j)]).sum::<f64>() / n2 as f64;
823 ((m1 - m2) - d_hat[j]).abs()
824 })
825 .fold(0.0_f64, f64::max)
826 })
827 .collect()
828}
829
/// Bootstrap sup statistics for the one-sample multiplier bootstrap:
/// per replicate, the sup over the grid of
/// |sum_i g_i * r_ij| / (sqrt(n) * sigma_j) on the centered residuals.
fn equivalence_one_sample_multiplier_stats(
    centered: &FdMatrix,
    sigma: &[f64],
    n: usize,
    m: usize,
    nb: usize,
    multiplier: MultiplierDistribution,
    seed: u64,
) -> Vec<f64> {
    let sqrt_n = (n as f64).sqrt();
    iter_maybe_parallel!(0..nb)
        .map(|b| {
            // Reproducible per-replicate RNG stream.
            let mut rng = StdRng::seed_from_u64(seed + b as u64);
            let g = generate_multiplier_weights(&mut rng, n, multiplier);
            (0..m)
                .map(|j| {
                    let z: f64 =
                        (0..n).map(|i| g[i] * centered[(i, j)]).sum::<f64>() / (sqrt_n * sigma[j]);
                    z.abs()
                })
                .fold(0.0_f64, f64::max)
        })
        .collect()
}
855
/// Bootstrap sup statistics for the one-sample percentile bootstrap:
/// per replicate, the sup over the grid of
/// |(bootstrap mean - mu0) - d_hat|.
fn equivalence_one_sample_percentile_stats(
    data: &FdMatrix,
    mu0: &[f64],
    d_hat: &[f64],
    n: usize,
    m: usize,
    nb: usize,
    seed: u64,
) -> Vec<f64> {
    iter_maybe_parallel!(0..nb)
        .map(|b| {
            // Seeded per-replicate resampling with replacement.
            let mut rng = StdRng::seed_from_u64(seed + b as u64);
            let idx: Vec<usize> = (0..n).map(|_| rng.gen_range(0..n)).collect();
            (0..m)
                .map(|j| {
                    let boot_mean: f64 = idx.iter().map(|&i| data[(i, j)]).sum::<f64>() / n as f64;
                    ((boot_mean - mu0[j]) - d_hat[j]).abs()
                })
                .fold(0.0_f64, f64::max)
        })
        .collect()
}
879
/// Validate equivalence-test inputs: a positive margin, `alpha` in the
/// open interval (0, 0.5), and at least one bootstrap replicate.
fn valid_equivalence_params(delta: f64, alpha: f64, nb: usize) -> bool {
    let margin_ok = delta > 0.0;
    let alpha_ok = alpha > 0.0 && alpha < 0.5;
    margin_ok && alpha_ok && nb > 0
}
884
/// Assemble an `EquivalenceTestResult` from bootstrap sup statistics.
///
/// The SCB uses the (1 - 2*alpha) bootstrap quantile, matching the TOST
/// duality for equivalence testing. A non-empty `se` signals that the
/// statistics are standardized, so the half-width is rescaled by `se`;
/// an empty `se` means raw-scale (percentile) statistics.
/// `equivalent` holds when the whole SCB lies strictly inside
/// `(-delta, delta)`.
///
/// The p-value is the bootstrap tail probability of exceeding the
/// largest critical value still compatible with equivalence; it is 1
/// when the observed difference already violates the margin.
fn build_equivalence_result(
    mut sup_stats: Vec<f64>,
    d_hat: Vec<f64>,
    se: &[f64],
    delta: f64,
    alpha: f64,
    nb: usize,
) -> EquivalenceTestResult {
    let m = d_hat.len();
    // Observed statistic: sup-norm of the estimated difference.
    let test_statistic = d_hat.iter().fold(0.0_f64, |a, &v| a.max(v.abs()));
    let use_se = !se.is_empty();
    // Sorts `sup_stats` in place as a side effect.
    let c_alpha = percentile_sorted(&mut sup_stats, 1.0 - 2.0 * alpha);

    let half_width: Vec<f64> = if use_se {
        se.iter().map(|&s| c_alpha * s).collect()
    } else {
        vec![c_alpha; m]
    };
    let scb = build_band(d_hat.clone(), half_width);

    let equivalent = scb.upper.iter().all(|&u| u < delta) && scb.lower.iter().all(|&l| l > -delta);

    // Largest critical value at which the SCB would still fit in the
    // margin (standardized per-point when `se` is available).
    let c_threshold = if use_se {
        (0..m)
            .map(|j| (delta - d_hat[j].abs()) / se[j])
            .fold(f64::INFINITY, f64::min)
    } else {
        delta - test_statistic
    };
    let p_value = if c_threshold <= 0.0 {
        1.0
    } else {
        let count = sup_stats.iter().filter(|&&t| t >= c_threshold).count();
        (count as f64 / nb as f64).min(1.0)
    };

    EquivalenceTestResult {
        test_statistic,
        p_value,
        critical_value: c_alpha,
        scb,
        equivalent,
        delta,
        alpha,
    }
}
935
/// Two-sample equivalence test for mean functions.
///
/// Tests whether sup_t |mean1(t) - mean2(t)| < delta by building a
/// (1 - 2*alpha) simultaneous confidence band for the difference,
/// calibrated with the chosen bootstrap. Returns `None` for fewer than
/// 3 curves per sample, mismatched or empty grids, or invalid
/// `delta`/`alpha`/`nb`.
pub fn equivalence_test(
    data1: &FdMatrix,
    data2: &FdMatrix,
    delta: f64,
    alpha: f64,
    nb: usize,
    bootstrap: EquivalenceBootstrap,
    seed: u64,
) -> Option<EquivalenceTestResult> {
    let (n1, m1) = data1.shape();
    let (n2, m2) = data2.shape();
    if n1 < 3 || n2 < 3 || m1 != m2 || m1 == 0 || !valid_equivalence_params(delta, alpha, nb) {
        return None;
    }
    let m = m1;

    // Estimated mean difference on the common grid.
    let mean1 = mean_1d(data1);
    let mean2 = mean_1d(data2);
    let d_hat: Vec<f64> = (0..m).map(|j| mean1[j] - mean2[j]).collect();

    let (sup_stats, se) = match bootstrap {
        EquivalenceBootstrap::Multiplier(mult) => {
            let c1 = crate::fdata::center_1d(data1);
            let c2 = crate::fdata::center_1d(data2);
            let sig1 = residual_sigma(data1, &mean1, n1, m);
            let sig2 = residual_sigma(data2, &mean2, n2, m);
            // Pooled standard error of the mean difference per point.
            let pse: Vec<f64> = (0..m)
                .map(|j| {
                    (sig1[j] * sig1[j] / n1 as f64 + sig2[j] * sig2[j] / n2 as f64)
                        .sqrt()
                        .max(1e-15)
                })
                .collect();
            let stats = equivalence_multiplier_sup_stats(&c1, &c2, &pse, n1, n2, m, nb, mult, seed);
            (stats, pse)
        }
        EquivalenceBootstrap::Percentile => {
            let stats = equivalence_percentile_sup_stats(data1, data2, &d_hat, n1, n2, m, nb, seed);
            // Empty `se` tells `build_equivalence_result` the statistics
            // are on the raw (unstandardized) scale.
            (stats, vec![])
        }
    };

    Some(build_equivalence_result(
        sup_stats, d_hat, &se, delta, alpha, nb,
    ))
}
998
/// One-sample equivalence test of a mean function against a reference
/// curve `mu0`.
///
/// Tests whether sup_t |mean(t) - mu0(t)| < delta using a (1 - 2*alpha)
/// simultaneous confidence band for the difference, calibrated with the
/// chosen bootstrap. Returns `None` for fewer than 3 curves, an empty
/// grid, a `mu0` length mismatch, or invalid `delta`/`alpha`/`nb`.
pub fn equivalence_test_one_sample(
    data: &FdMatrix,
    mu0: &[f64],
    delta: f64,
    alpha: f64,
    nb: usize,
    bootstrap: EquivalenceBootstrap,
    seed: u64,
) -> Option<EquivalenceTestResult> {
    let (n, m) = data.shape();
    if n < 3 || m == 0 || mu0.len() != m || !valid_equivalence_params(delta, alpha, nb) {
        return None;
    }

    // Estimated difference from the reference curve.
    let mean = mean_1d(data);
    let d_hat: Vec<f64> = (0..m).map(|j| mean[j] - mu0[j]).collect();

    let (sup_stats, se) = match bootstrap {
        EquivalenceBootstrap::Multiplier(mult) => {
            let centered = crate::fdata::center_1d(data);
            let sigma = residual_sigma(data, &mean, n, m);
            // Standard error of the mean per grid point.
            let se: Vec<f64> = sigma
                .iter()
                .map(|&s| (s / (n as f64).sqrt()).max(1e-15))
                .collect();
            let stats =
                equivalence_one_sample_multiplier_stats(&centered, &sigma, n, m, nb, mult, seed);
            (stats, se)
        }
        EquivalenceBootstrap::Percentile => {
            let stats = equivalence_one_sample_percentile_stats(data, mu0, &d_hat, n, m, nb, seed);
            // Empty `se` signals raw-scale statistics downstream.
            (stats, vec![])
        }
    };

    Some(build_equivalence_result(
        sup_stats, d_hat, &se, delta, alpha, nb,
    ))
}
1054
1055#[cfg(test)]
1058mod tests {
1059 use super::*;
1060 use crate::simulation::{sim_fundata, EFunType, EValType};
1061
    /// Evenly spaced grid on [0, 1] with `m` points.
    /// NOTE(review): `m < 2` divides by zero (NaN entries) — callers
    /// here always pass m >= 30.
    fn uniform_grid(m: usize) -> Vec<f64> {
        (0..m).map(|i| i as f64 / (m - 1) as f64).collect()
    }
1065
    /// 50 seeded Fourier-basis curves on a 50-point uniform grid.
    fn make_test_data() -> FdMatrix {
        let m = 50;
        let t = uniform_grid(m);
        sim_fundata(
            50,
            &t,
            5,
            EFunType::Fourier,
            EValType::Exponential,
            Some(42),
        )
    }
1078
1079 #[test]
1082 fn test_normal_quantile_symmetry() {
1083 for &p in &[0.1, 0.2, 0.3, 0.4] {
1084 let q_low = normal_quantile(p);
1085 let q_high = normal_quantile(1.0 - p);
1086 assert!(
1087 (q_low + q_high).abs() < 1e-6,
1088 "q({p}) + q({}) = {} (expected ~0)",
1089 1.0 - p,
1090 q_low + q_high
1091 );
1092 }
1093 }
1094
    // Spot-check q(0.975) ~ 1.96, q(0.5) = 0, and NaN outside (0, 1).
    #[test]
    fn test_normal_quantile_known_values() {
        let q975 = normal_quantile(0.975);
        assert!(
            (q975 - 1.96).abs() < 0.01,
            "q(0.975) = {q975}, expected ~1.96"
        );

        let q50 = normal_quantile(0.5);
        assert!(q50.abs() < 1e-10, "q(0.5) = {q50}, expected 0.0");

        let q_invalid = normal_quantile(0.0);
        assert!(q_invalid.is_nan());
        let q_invalid2 = normal_quantile(1.0);
        assert!(q_invalid2.is_nan());
    }
1111
    // All four band vectors must match the grid length.
    #[test]
    fn test_fpca_band_valid_output() {
        let data = make_test_data();
        let m = data.ncols();

        let band = fpca_tolerance_band(&data, 3, 100, 0.95, BandType::Pointwise, 42);
        let band = band.expect("FPCA band should succeed");

        assert_eq!(band.lower.len(), m);
        assert_eq!(band.upper.len(), m);
        assert_eq!(band.center.len(), m);
        assert_eq!(band.half_width.len(), m);
    }
1127
    // The band must have strictly positive width at every grid point.
    #[test]
    fn test_fpca_band_lower_less_than_upper() {
        let data = make_test_data();
        let band = fpca_tolerance_band(&data, 3, 100, 0.95, BandType::Pointwise, 42).unwrap();

        for j in 0..band.lower.len() {
            assert!(
                band.lower[j] < band.upper[j],
                "lower[{j}] = {} >= upper[{j}] = {}",
                band.lower[j],
                band.upper[j]
            );
        }
    }
1142
    // Same seed must give bitwise-identical bands.
    #[test]
    fn test_fpca_band_deterministic() {
        let data = make_test_data();
        let b1 = fpca_tolerance_band(&data, 3, 50, 0.95, BandType::Pointwise, 123).unwrap();
        let b2 = fpca_tolerance_band(&data, 3, 50, 0.95, BandType::Pointwise, 123).unwrap();

        for j in 0..b1.lower.len() {
            assert_eq!(b1.lower[j], b2.lower[j]);
            assert_eq!(b1.upper[j], b2.upper[j]);
        }
    }
1154
    // A simultaneous band must be wider on average than a pointwise one.
    #[test]
    fn test_fpca_simultaneous_wider_than_pointwise() {
        let data = make_test_data();
        let pw = fpca_tolerance_band(&data, 3, 200, 0.95, BandType::Pointwise, 42).unwrap();
        let sim = fpca_tolerance_band(&data, 3, 200, 0.95, BandType::Simultaneous, 42).unwrap();

        let pw_mean_hw: f64 = pw.half_width.iter().sum::<f64>() / pw.half_width.len() as f64;
        let sim_mean_hw: f64 = sim.half_width.iter().sum::<f64>() / sim.half_width.len() as f64;

        assert!(
            sim_mean_hw > pw_mean_hw,
            "Simultaneous mean half-width ({sim_mean_hw}) should exceed pointwise ({pw_mean_hw})"
        );
    }
1169
    // Raising the coverage level must widen the band.
    #[test]
    fn test_fpca_higher_coverage_wider() {
        let data = make_test_data();
        let b90 = fpca_tolerance_band(&data, 3, 200, 0.90, BandType::Pointwise, 42).unwrap();
        let b99 = fpca_tolerance_band(&data, 3, 200, 0.99, BandType::Pointwise, 42).unwrap();

        let hw90: f64 = b90.half_width.iter().sum::<f64>();
        let hw99: f64 = b99.half_width.iter().sum::<f64>();

        assert!(
            hw99 > hw90,
            "99% band total half-width ({hw99}) should exceed 90% ({hw90})"
        );
    }
1184
    // Degenerate parameters and too-few curves must yield None.
    #[test]
    fn test_fpca_band_invalid_input() {
        let data = make_test_data();
        assert!(fpca_tolerance_band(&data, 0, 100, 0.95, BandType::Pointwise, 42).is_none());
        assert!(fpca_tolerance_band(&data, 3, 0, 0.95, BandType::Pointwise, 42).is_none());
        assert!(fpca_tolerance_band(&data, 3, 100, 0.0, BandType::Pointwise, 42).is_none());
        assert!(fpca_tolerance_band(&data, 3, 100, 1.0, BandType::Pointwise, 42).is_none());

        let tiny = FdMatrix::zeros(2, 5);
        assert!(fpca_tolerance_band(&tiny, 1, 10, 0.95, BandType::Pointwise, 42).is_none());
    }
1196
    // Conformal band vectors must match the grid length.
    #[test]
    fn test_conformal_band_valid_output() {
        let data = make_test_data();
        let m = data.ncols();

        let band = conformal_prediction_band(&data, 0.2, 0.95, NonConformityScore::SupNorm, 42);
        let band = band.expect("Conformal band should succeed");

        assert_eq!(band.lower.len(), m);
        assert_eq!(band.upper.len(), m);
    }
1210
    // Sup-norm scores give a constant half-width across the grid.
    #[test]
    fn test_conformal_supnorm_constant_width() {
        let data = make_test_data();
        let band =
            conformal_prediction_band(&data, 0.3, 0.95, NonConformityScore::SupNorm, 42).unwrap();

        let first = band.half_width[0];
        for &hw in &band.half_width {
            assert!(
                (hw - first).abs() < 1e-12,
                "SupNorm band should have constant width"
            );
        }
    }
1225
    // The L2 score is spread uniformly over the grid, so the width is
    // constant as well.
    #[test]
    fn test_conformal_l2_constant_width() {
        let data = make_test_data();
        let band = conformal_prediction_band(&data, 0.3, 0.95, NonConformityScore::L2, 42).unwrap();

        let first = band.half_width[0];
        for &hw in &band.half_width {
            assert!(
                (hw - first).abs() < 1e-12,
                "L2 band should have constant width"
            );
        }
    }
1239
    // Conformal width must be monotone in the coverage level.
    #[test]
    fn test_conformal_coverage_monotonicity() {
        let data = make_test_data();
        let b80 =
            conformal_prediction_band(&data, 0.3, 0.80, NonConformityScore::SupNorm, 42).unwrap();
        let b95 =
            conformal_prediction_band(&data, 0.3, 0.95, NonConformityScore::SupNorm, 42).unwrap();

        assert!(
            b95.half_width[0] >= b80.half_width[0],
            "Higher coverage should give wider band"
        );
    }
1253
    // Invalid fractions/coverage and too-few curves must yield None.
    #[test]
    fn test_conformal_invalid_input() {
        let data = make_test_data();
        assert!(
            conformal_prediction_band(&data, 0.0, 0.95, NonConformityScore::SupNorm, 42).is_none()
        );
        assert!(
            conformal_prediction_band(&data, 1.0, 0.95, NonConformityScore::SupNorm, 42).is_none()
        );
        assert!(
            conformal_prediction_band(&data, 0.2, 0.0, NonConformityScore::SupNorm, 42).is_none()
        );

        let tiny = FdMatrix::zeros(3, 5);
        assert!(
            conformal_prediction_band(&tiny, 0.2, 0.95, NonConformityScore::SupNorm, 42).is_none()
        );
    }
1272
    // SCB output lengths must match the grid, with lower < upper
    // everywhere.
    #[test]
    fn test_scb_mean_valid_output() {
        let data = make_test_data();
        let m = data.ncols();
        let t = uniform_grid(m);

        let band = scb_mean_degras(&data, &t, 0.2, 100, 0.95, MultiplierDistribution::Gaussian);
        let band = band.expect("SCB mean should succeed");

        assert_eq!(band.lower.len(), m);
        assert_eq!(band.upper.len(), m);
        for j in 0..m {
            assert!(band.lower[j] < band.upper[j]);
        }
    }
1290
    // Both multiplier distributions must produce positive-width bands.
    #[test]
    fn test_scb_gaussian_vs_rademacher() {
        let data = make_test_data();
        let m = data.ncols();
        let t = uniform_grid(m);

        let gauss =
            scb_mean_degras(&data, &t, 0.2, 200, 0.95, MultiplierDistribution::Gaussian).unwrap();
        let radem = scb_mean_degras(
            &data,
            &t,
            0.2,
            200,
            0.95,
            MultiplierDistribution::Rademacher,
        )
        .unwrap();

        let gauss_mean_hw: f64 = gauss.half_width.iter().sum::<f64>() / m as f64;
        let radem_mean_hw: f64 = radem.half_width.iter().sum::<f64>() / m as f64;
        assert!(gauss_mean_hw > 0.0);
        assert!(radem_mean_hw > 0.0);
    }
1315
    // More curves must give a narrower SCB for the mean.
    #[test]
    fn test_scb_narrows_with_more_data() {
        let m = 50;
        let t = uniform_grid(m);

        let data_small = sim_fundata(
            20,
            &t,
            5,
            EFunType::Fourier,
            EValType::Exponential,
            Some(42),
        );
        let data_large = sim_fundata(
            200,
            &t,
            5,
            EFunType::Fourier,
            EValType::Exponential,
            Some(42),
        );

        let band_small = scb_mean_degras(
            &data_small,
            &t,
            0.2,
            100,
            0.95,
            MultiplierDistribution::Gaussian,
        )
        .unwrap();
        let band_large = scb_mean_degras(
            &data_large,
            &t,
            0.2,
            100,
            0.95,
            MultiplierDistribution::Gaussian,
        )
        .unwrap();

        let hw_small: f64 = band_small.half_width.iter().sum::<f64>() / m as f64;
        let hw_large: f64 = band_large.half_width.iter().sum::<f64>() / m as f64;

        assert!(
            hw_large < hw_small,
            "SCB should narrow with more data: hw_small={hw_small}, hw_large={hw_large}"
        );
    }
1365
    // Bad bandwidth/replicates/confidence or a grid mismatch must yield
    // None.
    #[test]
    fn test_scb_invalid_input() {
        let data = make_test_data();
        let t = uniform_grid(data.ncols());

        assert!(
            scb_mean_degras(&data, &t, 0.0, 100, 0.95, MultiplierDistribution::Gaussian).is_none()
        );
        assert!(
            scb_mean_degras(&data, &t, 0.2, 0, 0.95, MultiplierDistribution::Gaussian).is_none()
        );
        assert!(
            scb_mean_degras(&data, &t, 0.2, 100, 0.0, MultiplierDistribution::Gaussian).is_none()
        );
        let wrong_t = uniform_grid(data.ncols() + 1);
        assert!(scb_mean_degras(
            &data,
            &wrong_t,
            0.2,
            100,
            0.95,
            MultiplierDistribution::Gaussian
        )
        .is_none());
    }
1392
    // With the identity link, the family band must equal the plain
    // simultaneous FPCA band.
    #[test]
    fn test_exp_family_gaussian_matches_fpca() {
        let data = make_test_data();

        let exp_band =
            exponential_family_tolerance_band(&data, ExponentialFamily::Gaussian, 3, 100, 0.95, 42)
                .unwrap();

        let fpca_band =
            fpca_tolerance_band(&data, 3, 100, 0.95, BandType::Simultaneous, 42).unwrap();

        for j in 0..data.ncols() {
            assert!(
                (exp_band.lower[j] - fpca_band.lower[j]).abs() < 1e-10,
                "Gaussian family should match FPCA at point {j}"
            );
            assert!(
                (exp_band.upper[j] - fpca_band.upper[j]).abs() < 1e-10,
                "Gaussian family should match FPCA at point {j}"
            );
        }
    }
1418
1419 #[test]
1420 fn test_exp_family_poisson() {
1421 let m = 30;
1423 let t = uniform_grid(m);
1424 let raw = sim_fundata(
1425 40,
1426 &t,
1427 3,
1428 EFunType::Fourier,
1429 EValType::Exponential,
1430 Some(99),
1431 );
1432
1433 let mut data = FdMatrix::zeros(40, m);
1435 for j in 0..m {
1436 for i in 0..40 {
1437 data[(i, j)] = (raw[(i, j)] + 5.0).max(0.1); }
1439 }
1440
1441 let band =
1442 exponential_family_tolerance_band(&data, ExponentialFamily::Poisson, 3, 50, 0.95, 42);
1443 let band = band.expect("Poisson band should succeed");
1444
1445 for j in 0..m {
1447 assert!(
1448 band.lower[j] > 0.0,
1449 "Poisson lower bound should be positive"
1450 );
1451 assert!(
1452 band.upper[j] > 0.0,
1453 "Poisson upper bound should be positive"
1454 );
1455 }
1456 }
1457
    // Binomial-family bands on (0, 1) data must stay inside (0, 1)
    // after the inverse (logistic) link.
    #[test]
    fn test_exp_family_binomial() {
        let m = 30;
        let t = uniform_grid(m);
        let raw = sim_fundata(
            40,
            &t,
            3,
            EFunType::Fourier,
            EValType::Exponential,
            Some(77),
        );

        // Squash simulated curves through the logistic into (0, 1).
        let mut data = FdMatrix::zeros(40, m);
        for j in 0..m {
            for i in 0..40 {
                data[(i, j)] = 1.0 / (1.0 + (-raw[(i, j)]).exp());
            }
        }

        let band =
            exponential_family_tolerance_band(&data, ExponentialFamily::Binomial, 3, 50, 0.95, 42);
        let band = band.expect("Binomial band should succeed");

        for j in 0..m {
            assert!(
                band.lower[j] > 0.0 && band.lower[j] < 1.0,
                "Binomial lower bound at {j} = {} should be in (0,1)",
                band.lower[j]
            );
            assert!(
                band.upper[j] > 0.0 && band.upper[j] < 1.0,
                "Binomial upper bound at {j} = {} should be in (0,1)",
                band.upper[j]
            );
        }
    }
1498
    // Zero components or zero replicates must yield None.
    #[test]
    fn test_exp_family_invalid_input() {
        let data = make_test_data();
        assert!(exponential_family_tolerance_band(
            &data,
            ExponentialFamily::Gaussian,
            0,
            100,
            0.95,
            42
        )
        .is_none());
        assert!(exponential_family_tolerance_band(
            &data,
            ExponentialFamily::Gaussian,
            3,
            0,
            0.95,
            42
        )
        .is_none());
    }
1521
1522 fn make_elastic_test_data() -> (FdMatrix, Vec<f64>) {
1525 let m = 50;
1526 let t = uniform_grid(m);
1527 let data = sim_fundata(
1528 30,
1529 &t,
1530 5,
1531 EFunType::Fourier,
1532 EValType::Exponential,
1533 Some(42),
1534 );
1535 (data, t)
1536 }
1537
1538 #[test]
1539 fn test_elastic_band_valid_output() {
1540 let (data, t) = make_elastic_test_data();
1541 let m = t.len();
1542
1543 let band = elastic_tolerance_band(&data, &t, 3, 50, 0.95, BandType::Pointwise, 5, 42);
1544 let band = band.expect("Elastic band should succeed");
1545
1546 assert_eq!(band.lower.len(), m);
1547 assert_eq!(band.upper.len(), m);
1548 assert_eq!(band.center.len(), m);
1549 assert_eq!(band.half_width.len(), m);
1550 }
1551
1552 #[test]
1553 fn test_elastic_band_lower_less_than_upper() {
1554 let (data, t) = make_elastic_test_data();
1555 let m = t.len();
1556
1557 let band =
1558 elastic_tolerance_band(&data, &t, 3, 50, 0.95, BandType::Pointwise, 5, 42).unwrap();
1559
1560 for j in 0..m {
1561 assert!(
1562 band.lower[j] < band.upper[j],
1563 "lower[{j}] = {} >= upper[{j}] = {}",
1564 band.lower[j],
1565 band.upper[j]
1566 );
1567 }
1568 }
1569
1570 #[test]
1571 fn test_elastic_band_center_within_bounds() {
1572 let (data, t) = make_elastic_test_data();
1573 let m = t.len();
1574
1575 let band =
1576 elastic_tolerance_band(&data, &t, 3, 50, 0.95, BandType::Pointwise, 5, 42).unwrap();
1577
1578 for j in 0..m {
1579 assert!(
1580 band.center[j] >= band.lower[j] && band.center[j] <= band.upper[j],
1581 "center[{j}]={} should be in [{}, {}]",
1582 band.center[j],
1583 band.lower[j],
1584 band.upper[j]
1585 );
1586 }
1587 }
1588
1589 #[test]
1590 fn test_elastic_band_half_width_positive() {
1591 let (data, t) = make_elastic_test_data();
1592
1593 let band =
1594 elastic_tolerance_band(&data, &t, 3, 50, 0.95, BandType::Pointwise, 5, 42).unwrap();
1595
1596 for (j, &hw) in band.half_width.iter().enumerate() {
1597 assert!(hw > 0.0, "half_width[{j}] should be positive, got {hw}");
1598 }
1599 }
1600
1601 #[test]
1602 fn test_elastic_band_simultaneous() {
1603 let (data, t) = make_elastic_test_data();
1604 let m = t.len();
1605
1606 let band = elastic_tolerance_band(&data, &t, 3, 50, 0.95, BandType::Simultaneous, 5, 42);
1607 let band = band.expect("Elastic simultaneous band should succeed");
1608
1609 assert_eq!(band.lower.len(), m);
1610 for j in 0..m {
1611 assert!(band.lower[j] < band.upper[j]);
1612 }
1613 }
1614
1615 #[test]
1616 fn test_elastic_band_deterministic() {
1617 let (data, t) = make_elastic_test_data();
1618
1619 let b1 =
1620 elastic_tolerance_band(&data, &t, 3, 50, 0.95, BandType::Pointwise, 5, 123).unwrap();
1621 let b2 =
1622 elastic_tolerance_band(&data, &t, 3, 50, 0.95, BandType::Pointwise, 5, 123).unwrap();
1623
1624 for j in 0..t.len() {
1625 assert_eq!(
1626 b1.lower[j], b2.lower[j],
1627 "lower[{j}] should be deterministic"
1628 );
1629 assert_eq!(
1630 b1.upper[j], b2.upper[j],
1631 "upper[{j}] should be deterministic"
1632 );
1633 }
1634 }
1635
1636 #[test]
1637 fn test_elastic_band_higher_coverage_wider() {
1638 let (data, t) = make_elastic_test_data();
1639
1640 let b90 =
1641 elastic_tolerance_band(&data, &t, 3, 100, 0.90, BandType::Pointwise, 5, 42).unwrap();
1642 let b99 =
1643 elastic_tolerance_band(&data, &t, 3, 100, 0.99, BandType::Pointwise, 5, 42).unwrap();
1644
1645 let hw90: f64 = b90.half_width.iter().sum();
1646 let hw99: f64 = b99.half_width.iter().sum();
1647
1648 assert!(
1649 hw99 > hw90,
1650 "99% coverage band should be wider than 90%: hw99={hw99:.4}, hw90={hw90:.4}"
1651 );
1652 }
1653
1654 #[test]
1655 fn test_elastic_band_invalid_input() {
1656 let (data, t) = make_elastic_test_data();
1657
1658 let wrong_t = uniform_grid(t.len() + 1);
1660 assert!(
1661 elastic_tolerance_band(&data, &wrong_t, 3, 50, 0.95, BandType::Pointwise, 5, 42)
1662 .is_none()
1663 );
1664
1665 assert!(
1667 elastic_tolerance_band(&data, &t, 3, 50, 0.95, BandType::Pointwise, 0, 42).is_none()
1668 );
1669
1670 assert!(
1672 elastic_tolerance_band(&data, &t, 0, 50, 0.95, BandType::Pointwise, 5, 42).is_none()
1673 );
1674
1675 assert!(
1677 elastic_tolerance_band(&data, &t, 3, 0, 0.95, BandType::Pointwise, 5, 42).is_none()
1678 );
1679
1680 assert!(
1682 elastic_tolerance_band(&data, &t, 3, 50, 0.0, BandType::Pointwise, 5, 42).is_none()
1683 );
1684 assert!(
1685 elastic_tolerance_band(&data, &t, 3, 50, 1.0, BandType::Pointwise, 5, 42).is_none()
1686 );
1687
1688 let tiny = FdMatrix::zeros(2, t.len());
1690 assert!(
1691 elastic_tolerance_band(&tiny, &t, 1, 10, 0.95, BandType::Pointwise, 5, 42).is_none()
1692 );
1693 }
1694
1695 fn make_equivalent_groups() -> (FdMatrix, FdMatrix) {
1698 let m = 50;
1699 let t = uniform_grid(m);
1700 let d1 = sim_fundata(
1701 30,
1702 &t,
1703 5,
1704 EFunType::Fourier,
1705 EValType::Exponential,
1706 Some(42),
1707 );
1708 let d2 = sim_fundata(
1709 30,
1710 &t,
1711 5,
1712 EFunType::Fourier,
1713 EValType::Exponential,
1714 Some(142),
1715 );
1716 (d1, d2)
1717 }
1718
1719 fn make_shifted_groups() -> (FdMatrix, FdMatrix) {
1720 let m = 50;
1721 let t = uniform_grid(m);
1722 let d1 = sim_fundata(
1723 30,
1724 &t,
1725 5,
1726 EFunType::Fourier,
1727 EValType::Exponential,
1728 Some(42),
1729 );
1730 let mut d2 = sim_fundata(
1731 30,
1732 &t,
1733 5,
1734 EFunType::Fourier,
1735 EValType::Exponential,
1736 Some(142),
1737 );
1738 let (n2, m2) = d2.shape();
1739 for i in 0..n2 {
1740 for j in 0..m2 {
1741 d2[(i, j)] += 10.0;
1742 }
1743 }
1744 (d1, d2)
1745 }
1746
1747 #[test]
1748 fn test_equivalence_invalid_inputs() {
1749 let (data1, data2) = make_equivalent_groups();
1750 let bs = EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian);
1751
1752 let tiny = FdMatrix::zeros(2, 50);
1753 assert!(equivalence_test(&tiny, &data2, 1.0, 0.05, 100, bs, 42).is_none());
1754 assert!(equivalence_test(&data1, &tiny, 1.0, 0.05, 100, bs, 42).is_none());
1755
1756 let wrong_m = FdMatrix::zeros(30, 40);
1757 assert!(equivalence_test(&data1, &wrong_m, 1.0, 0.05, 100, bs, 42).is_none());
1758
1759 assert!(equivalence_test(&data1, &data2, 0.0, 0.05, 100, bs, 42).is_none());
1760 assert!(equivalence_test(&data1, &data2, -1.0, 0.05, 100, bs, 42).is_none());
1761 assert!(equivalence_test(&data1, &data2, 1.0, 0.0, 100, bs, 42).is_none());
1762 assert!(equivalence_test(&data1, &data2, 1.0, 0.5, 100, bs, 42).is_none());
1763 assert!(equivalence_test(&data1, &data2, 1.0, 0.05, 0, bs, 42).is_none());
1764 }
1765
1766 #[test]
1767 fn test_equivalence_deterministic() {
1768 let (data1, data2) = make_equivalent_groups();
1769 let bs = EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian);
1770
1771 let r1 = equivalence_test(&data1, &data2, 5.0, 0.05, 100, bs, 42).unwrap();
1772 let r2 = equivalence_test(&data1, &data2, 5.0, 0.05, 100, bs, 42).unwrap();
1773
1774 assert_eq!(r1.test_statistic, r2.test_statistic);
1775 assert_eq!(r1.p_value, r2.p_value);
1776 assert_eq!(r1.critical_value, r2.critical_value);
1777 assert_eq!(r1.equivalent, r2.equivalent);
1778 }
1779
1780 #[test]
1781 fn test_equivalence_identical_groups() {
1782 let data = make_test_data();
1783 let r = equivalence_test(
1784 &data,
1785 &data,
1786 10.0,
1787 0.05,
1788 200,
1789 EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian),
1790 42,
1791 )
1792 .unwrap();
1793 assert!(
1794 r.equivalent,
1795 "Identical groups with large delta should be equivalent"
1796 );
1797 }
1798
1799 #[test]
1800 fn test_equivalence_different_groups() {
1801 let (data1, data2) = make_shifted_groups();
1802 let r = equivalence_test(
1803 &data1,
1804 &data2,
1805 0.5,
1806 0.05,
1807 200,
1808 EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian),
1809 42,
1810 )
1811 .unwrap();
1812 assert!(
1813 !r.equivalent,
1814 "Shifted groups with small delta should not be equivalent"
1815 );
1816 }
1817
1818 #[test]
1819 fn test_equivalence_scb_properties() {
1820 let (data1, data2) = make_equivalent_groups();
1821 let r = equivalence_test(
1822 &data1,
1823 &data2,
1824 5.0,
1825 0.05,
1826 200,
1827 EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian),
1828 42,
1829 )
1830 .unwrap();
1831
1832 for j in 0..r.scb.lower.len() {
1833 assert!(
1834 r.scb.lower[j] < r.scb.center[j],
1835 "lower[{j}] should be < center[{j}]"
1836 );
1837 assert!(
1838 r.scb.center[j] < r.scb.upper[j],
1839 "center[{j}] should be < upper[{j}]"
1840 );
1841 }
1842 }
1843
1844 #[test]
1845 fn test_equivalence_larger_delta_easier() {
1846 let (data1, data2) = make_equivalent_groups();
1847 let bs = EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian);
1848
1849 let r_small = equivalence_test(&data1, &data2, 1.0, 0.05, 200, bs, 42).unwrap();
1850 let r_large = equivalence_test(&data1, &data2, 100.0, 0.05, 200, bs, 42).unwrap();
1851
1852 assert!(
1853 r_large.equivalent || !r_small.equivalent,
1854 "Larger delta should be at least as likely equivalent"
1855 );
1856 assert!(
1857 r_large.p_value <= r_small.p_value + 1e-10,
1858 "Larger delta p-value ({}) should be <= smaller delta p-value ({})",
1859 r_large.p_value,
1860 r_small.p_value
1861 );
1862 }
1863
1864 #[test]
1865 fn test_equivalence_pvalue_range() {
1866 let (data1, data2) = make_equivalent_groups();
1867 let r = equivalence_test(
1868 &data1,
1869 &data2,
1870 5.0,
1871 0.05,
1872 200,
1873 EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian),
1874 42,
1875 )
1876 .unwrap();
1877 assert!(r.p_value >= 0.0, "p_value should be >= 0");
1878 assert!(r.p_value <= 1.0, "p_value should be <= 1");
1879 }
1880
1881 #[test]
1882 fn test_equivalence_pvalue_consistent() {
1883 let (data1, data2) = make_equivalent_groups();
1884 let bs = EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian);
1885
1886 let r = equivalence_test(&data1, &data2, 100.0, 0.05, 500, bs, 42).unwrap();
1887 if r.equivalent {
1888 assert!(
1889 r.p_value < r.alpha,
1890 "equivalent=true should imply p_value ({}) < alpha ({})",
1891 r.p_value,
1892 r.alpha
1893 );
1894 }
1895
1896 let r2 = equivalence_test(&data1, &data2, 0.001, 0.05, 500, bs, 42).unwrap();
1897 if !r2.equivalent {
1898 assert!(
1899 r2.p_value >= r2.alpha,
1900 "equivalent=false should imply p_value ({}) >= alpha ({})",
1901 r2.p_value,
1902 r2.alpha
1903 );
1904 }
1905 }
1906
1907 #[test]
1908 fn test_equivalence_percentile() {
1909 let (data1, data2) = make_equivalent_groups();
1910 let r = equivalence_test(
1911 &data1,
1912 &data2,
1913 5.0,
1914 0.05,
1915 200,
1916 EquivalenceBootstrap::Percentile,
1917 42,
1918 )
1919 .unwrap();
1920
1921 assert!(r.test_statistic >= 0.0);
1922 assert!(r.p_value >= 0.0 && r.p_value <= 1.0);
1923 assert!(r.critical_value >= 0.0);
1924 }
1925
1926 #[test]
1927 fn test_equivalence_one_sample_equivalent() {
1928 let data = make_test_data();
1929 let mu0 = mean_1d(&data);
1930
1931 let r = equivalence_test_one_sample(
1932 &data,
1933 &mu0,
1934 10.0,
1935 0.05,
1936 200,
1937 EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian),
1938 42,
1939 )
1940 .unwrap();
1941 assert!(
1942 r.equivalent,
1943 "Data vs its own mean with large delta should be equivalent"
1944 );
1945 }
1946
1947 #[test]
1948 fn test_equivalence_one_sample_shifted() {
1949 let data = make_test_data();
1950 let mu0 = vec![100.0; data.ncols()];
1951
1952 let r = equivalence_test_one_sample(
1953 &data,
1954 &mu0,
1955 0.5,
1956 0.05,
1957 200,
1958 EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian),
1959 42,
1960 )
1961 .unwrap();
1962 assert!(
1963 !r.equivalent,
1964 "Data vs far-away mu0 should not be equivalent"
1965 );
1966 }
1967
1968 #[test]
1969 fn test_equivalence_one_sample_invalid() {
1970 let data = make_test_data();
1971 let mu0 = vec![0.0; data.ncols()];
1972 let bs = EquivalenceBootstrap::Multiplier(MultiplierDistribution::Gaussian);
1973
1974 let tiny = FdMatrix::zeros(2, data.ncols());
1975 assert!(equivalence_test_one_sample(&tiny, &mu0, 1.0, 0.05, 100, bs, 42).is_none());
1976 assert!(equivalence_test_one_sample(&data, &[0.0; 10], 1.0, 0.05, 100, bs, 42).is_none());
1977 assert!(equivalence_test_one_sample(&data, &mu0, 0.0, 0.05, 100, bs, 42).is_none());
1978 assert!(equivalence_test_one_sample(&data, &mu0, 1.0, 0.5, 100, bs, 42).is_none());
1979 }
1980
1981 #[test]
1982 fn test_constant_data_fpca_tolerance() {
1983 let n = 10;
1984 let m = 20;
1985 let data = FdMatrix::from_column_major(vec![5.0; n * m], n, m).unwrap();
1986 let band = fpca_tolerance_band(&data, 2, 200, 0.95, BandType::Pointwise, 42);
1988 if let Some(band) = band {
1990 assert_eq!(band.lower.len(), m);
1991 assert_eq!(band.upper.len(), m);
1992 for j in 0..m {
1993 assert!(band.lower[j].is_finite());
1994 assert!(band.upper[j].is_finite());
1995 }
1996 }
1997 }
1998
1999 #[test]
2000 fn test_n3_fpca_tolerance() {
2001 let n = 3;
2003 let m = 20;
2004 let data_vec: Vec<f64> = (0..n * m).map(|i| (i as f64 * 0.1).sin()).collect();
2005 let data = FdMatrix::from_column_major(data_vec, n, m).unwrap();
2006 let band = fpca_tolerance_band(&data, 2, 100, 0.90, BandType::Pointwise, 42);
2007 if let Some(band) = band {
2008 assert_eq!(band.lower.len(), m);
2009 assert_eq!(band.upper.len(), m);
2010 }
2011 }
2012
2013 #[test]
2014 fn test_delta_zero_equivalence() {
2015 let n = 10;
2017 let m = 20;
2018 let data1_vec: Vec<f64> = (0..n * m).map(|i| (i as f64 * 0.1).sin()).collect();
2019 let data2_vec: Vec<f64> = (0..n * m).map(|i| (i as f64 * 0.1).sin() + 0.5).collect();
2020 let data1 = FdMatrix::from_column_major(data1_vec, n, m).unwrap();
2021 let data2 = FdMatrix::from_column_major(data2_vec, n, m).unwrap();
2022 let result = equivalence_test(
2023 &data1,
2024 &data2,
2025 0.0,
2026 0.05,
2027 199,
2028 EquivalenceBootstrap::Percentile,
2029 42,
2030 );
2031 assert!(result.is_none());
2033 }
2034}