1use u_numflow::matrix::Matrix;
26use u_numflow::special;
27use u_numflow::stats;
28
/// Result of a pairwise correlation test.
#[derive(Debug, Clone, Copy)]
pub struct CorrelationResult {
    // Correlation coefficient in [-1, 1] (Pearson r, Spearman rho, or Kendall tau-b,
    // depending on which function produced it).
    pub r: f64,
    // Two-sided p-value for the null hypothesis of zero correlation.
    pub p_value: f64,
    // Number of paired observations used.
    pub n: usize,
}
39
/// Confidence interval for a correlation coefficient (Fisher-z based).
#[derive(Debug, Clone, Copy)]
pub struct CorrelationCI {
    // Lower endpoint, back-transformed to the r scale via tanh.
    pub lower: f64,
    // Upper endpoint, back-transformed to the r scale via tanh.
    pub upper: f64,
    // Confidence level that was requested, e.g. 0.95.
    pub confidence: f64,
}
50
51pub fn pearson(x: &[f64], y: &[f64]) -> Option<CorrelationResult> {
84 let n = x.len();
85 if n < 3 || n != y.len() {
86 return None;
87 }
88
89 if x.iter().any(|v| !v.is_finite()) || y.iter().any(|v| !v.is_finite()) {
90 return None;
91 }
92
93 let cov = stats::covariance(x, y)?;
94 let sx = stats::std_dev(x)?;
95 let sy = stats::std_dev(y)?;
96
97 if sx < 1e-300 || sy < 1e-300 {
98 return None; }
100
101 let r = (cov / (sx * sy)).clamp(-1.0, 1.0);
102 let p_value = correlation_p_value(r, n);
103
104 Some(CorrelationResult { r, p_value, n })
105}
106
107pub fn spearman(x: &[f64], y: &[f64]) -> Option<CorrelationResult> {
140 let n = x.len();
141 if n < 3 || n != y.len() {
142 return None;
143 }
144
145 if x.iter().any(|v| !v.is_finite()) || y.iter().any(|v| !v.is_finite()) {
146 return None;
147 }
148
149 let rx = rank_data(x);
150 let ry = rank_data(y);
151
152 let cov = stats::covariance(&rx, &ry)?;
154 let srx = stats::std_dev(&rx)?;
155 let sry = stats::std_dev(&ry)?;
156
157 if srx < 1e-300 || sry < 1e-300 {
158 return None;
159 }
160
161 let r = (cov / (srx * sry)).clamp(-1.0, 1.0);
162 let p_value = correlation_p_value(r, n);
163
164 Some(CorrelationResult { r, p_value, n })
165}
166
167pub fn kendall_tau_b(x: &[f64], y: &[f64]) -> Option<CorrelationResult> {
206 let n = x.len();
207 if n < 3 || n != y.len() {
208 return None;
209 }
210
211 if x.iter().any(|v| !v.is_finite()) || y.iter().any(|v| !v.is_finite()) {
212 return None;
213 }
214
215 let mut concordant: i64 = 0;
216 let mut discordant: i64 = 0;
217 let mut ties_x: i64 = 0;
218 let mut ties_y: i64 = 0;
219 let mut _ties_xy: i64 = 0;
220
221 for i in 0..n {
222 for j in (i + 1)..n {
223 let dx = x[i] - x[j];
224 let dy = y[i] - y[j];
225 let product = dx * dy;
226
227 if dx == 0.0 && dy == 0.0 {
228 _ties_xy += 1;
229 ties_x += 1;
230 ties_y += 1;
231 } else if dx == 0.0 {
232 ties_x += 1;
233 } else if dy == 0.0 {
234 ties_y += 1;
235 } else if product > 0.0 {
236 concordant += 1;
237 } else {
238 discordant += 1;
239 }
240 }
241 }
242
243 let n0 = (n as i64) * (n as i64 - 1) / 2;
244 let denom_sq = (n0 - ties_x) as f64 * (n0 - ties_y) as f64;
245
246 if denom_sq <= 0.0 {
247 return None; }
249
250 let tau = (concordant - discordant) as f64 / denom_sq.sqrt();
251 let tau = tau.clamp(-1.0, 1.0);
252
253 let nf = n as f64;
258 let v0 = nf * (nf - 1.0) * (2.0 * nf + 5.0);
259 let vt = compute_tie_variance_term(x);
260 let vu = compute_tie_variance_term(y);
261 let var_s = (v0 - vt - vu) / 18.0;
262
263 let p_value = if var_s > 0.0 {
264 let s = (concordant - discordant) as f64;
265 let z = s / var_s.sqrt();
266 2.0 * (1.0 - special::standard_normal_cdf(z.abs()))
267 } else {
268 1.0
269 };
270
271 Some(CorrelationResult { r: tau, p_value, n })
272}
273
/// Fisher z-transformation of a correlation coefficient: z = atanh(r).
///
/// Returns `None` unless `r` lies strictly inside (-1, 1); the transform
/// diverges at the endpoints and is undefined for non-finite input.
pub fn fisher_z(r: f64) -> Option<f64> {
    let in_open_interval = r.is_finite() && r > -1.0 && r < 1.0;
    in_open_interval.then(|| r.atanh())
}
297
/// Inverse Fisher transformation: maps a z-score back to r = tanh(z).
pub fn fisher_z_inv(z: f64) -> f64 {
    f64::tanh(z)
}
302
303pub fn correlation_ci(r: f64, n: usize, confidence: f64) -> Option<CorrelationCI> {
328 if n < 4 || r <= -1.0 || r >= 1.0 || !r.is_finite() {
329 return None;
330 }
331 if confidence <= 0.0 || confidence >= 1.0 {
332 return None;
333 }
334
335 let z = r.atanh();
336 let se = 1.0 / ((n as f64 - 3.0).sqrt());
337 let alpha = 1.0 - confidence;
338 let z_crit = special::inverse_normal_cdf(1.0 - alpha / 2.0);
339
340 let z_lower = z - z_crit * se;
341 let z_upper = z + z_crit * se;
342
343 Some(CorrelationCI {
344 lower: z_lower.tanh(),
345 upper: z_upper.tanh(),
346 confidence,
347 })
348}
349
350pub fn correlation_matrix(variables: &[&[f64]]) -> Option<Matrix> {
380 let p = variables.len();
381 if p < 2 {
382 return None;
383 }
384 let n = variables[0].len();
385 if n < 3 {
386 return None;
387 }
388 for v in variables {
389 if v.len() != n {
390 return None;
391 }
392 }
393
394 let mut sds = Vec::with_capacity(p);
396 for v in variables {
397 let m = stats::mean(v)?;
398 if !m.is_finite() {
399 return None;
400 }
401 let s = stats::std_dev(v)?;
402 if !s.is_finite() || s < 1e-300 {
403 return None;
404 }
405 sds.push(s);
406 }
407
408 let mut data = vec![0.0; p * p];
409
410 for i in 0..p {
411 data[i * p + i] = 1.0; for j in (i + 1)..p {
413 let cov = stats::covariance(variables[i], variables[j])?;
414 let r = (cov / (sds[i] * sds[j])).clamp(-1.0, 1.0);
415 data[i * p + j] = r;
416 data[j * p + i] = r;
417 }
418 }
419
420 Matrix::new(p, p, data).ok()
421}
422
423pub fn spearman_matrix(variables: &[&[f64]]) -> Option<Matrix> {
427 let p = variables.len();
428 if p < 2 {
429 return None;
430 }
431 let n = variables[0].len();
432 if n < 3 {
433 return None;
434 }
435 for v in variables {
436 if v.len() != n || v.iter().any(|x| !x.is_finite()) {
437 return None;
438 }
439 }
440
441 let ranked: Vec<Vec<f64>> = variables.iter().map(|v| rank_data(v)).collect();
443 let ranked_refs: Vec<&[f64]> = ranked.iter().map(|v| v.as_slice()).collect();
444
445 correlation_matrix(&ranked_refs)
446}
447
448fn correlation_p_value(r: f64, n: usize) -> f64 {
456 if n < 3 {
457 return 1.0;
458 }
459 let df = (n - 2) as f64;
460 let r2 = r * r;
461
462 if r2 >= 1.0 - 1e-15 {
464 return 0.0; }
466
467 let t = r * (df / (1.0 - r2)).sqrt();
468 2.0 * (1.0 - special::t_distribution_cdf(t.abs(), df))
469}
470
/// Assign 1-based ranks to `data`, giving tied values their average rank.
///
/// NaN values are treated as equal by the comparator; callers in this
/// module reject non-finite input before ranking.
fn rank_data(data: &[f64]) -> Vec<f64> {
    let n = data.len();

    // Sort indices by their value (stable, so equal values keep input order).
    let mut order: Vec<usize> = (0..n).collect();
    order.sort_by(|&a, &b| {
        data[a]
            .partial_cmp(&data[b])
            .unwrap_or(std::cmp::Ordering::Equal)
    });

    let mut ranks = vec![0.0; n];
    let mut start = 0;
    while start < n {
        // Grow the tie group: values equal to within 1e-300 share one rank.
        let mut end = start;
        while end < n && (data[order[end]] - data[order[start]]).abs() < 1e-300 {
            end += 1;
        }
        // Average of the 1-based ranks start+1 ..= end.
        let shared = (start + end) as f64 / 2.0 + 0.5;
        for &idx in &order[start..end] {
            ranks[idx] = shared;
        }
        start = end;
    }

    ranks
}
497
/// Tie-correction term Σ t(t-1)(2t+5) summed over groups of tied values,
/// used in the variance of Kendall's S statistic.
fn compute_tie_variance_term(data: &[f64]) -> f64 {
    let mut sorted = data.to_vec();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

    let mut total = 0.0;
    let mut start = 0;
    while start < sorted.len() {
        // Find the extent of the current group of (near-)equal values.
        let mut end = start;
        while end < sorted.len() && (sorted[end] - sorted[start]).abs() < 1e-300 {
            end += 1;
        }
        let t = (end - start) as f64;
        // Singletons contribute nothing; only true ties matter.
        if t > 1.0 {
            total += t * (t - 1.0) * (2.0 * t + 5.0);
        }
        start = end;
    }

    total
}
520
/// Autocorrelation function output.
#[derive(Debug, Clone)]
pub struct AcfResult {
    // ACF values for lags 0..=max_lag; index 0 is always 1.0.
    pub acf: Vec<f64>,
    // ±1.96/sqrt(n) approximate white-noise significance band.
    pub confidence_threshold: f64,
}
535
/// Partial autocorrelation function output.
#[derive(Debug, Clone)]
pub struct PacfResult {
    // PACF values for lags 1..=max_lag (index 0 holds lag 1; lag 0 is omitted).
    pub pacf: Vec<f64>,
    // ±1.96/sqrt(n) approximate white-noise significance band.
    pub confidence_threshold: f64,
}
544
545pub fn acf(data: &[f64], max_lag: usize) -> Option<AcfResult> {
574 let n = data.len();
575 if n < 2 || max_lag == 0 || data.iter().any(|v| !v.is_finite()) {
576 return None;
577 }
578 let max_lag = max_lag.min(n - 1);
579 let nf = n as f64;
580
581 let mean = data.iter().sum::<f64>() / nf;
582
583 let c0: f64 = data.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / nf;
585
586 if c0 <= 0.0 {
587 return None; }
589
590 let mut acf_vals = Vec::with_capacity(max_lag + 1);
591 acf_vals.push(1.0); for lag in 1..=max_lag {
594 let ck: f64 = data[..n - lag]
595 .iter()
596 .zip(&data[lag..])
597 .map(|(&xt, &xt_h)| (xt - mean) * (xt_h - mean))
598 .sum::<f64>()
599 / nf;
600 acf_vals.push(ck / c0);
601 }
602
603 let threshold = 1.96 / nf.sqrt();
604
605 Some(AcfResult {
606 acf: acf_vals,
607 confidence_threshold: threshold,
608 })
609}
610
/// Partial autocorrelation function via the Durbin–Levinson recursion.
///
/// Builds on [`acf`]: `pacf[h-1]` is the last coefficient of the order-h
/// AR fit implied by the autocorrelations. Returns `None` for fewer than
/// 3 points, a zero `max_lag`, or when the ACF itself cannot be computed.
pub fn pacf(data: &[f64], max_lag: usize) -> Option<PacfResult> {
    let n = data.len();
    if n < 3 || max_lag == 0 {
        return None;
    }

    // acf() may clamp the lag, so re-derive the effective max_lag from its output.
    let acf_result = acf(data, max_lag)?;
    let rho = &acf_result.acf;
    let max_lag = rho.len() - 1;
    if max_lag == 0 {
        return None;
    }

    let mut pacf_vals = Vec::with_capacity(max_lag);

    // Base case: PACF at lag 1 equals ACF at lag 1.
    pacf_vals.push(rho[1]);

    if max_lag == 1 {
        return Some(PacfResult {
            pacf: pacf_vals,
            confidence_threshold: 1.96 / (n as f64).sqrt(),
        });
    }

    // phi_prev holds the order-(h-1) AR coefficients phi_{h-1, 1..h-1}.
    let mut phi_prev = vec![rho[1]];

    for h in 2..=max_lag {
        // Numerator: rho[h] minus the part explained by the previous order.
        let mut num = rho[h];
        for j in 0..h - 1 {
            num -= phi_prev[j] * rho[h - 1 - j];
        }

        // Denominator: residual variance factor of the order-(h-1) fit.
        let mut den = 1.0;
        for j in 0..h - 1 {
            den -= phi_prev[j] * rho[j + 1];
        }

        // Guard against a numerically singular denominator.
        let phi_hh = if den.abs() > 1e-14 { num / den } else { 0.0 };

        // Levinson update: phi_{h,j} = phi_{h-1,j} - phi_hh * phi_{h-1,h-j}.
        let mut phi_new = Vec::with_capacity(h);
        for j in 0..h - 1 {
            phi_new.push(phi_prev[j] - phi_hh * phi_prev[h - 2 - j]);
        }
        phi_new.push(phi_hh);

        pacf_vals.push(phi_hh);
        phi_prev = phi_new;
    }

    Some(PacfResult {
        pacf: pacf_vals,
        confidence_threshold: 1.96 / (n as f64).sqrt(),
    })
}
702
#[cfg(test)]
mod tests {
    use super::*;

    // ---- Pearson correlation ----

    #[test]
    fn pearson_perfect_positive() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [2.0, 4.0, 6.0, 8.0, 10.0];
        let result = pearson(&x, &y).expect("should compute");
        assert!((result.r - 1.0).abs() < 1e-10);
        assert!(result.p_value < 1e-10);
    }

    #[test]
    fn pearson_perfect_negative() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [10.0, 8.0, 6.0, 4.0, 2.0];
        let result = pearson(&x, &y).expect("should compute");
        assert!((result.r + 1.0).abs() < 1e-10);
        assert!(result.p_value < 1e-10);
    }

    #[test]
    fn pearson_uncorrelated() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [3.0, 1.0, 5.0, 1.0, 5.0];
        let result = pearson(&x, &y).expect("should compute");
        assert!(result.r.abs() < 0.5);
    }

    #[test]
    fn pearson_known_value() {
        let x = [68.0, 71.0, 62.0, 75.0, 58.0, 60.0, 67.0, 68.0, 71.0, 69.0];
        let y = [4.1, 4.6, 3.8, 4.4, 3.2, 3.1, 3.8, 4.1, 4.3, 3.7];
        let result = pearson(&x, &y).expect("should compute");
        assert!((result.r - 0.8816).abs() < 0.01, "r = {}", result.r);
    }

    #[test]
    fn pearson_numeric_reference() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [2.0, 4.0, 5.0, 4.0, 5.0];
        let result = pearson(&x, &y).expect("should compute");

        // Hand-computed reference: r = 6 / sqrt(60) ≈ 0.7746.
        let expected_r = 6.0_f64 / (60.0_f64).sqrt();
        assert!(
            (result.r - expected_r).abs() < 1e-3,
            "r expected {:.6}, got {}",
            expected_r,
            result.r
        );
        assert!(
            (result.r - 0.7746).abs() < 1e-3,
            "r expected ≈0.7746, got {}",
            result.r
        );
    }

    #[test]
    fn pearson_insufficient_data() {
        assert!(pearson(&[1.0, 2.0], &[3.0, 4.0]).is_none());
        assert!(pearson(&[1.0], &[2.0]).is_none());
    }

    #[test]
    fn pearson_length_mismatch() {
        assert!(pearson(&[1.0, 2.0, 3.0], &[4.0, 5.0]).is_none());
    }

    #[test]
    fn pearson_zero_variance() {
        assert!(pearson(&[5.0, 5.0, 5.0], &[1.0, 2.0, 3.0]).is_none());
    }

    #[test]
    fn pearson_nan_input() {
        assert!(pearson(&[1.0, f64::NAN, 3.0], &[4.0, 5.0, 6.0]).is_none());
    }

    // ---- Spearman correlation ----

    #[test]
    fn spearman_perfect_monotone() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [1.0, 4.0, 9.0, 16.0, 25.0];
        let result = spearman(&x, &y).expect("should compute");
        assert!((result.r - 1.0).abs() < 1e-10);
    }

    #[test]
    fn spearman_perfect_negative() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [5.0, 4.0, 3.0, 2.0, 1.0];
        let result = spearman(&x, &y).expect("should compute");
        assert!((result.r + 1.0).abs() < 1e-10);
    }

    #[test]
    fn spearman_with_ties() {
        let x = [1.0, 2.0, 2.0, 4.0, 5.0];
        let y = [1.0, 3.0, 3.0, 4.0, 5.0];
        let result = spearman(&x, &y).expect("should compute");
        assert!(result.r > 0.9);
    }

    #[test]
    fn spearman_nonlinear_monotone() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y: Vec<f64> = x.iter().map(|&v: &f64| v.powi(3)).collect();
        let result = spearman(&x, &y).expect("should compute");
        assert!((result.r - 1.0).abs() < 1e-10);
    }

    // ---- Kendall tau-b ----

    #[test]
    fn kendall_perfect_concordance() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [1.0, 2.0, 3.0, 4.0, 5.0];
        let result = kendall_tau_b(&x, &y).expect("should compute");
        assert!((result.r - 1.0).abs() < 1e-10);
    }

    #[test]
    fn kendall_perfect_discordance() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [5.0, 4.0, 3.0, 2.0, 1.0];
        let result = kendall_tau_b(&x, &y).expect("should compute");
        assert!((result.r + 1.0).abs() < 1e-10);
    }

    #[test]
    fn kendall_known_value() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [3.0, 4.0, 1.0, 2.0, 5.0];
        let result = kendall_tau_b(&x, &y).expect("should compute");
        assert!((result.r - 0.2).abs() < 0.01);
    }

    #[test]
    fn kendall_with_ties() {
        let x = [1.0, 2.0, 2.0, 4.0, 5.0];
        let y = [1.0, 3.0, 3.0, 4.0, 5.0];
        let result = kendall_tau_b(&x, &y).expect("should compute");
        assert!(result.r > 0.8);
    }

    #[test]
    fn kendall_all_ties() {
        let x = [1.0, 1.0, 1.0];
        let y = [2.0, 2.0, 2.0];
        assert!(kendall_tau_b(&x, &y).is_none());
    }

    // ---- Fisher z-transform ----

    #[test]
    fn fisher_z_zero() {
        let z = fisher_z(0.0).expect("should compute");
        assert!(z.abs() < 1e-15);
    }

    #[test]
    fn fisher_z_roundtrip() {
        for &r in &[0.0, 0.3, 0.5, 0.8, 0.95, -0.5, -0.95] {
            let z = fisher_z(r).expect("should compute");
            let r_back = fisher_z_inv(z);
            assert!((r - r_back).abs() < 1e-10, "Roundtrip failed for r={r}");
        }
    }

    #[test]
    fn fisher_z_boundary() {
        assert!(fisher_z(1.0).is_none());
        assert!(fisher_z(-1.0).is_none());
        assert!(fisher_z(1.5).is_none());
        assert!(fisher_z(f64::NAN).is_none());
    }

    // ---- Confidence intervals ----

    #[test]
    fn ci_contains_r() {
        let ci = correlation_ci(0.6, 50, 0.95).expect("should compute");
        assert!(ci.lower < 0.6);
        assert!(ci.upper > 0.6);
        assert!(ci.lower > -1.0);
        assert!(ci.upper < 1.0);
    }

    #[test]
    fn ci_wider_at_higher_confidence() {
        let ci_95 = correlation_ci(0.5, 30, 0.95).expect("should compute");
        let ci_99 = correlation_ci(0.5, 30, 0.99).expect("should compute");
        assert!(ci_99.upper - ci_99.lower > ci_95.upper - ci_95.lower);
    }

    #[test]
    fn ci_narrower_with_more_data() {
        let ci_30 = correlation_ci(0.5, 30, 0.95).expect("should compute");
        let ci_100 = correlation_ci(0.5, 100, 0.95).expect("should compute");
        assert!(ci_100.upper - ci_100.lower < ci_30.upper - ci_30.lower);
    }

    #[test]
    fn ci_edge_cases() {
        assert!(correlation_ci(0.5, 3, 0.95).is_none());
        assert!(correlation_ci(1.0, 30, 0.95).is_none());
        assert!(correlation_ci(0.5, 30, 0.0).is_none());
        assert!(correlation_ci(0.5, 30, 1.0).is_none());
    }

    // ---- Correlation matrices ----

    #[test]
    fn corr_matrix_identity() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [2.0, 4.0, 6.0, 8.0, 10.0];
        let z = [5.0, 4.0, 3.0, 2.0, 1.0];
        let mat = correlation_matrix(&[&x, &y, &z]).expect("should compute");

        // Unit diagonal.
        assert!((mat.get(0, 0) - 1.0).abs() < 1e-10);
        assert!((mat.get(1, 1) - 1.0).abs() < 1e-10);
        assert!((mat.get(2, 2) - 1.0).abs() < 1e-10);

        // y = 2x → perfect positive correlation.
        assert!((mat.get(0, 1) - 1.0).abs() < 1e-10);

        // z is the reverse of x → perfect negative correlation.
        assert!((mat.get(0, 2) + 1.0).abs() < 1e-10);

        // Symmetry.
        assert!((mat.get(0, 1) - mat.get(1, 0)).abs() < 1e-15);
        assert!((mat.get(0, 2) - mat.get(2, 0)).abs() < 1e-15);
    }

    #[test]
    fn corr_matrix_insufficient_variables() {
        let x = [1.0, 2.0, 3.0];
        assert!(correlation_matrix(&[&x]).is_none());
    }

    #[test]
    fn corr_matrix_length_mismatch() {
        let x = [1.0, 2.0, 3.0];
        let y = [4.0, 5.0];
        assert!(correlation_matrix(&[&x, &y]).is_none());
    }

    #[test]
    fn spearman_matrix_basic() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [1.0, 4.0, 9.0, 16.0, 25.0];
        let z = [5.0, 4.0, 3.0, 2.0, 1.0];
        let mat = spearman_matrix(&[&x, &y, &z]).expect("should compute");

        assert!((mat.get(0, 1) - 1.0).abs() < 1e-10);
        assert!((mat.get(0, 2) + 1.0).abs() < 1e-10);
    }

    // ---- Ranking helper ----

    #[test]
    fn rank_no_ties() {
        let ranks = rank_data(&[3.0, 1.0, 2.0]);
        assert!((ranks[0] - 3.0).abs() < 1e-10);
        assert!((ranks[1] - 1.0).abs() < 1e-10);
        assert!((ranks[2] - 2.0).abs() < 1e-10);
    }

    #[test]
    fn rank_with_ties() {
        let ranks = rank_data(&[1.0, 2.0, 2.0, 4.0]);
        assert!((ranks[0] - 1.0).abs() < 1e-10);
        assert!((ranks[1] - 2.5).abs() < 1e-10);
        assert!((ranks[2] - 2.5).abs() < 1e-10);
        assert!((ranks[3] - 4.0).abs() < 1e-10);
    }

    #[test]
    fn rank_all_same() {
        let ranks = rank_data(&[5.0, 5.0, 5.0]);
        assert!((ranks[0] - 2.0).abs() < 1e-10);
        assert!((ranks[1] - 2.0).abs() < 1e-10);
        assert!((ranks[2] - 2.0).abs() < 1e-10);
    }

    // ---- p-values ----

    #[test]
    fn pvalue_significant() {
        let x: Vec<f64> = (0..100).map(|i| i as f64).collect();
        let y: Vec<f64> = x.iter().map(|&v| v * 2.0 + 1.0).collect();
        let result = pearson(&x, &y).expect("should compute");
        assert!(result.p_value < 1e-10);
    }

    #[test]
    fn pvalue_not_significant() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [5.0, 1.0, 3.0, 5.0, 1.0];
        let result = pearson(&x, &y).expect("should compute");
        assert!(result.p_value > 0.3);
    }

    // ---- ACF ----

    #[test]
    fn acf_lag_zero_is_one() {
        let data = [1.0, 3.0, 2.0, 5.0, 4.0, 7.0, 6.0, 8.0];
        let r = acf(&data, 3).expect("should compute");
        assert!((r.acf[0] - 1.0).abs() < 1e-10);
    }

    #[test]
    fn acf_linear_trend() {
        let data: Vec<f64> = (0..20).map(|i| i as f64).collect();
        let r = acf(&data, 5).expect("should compute");
        assert!(r.acf[1] > 0.8, "lag-1 ACF for linear trend should be high");
        assert!(r.acf.len() == 6);
    }

    #[test]
    fn acf_alternating() {
        let data: Vec<f64> = (0..30)
            .map(|i| if i % 2 == 0 { 1.0 } else { -1.0 })
            .collect();
        let r = acf(&data, 5).expect("should compute");
        assert!(r.acf[1] < -0.8, "alternating → negative lag-1 ACF");
        assert!(r.acf[2] > 0.8, "alternating → positive lag-2 ACF");
    }

    #[test]
    fn acf_white_noise_threshold() {
        let data: Vec<f64> = (0..100).map(|i| ((i * 7 + 3) % 13) as f64).collect();
        let r = acf(&data, 10).expect("should compute");
        // n = 100 → threshold 1.96 / sqrt(100) = 0.196.
        assert!((r.confidence_threshold - 1.96 / 10.0).abs() < 0.01);
    }

    #[test]
    fn acf_edge_cases() {
        assert!(acf(&[1.0], 3).is_none());
        assert!(acf(&[1.0, 2.0], 0).is_none());
        assert!(acf(&[5.0, 5.0, 5.0, 5.0], 2).is_none());
        assert!(acf(&[1.0, f64::NAN, 3.0], 1).is_none());
    }

    #[test]
    fn acf_max_lag_clamped() {
        let data = [1.0, 2.0, 3.0, 4.0, 5.0];
        let r = acf(&data, 100).expect("should compute");
        // Clamped to n - 1 = 4 lags, plus lag 0.
        assert_eq!(r.acf.len(), 5);
    }

    // ---- PACF ----

    #[test]
    fn pacf_lag1_equals_acf_lag1() {
        let data = [1.0, 3.0, 2.0, 5.0, 4.0, 7.0, 6.0, 8.0, 5.0, 3.0];
        let acf_r = acf(&data, 5).expect("should compute ACF");
        let pacf_r = pacf(&data, 5).expect("should compute PACF");
        assert!(
            (pacf_r.pacf[0] - acf_r.acf[1]).abs() < 1e-10,
            "PACF[1] should equal ACF[1]: {} vs {}",
            pacf_r.pacf[0],
            acf_r.acf[1]
        );
    }

    #[test]
    fn pacf_linear_trend() {
        let data: Vec<f64> = (0..20).map(|i| i as f64).collect();
        let r = pacf(&data, 5).expect("should compute");
        assert!(r.pacf[0].abs() > 0.5, "lag-1 PACF should be strong");
    }

    #[test]
    fn pacf_bounded() {
        let data: Vec<f64> = (0..30).map(|i| (i as f64 * 0.5).sin()).collect();
        let r = pacf(&data, 10).expect("should compute");
        for (i, &p) in r.pacf.iter().enumerate() {
            assert!(
                (-1.0..=1.0).contains(&p),
                "PACF[{}] = {} out of [-1, 1]",
                i + 1,
                p
            );
        }
    }

    #[test]
    fn pacf_edge_cases() {
        assert!(pacf(&[1.0, 2.0], 3).is_none());
        assert!(pacf(&[1.0, 2.0, 3.0], 0).is_none());
    }
}
1143
#[cfg(test)]
mod proptests {
    use super::*;
    use proptest::prelude::*;

    // Strategy: a Vec<f64> of bounded magnitude with length in [min_len, max_len].
    fn bounded_vec(min_len: usize, max_len: usize) -> BoxedStrategy<Vec<f64>> {
        proptest::collection::vec(-1e6_f64..1e6, min_len..=max_len).boxed()
    }

    proptest! {
        // r and p must always stay in their mathematical ranges.
        #[test]
        fn pearson_bounded(
            data in bounded_vec(5, 50).prop_flat_map(|x| {
                let n = x.len();
                (Just(x), proptest::collection::vec(-1e6_f64..1e6, n..=n))
            })
        ) {
            let (x, y) = data;
            if let Some(result) = pearson(&x, &y) {
                prop_assert!(result.r >= -1.0 && result.r <= 1.0, "r out of bounds: {}", result.r);
                prop_assert!(result.p_value >= 0.0 && result.p_value <= 1.0, "p out of bounds: {}", result.p_value);
            }
        }

        #[test]
        fn spearman_bounded(
            data in bounded_vec(5, 50).prop_flat_map(|x| {
                let n = x.len();
                (Just(x), proptest::collection::vec(-1e6_f64..1e6, n..=n))
            })
        ) {
            let (x, y) = data;
            if let Some(result) = spearman(&x, &y) {
                prop_assert!(result.r >= -1.0 && result.r <= 1.0, "r out of bounds: {}", result.r);
                prop_assert!(result.p_value >= 0.0 && result.p_value <= 1.0, "p out of bounds: {}", result.p_value);
            }
        }

        #[test]
        fn kendall_bounded(
            data in bounded_vec(5, 30).prop_flat_map(|x| {
                let n = x.len();
                (Just(x), proptest::collection::vec(-1e6_f64..1e6, n..=n))
            })
        ) {
            let (x, y) = data;
            if let Some(result) = kendall_tau_b(&x, &y) {
                prop_assert!(result.r >= -1.0 && result.r <= 1.0, "tau out of bounds: {}", result.r);
                prop_assert!(result.p_value >= 0.0 && result.p_value <= 1.0, "p out of bounds: {}", result.p_value);
            }
        }

        // Correlation must be symmetric in its arguments.
        #[test]
        fn pearson_symmetric(
            data in bounded_vec(5, 50).prop_flat_map(|x| {
                let n = x.len();
                (Just(x), proptest::collection::vec(-1e6_f64..1e6, n..=n))
            })
        ) {
            let (x, y) = data;
            let r_xy = pearson(&x, &y);
            let r_yx = pearson(&y, &x);
            match (r_xy, r_yx) {
                (Some(a), Some(b)) => {
                    prop_assert!((a.r - b.r).abs() < 1e-10, "not symmetric: {} vs {}", a.r, b.r);
                }
                (None, None) => {}
                _ => prop_assert!(false, "one is None but not the other"),
            }
        }

        // tanh(atanh(r)) must reproduce r in the open interval.
        #[test]
        fn fisher_z_roundtrip_prop(r in -0.99_f64..0.99) {
            let z = fisher_z(r).expect("should compute");
            let r_back = fisher_z_inv(z);
            prop_assert!((r - r_back).abs() < 1e-10);
        }

        // CI endpoints must be ordered and stay inside [-1, 1].
        #[test]
        fn ci_contains_true_r(
            r in -0.99_f64..0.99,
            n in 10_usize..200
        ) {
            let ci = correlation_ci(r, n, 0.95).expect("should compute");
            prop_assert!(ci.lower < ci.upper, "CI inverted");
            prop_assert!(ci.lower >= -1.0 && ci.upper <= 1.0, "CI out of bounds");
        }

        #[test]
        fn acf_bounded_prop(
            data in proptest::collection::vec(-1e3_f64..1e3, 5..=50),
        ) {
            if let Some(r) = acf(&data, 10) {
                prop_assert!((r.acf[0] - 1.0).abs() < 1e-10, "ACF[0] must be 1.0");
                for (i, &v) in r.acf.iter().enumerate() {
                    prop_assert!((-1.0..=1.0).contains(&v), "ACF[{i}] = {v} out of [-1,1]");
                }
            }
        }

        // PACF values get a small tolerance for floating-point overshoot.
        #[test]
        fn pacf_bounded_prop(
            data in proptest::collection::vec(-1e3_f64..1e3, 5..=50),
        ) {
            if let Some(r) = pacf(&data, 5) {
                for (i, &v) in r.pacf.iter().enumerate() {
                    prop_assert!((-1.0 - 1e-10..=1.0 + 1e-10).contains(&v), "PACF[{i}] = {v} out of bounds");
                }
            }
        }
    }
}