1use u_numflow::matrix::Matrix;
26use u_numflow::special;
27use u_numflow::stats;
28
/// Outcome of a bivariate correlation test.
#[derive(Debug, Clone, Copy)]
pub struct CorrelationResult {
    /// Correlation coefficient in [-1, 1] (Pearson r, Spearman rho, or
    /// Kendall tau-b, depending on the producing function).
    pub r: f64,
    /// Two-sided p-value for the null hypothesis of zero correlation.
    pub p_value: f64,
    /// Number of paired observations used.
    pub n: usize,
}
39
/// Confidence interval for a correlation coefficient.
#[derive(Debug, Clone, Copy)]
pub struct CorrelationCI {
    /// Lower bound of the interval, in (-1, 1).
    pub lower: f64,
    /// Upper bound of the interval, in (-1, 1).
    pub upper: f64,
    /// Confidence level the interval was built for, e.g. 0.95.
    pub confidence: f64,
}
50
51pub fn pearson(x: &[f64], y: &[f64]) -> Option<CorrelationResult> {
84 let n = x.len();
85 if n < 3 || n != y.len() {
86 return None;
87 }
88
89 if x.iter().any(|v| !v.is_finite()) || y.iter().any(|v| !v.is_finite()) {
90 return None;
91 }
92
93 let cov = stats::covariance(x, y)?;
94 let sx = stats::std_dev(x)?;
95 let sy = stats::std_dev(y)?;
96
97 if sx < 1e-300 || sy < 1e-300 {
98 return None; }
100
101 let r = (cov / (sx * sy)).clamp(-1.0, 1.0);
102 let p_value = correlation_p_value(r, n);
103
104 Some(CorrelationResult { r, p_value, n })
105}
106
107pub fn spearman(x: &[f64], y: &[f64]) -> Option<CorrelationResult> {
140 let n = x.len();
141 if n < 3 || n != y.len() {
142 return None;
143 }
144
145 if x.iter().any(|v| !v.is_finite()) || y.iter().any(|v| !v.is_finite()) {
146 return None;
147 }
148
149 let rx = rank_data(x);
150 let ry = rank_data(y);
151
152 let cov = stats::covariance(&rx, &ry)?;
154 let srx = stats::std_dev(&rx)?;
155 let sry = stats::std_dev(&ry)?;
156
157 if srx < 1e-300 || sry < 1e-300 {
158 return None;
159 }
160
161 let r = (cov / (srx * sry)).clamp(-1.0, 1.0);
162 let p_value = correlation_p_value(r, n);
163
164 Some(CorrelationResult { r, p_value, n })
165}
166
/// Kendall's tau-b rank correlation with tie correction.
///
/// Counts concordant/discordant pairs over all n(n-1)/2 pairings and
/// normalizes by sqrt((n0 - t_x)(n0 - t_y)), where t_x/t_y count pairs
/// tied in x/y. Returns `None` for n < 3, mismatched lengths, non-finite
/// input, or when one variable is entirely tied.
pub fn kendall_tau_b(x: &[f64], y: &[f64]) -> Option<CorrelationResult> {
    let n = x.len();
    if n < 3 || n != y.len() {
        return None;
    }

    if x.iter().any(|v| !v.is_finite()) || y.iter().any(|v| !v.is_finite()) {
        return None;
    }

    let mut concordant: i64 = 0;
    let mut discordant: i64 = 0;
    let mut ties_x: i64 = 0;
    let mut ties_y: i64 = 0;
    // Pairs tied in both variables; tracked but unused by tau-b itself.
    let mut _ties_xy: i64 = 0;

    // Classify every unordered pair (i, j).
    for i in 0..n {
        for j in (i + 1)..n {
            let dx = x[i] - x[j];
            let dy = y[i] - y[j];
            let product = dx * dy;

            if dx == 0.0 && dy == 0.0 {
                // Tied in both: contributes to both tie totals.
                _ties_xy += 1;
                ties_x += 1;
                ties_y += 1;
            } else if dx == 0.0 {
                ties_x += 1;
            } else if dy == 0.0 {
                ties_y += 1;
            } else if product > 0.0 {
                concordant += 1;
            } else {
                discordant += 1;
            }
        }
    }

    // n0 = total number of pairs.
    let n0 = (n as i64) * (n as i64 - 1) / 2;
    let denom_sq = (n0 - ties_x) as f64 * (n0 - ties_y) as f64;

    // All pairs tied in x or all tied in y: tau-b is undefined.
    if denom_sq <= 0.0 {
        return None;
    }

    let tau = (concordant - discordant) as f64 / denom_sq.sqrt();
    let tau = tau.clamp(-1.0, 1.0);

    // Normal approximation for S = concordant - discordant with a
    // first-order tie correction: var(S) ≈ (v0 - vt - vu) / 18.
    // NOTE(review): the full tie-corrected variance adds two cross terms
    // (functions of Σt(t-1) and Σt(t-1)(t-2)); they are omitted here —
    // confirm the approximation is acceptable for heavily tied data.
    let nf = n as f64;
    let v0 = nf * (nf - 1.0) * (2.0 * nf + 5.0);
    let vt = compute_tie_variance_term(x);
    let vu = compute_tie_variance_term(y);
    let var_s = (v0 - vt - vu) / 18.0;

    let p_value = if var_s > 0.0 {
        let s = (concordant - discordant) as f64;
        let z = s / var_s.sqrt();
        2.0 * (1.0 - special::standard_normal_cdf(z.abs()))
    } else {
        1.0
    };

    Some(CorrelationResult { r: tau, p_value, n })
}
273
/// Fisher z-transformation of a correlation coefficient.
///
/// Maps r in the open interval (-1, 1) to z = artanh(r). Returns `None`
/// for |r| >= 1, NaN, or infinite input.
pub fn fisher_z(r: f64) -> Option<f64> {
    let in_open_interval = r.is_finite() && r > -1.0 && r < 1.0;
    in_open_interval.then(|| r.atanh())
}
297
/// Inverse Fisher z-transformation: maps z back to r = tanh(z) ∈ [-1, 1].
pub fn fisher_z_inv(z: f64) -> f64 {
    z.tanh()
}
302
303pub fn correlation_ci(r: f64, n: usize, confidence: f64) -> Option<CorrelationCI> {
328 if n < 4 || r <= -1.0 || r >= 1.0 || !r.is_finite() {
329 return None;
330 }
331 if confidence <= 0.0 || confidence >= 1.0 {
332 return None;
333 }
334
335 let z = r.atanh();
336 let se = 1.0 / ((n as f64 - 3.0).sqrt());
337 let alpha = 1.0 - confidence;
338 let z_crit = special::inverse_normal_cdf(1.0 - alpha / 2.0);
339
340 let z_lower = z - z_crit * se;
341 let z_upper = z + z_crit * se;
342
343 Some(CorrelationCI {
344 lower: z_lower.tanh(),
345 upper: z_upper.tanh(),
346 confidence,
347 })
348}
349
350pub fn correlation_matrix(variables: &[&[f64]]) -> Option<Matrix> {
380 let p = variables.len();
381 if p < 2 {
382 return None;
383 }
384 let n = variables[0].len();
385 if n < 3 {
386 return None;
387 }
388 for v in variables {
389 if v.len() != n {
390 return None;
391 }
392 }
393
394 let mut sds = Vec::with_capacity(p);
396 for v in variables {
397 let m = stats::mean(v)?;
398 if !m.is_finite() {
399 return None;
400 }
401 let s = stats::std_dev(v)?;
402 if !s.is_finite() || s < 1e-300 {
403 return None;
404 }
405 sds.push(s);
406 }
407
408 let mut data = vec![0.0; p * p];
409
410 for i in 0..p {
411 data[i * p + i] = 1.0; for j in (i + 1)..p {
413 let cov = stats::covariance(variables[i], variables[j])?;
414 let r = (cov / (sds[i] * sds[j])).clamp(-1.0, 1.0);
415 data[i * p + j] = r;
416 data[j * p + i] = r;
417 }
418 }
419
420 Matrix::new(p, p, data).ok()
421}
422
423pub fn spearman_matrix(variables: &[&[f64]]) -> Option<Matrix> {
427 let p = variables.len();
428 if p < 2 {
429 return None;
430 }
431 let n = variables[0].len();
432 if n < 3 {
433 return None;
434 }
435 for v in variables {
436 if v.len() != n || v.iter().any(|x| !x.is_finite()) {
437 return None;
438 }
439 }
440
441 let ranked: Vec<Vec<f64>> = variables.iter().map(|v| rank_data(v)).collect();
443 let ranked_refs: Vec<&[f64]> = ranked.iter().map(|v| v.as_slice()).collect();
444
445 correlation_matrix(&ranked_refs)
446}
447
448pub fn kendall_matrix(variables: &[&[f64]]) -> Option<Matrix> {
478 let p = variables.len();
479 if p < 2 {
480 return None;
481 }
482 let n = variables[0].len();
483 if n < 3 {
484 return None;
485 }
486 for v in variables {
487 if v.len() != n || v.iter().any(|x| !x.is_finite()) {
488 return None;
489 }
490 }
491
492 let mut data = vec![0.0_f64; p * p];
493 for i in 0..p {
494 data[i * p + i] = 1.0;
495 for j in (i + 1)..p {
496 let r = kendall_tau_b(variables[i], variables[j])?.r;
497 data[i * p + j] = r;
498 data[j * p + i] = r;
499 }
500 }
501 Matrix::new(p, p, data).ok()
502}
503
504fn correlation_p_value(r: f64, n: usize) -> f64 {
512 if n < 3 {
513 return 1.0;
514 }
515 let df = (n - 2) as f64;
516 let r2 = r * r;
517
518 if r2 >= 1.0 - 1e-15 {
520 return 0.0; }
522
523 let t = r * (df / (1.0 - r2)).sqrt();
524 2.0 * (1.0 - special::t_distribution_cdf(t.abs(), df))
525}
526
/// Assign 1-based fractional (mid) ranks; tied values share the average
/// of the ranks they would jointly occupy.
fn rank_data(data: &[f64]) -> Vec<f64> {
    let n = data.len();

    // Sort indices by value; the stable sort keeps equal values in
    // original order (irrelevant for ranks, but deterministic).
    let mut order: Vec<usize> = (0..n).collect();
    order.sort_by(|&a, &b| {
        data[a]
            .partial_cmp(&data[b])
            .unwrap_or(std::cmp::Ordering::Equal)
    });

    let mut ranks = vec![0.0; n];
    let mut start = 0;
    while start < n {
        // Extend the run of values tied with data[order[start]].
        let mut end = start;
        while end < n && (data[order[end]] - data[order[start]]).abs() < 1e-300 {
            end += 1;
        }
        // Sorted positions start..end hold 1-based ranks start+1..=end;
        // their mean is (start + end) / 2 + 0.5.
        let shared = (start + end) as f64 / 2.0 + 0.5;
        for &idx in &order[start..end] {
            ranks[idx] = shared;
        }
        start = end;
    }

    ranks
}
553
/// Tie-correction term Σ t(t-1)(2t+5) over groups of tied values, used in
/// the normal approximation of the variance of Kendall's S statistic.
fn compute_tie_variance_term(data: &[f64]) -> f64 {
    let mut sorted = data.to_vec();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

    let mut total = 0.0;
    let mut start = 0;
    while start < sorted.len() {
        // Find the end of the run tied with sorted[start].
        let mut end = start + 1;
        while end < sorted.len() && (sorted[end] - sorted[start]).abs() < 1e-300 {
            end += 1;
        }
        let t = (end - start) as f64;
        // Groups of size 1 contribute nothing.
        if t > 1.0 {
            total += t * (t - 1.0) * (2.0 * t + 5.0);
        }
        start = end;
    }
    total
}
576
/// Sample autocorrelation function values.
#[derive(Debug, Clone)]
pub struct AcfResult {
    /// ACF values indexed by lag; `acf[0]` is always 1.0.
    pub acf: Vec<f64>,
    /// White-noise significance band: ±1.96 / sqrt(n).
    pub confidence_threshold: f64,
}
591
/// Sample partial autocorrelation function values.
#[derive(Debug, Clone)]
pub struct PacfResult {
    /// PACF values; index k holds the partial autocorrelation at lag k + 1.
    pub pacf: Vec<f64>,
    /// White-noise significance band: ±1.96 / sqrt(n).
    pub confidence_threshold: f64,
}
600
601pub fn acf(data: &[f64], max_lag: usize) -> Option<AcfResult> {
630 let n = data.len();
631 if n < 2 || max_lag == 0 || data.iter().any(|v| !v.is_finite()) {
632 return None;
633 }
634 let max_lag = max_lag.min(n - 1);
635 let nf = n as f64;
636
637 let mean = data.iter().sum::<f64>() / nf;
638
639 let c0: f64 = data.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / nf;
641
642 if c0 <= 0.0 {
643 return None; }
645
646 let mut acf_vals = Vec::with_capacity(max_lag + 1);
647 acf_vals.push(1.0); for lag in 1..=max_lag {
650 let ck: f64 = data[..n - lag]
651 .iter()
652 .zip(&data[lag..])
653 .map(|(&xt, &xt_h)| (xt - mean) * (xt_h - mean))
654 .sum::<f64>()
655 / nf;
656 acf_vals.push(ck / c0);
657 }
658
659 let threshold = 1.96 / nf.sqrt();
660
661 Some(AcfResult {
662 acf: acf_vals,
663 confidence_threshold: threshold,
664 })
665}
666
/// Partial autocorrelation function via the Durbin–Levinson recursion.
///
/// `pacf` index k - 1 holds the lag-k partial autocorrelation: the
/// correlation between x_t and x_{t+k} after removing the linear
/// influence of the intermediate lags. Returns `None` for fewer than
/// three points, `max_lag == 0`, or when the underlying ACF cannot be
/// computed (constant or non-finite series).
pub fn pacf(data: &[f64], max_lag: usize) -> Option<PacfResult> {
    let n = data.len();
    if n < 3 || max_lag == 0 {
        return None;
    }

    // rho[h] is the sample autocorrelation at lag h; rho[0] == 1.
    let acf_result = acf(data, max_lag)?;
    let rho = &acf_result.acf;
    // `acf` may have clamped the requested lag to n - 1.
    let max_lag = rho.len() - 1;
    if max_lag == 0 {
        return None;
    }

    let mut pacf_vals = Vec::with_capacity(max_lag);

    // Order-1 solution: phi_{1,1} = rho[1].
    pacf_vals.push(rho[1]);

    if max_lag == 1 {
        return Some(PacfResult {
            pacf: pacf_vals,
            confidence_threshold: 1.96 / (n as f64).sqrt(),
        });
    }

    // phi_prev holds the order-(h-1) AR coefficients phi_{h-1,1..h-1}.
    let mut phi_prev = vec![rho[1]];

    for h in 2..=max_lag {
        // Numerator: rho[h] - Σ_j phi_{h-1,j} * rho[h-j].
        let mut num = rho[h];
        for j in 0..h - 1 {
            num -= phi_prev[j] * rho[h - 1 - j];
        }

        // Denominator: 1 - Σ_j phi_{h-1,j} * rho[j].
        let mut den = 1.0;
        for j in 0..h - 1 {
            den -= phi_prev[j] * rho[j + 1];
        }

        // Guard against a numerically singular denominator.
        let phi_hh = if den.abs() > 1e-14 { num / den } else { 0.0 };

        // Levinson update of the lower-order coefficients.
        let mut phi_new = Vec::with_capacity(h);
        for j in 0..h - 1 {
            phi_new.push(phi_prev[j] - phi_hh * phi_prev[h - 2 - j]);
        }
        phi_new.push(phi_hh);

        pacf_vals.push(phi_hh);
        phi_prev = phi_new;
    }

    Some(PacfResult {
        pacf: pacf_vals,
        confidence_threshold: 1.96 / (n as f64).sqrt(),
    })
}
758
#[cfg(test)]
mod tests {
    use super::*;

    // ----- Pearson -----

    #[test]
    fn pearson_perfect_positive() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [2.0, 4.0, 6.0, 8.0, 10.0];
        let result = pearson(&x, &y).expect("should compute");
        assert!((result.r - 1.0).abs() < 1e-10);
        assert!(result.p_value < 1e-10);
    }

    #[test]
    fn pearson_perfect_negative() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [10.0, 8.0, 6.0, 4.0, 2.0];
        let result = pearson(&x, &y).expect("should compute");
        assert!((result.r + 1.0).abs() < 1e-10);
        assert!(result.p_value < 1e-10);
    }

    #[test]
    fn pearson_uncorrelated() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [3.0, 1.0, 5.0, 1.0, 5.0];
        let result = pearson(&x, &y).expect("should compute");
        assert!(result.r.abs() < 0.5);
    }

    #[test]
    fn pearson_known_value() {
        let x = [68.0, 71.0, 62.0, 75.0, 58.0, 60.0, 67.0, 68.0, 71.0, 69.0];
        let y = [4.1, 4.6, 3.8, 4.4, 3.2, 3.1, 3.8, 4.1, 4.3, 3.7];
        let result = pearson(&x, &y).expect("should compute");
        assert!((result.r - 0.8816).abs() < 0.01, "r = {}", result.r);
    }

    #[test]
    fn pearson_numeric_reference() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [2.0, 4.0, 5.0, 4.0, 5.0];
        let result = pearson(&x, &y).expect("should compute");

        // Hand-computed reference: r = 6 / sqrt(60) ≈ 0.7746.
        let expected_r = 6.0_f64 / (60.0_f64).sqrt();
        assert!(
            (result.r - expected_r).abs() < 1e-3,
            "r expected {:.6}, got {}",
            expected_r,
            result.r
        );
        assert!(
            (result.r - 0.7746).abs() < 1e-3,
            "r expected ≈0.7746, got {}",
            result.r
        );
    }

    #[test]
    fn pearson_insufficient_data() {
        assert!(pearson(&[1.0, 2.0], &[3.0, 4.0]).is_none());
        assert!(pearson(&[1.0], &[2.0]).is_none());
    }

    #[test]
    fn pearson_length_mismatch() {
        assert!(pearson(&[1.0, 2.0, 3.0], &[4.0, 5.0]).is_none());
    }

    #[test]
    fn pearson_zero_variance() {
        assert!(pearson(&[5.0, 5.0, 5.0], &[1.0, 2.0, 3.0]).is_none());
    }

    #[test]
    fn pearson_nan_input() {
        assert!(pearson(&[1.0, f64::NAN, 3.0], &[4.0, 5.0, 6.0]).is_none());
    }

    // ----- Spearman -----

    #[test]
    fn spearman_perfect_monotone() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [1.0, 4.0, 9.0, 16.0, 25.0];
        let result = spearman(&x, &y).expect("should compute");
        assert!((result.r - 1.0).abs() < 1e-10);
    }

    #[test]
    fn spearman_perfect_negative() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [5.0, 4.0, 3.0, 2.0, 1.0];
        let result = spearman(&x, &y).expect("should compute");
        assert!((result.r + 1.0).abs() < 1e-10);
    }

    #[test]
    fn spearman_with_ties() {
        let x = [1.0, 2.0, 2.0, 4.0, 5.0];
        let y = [1.0, 3.0, 3.0, 4.0, 5.0];
        let result = spearman(&x, &y).expect("should compute");
        assert!(result.r > 0.9);
    }

    #[test]
    fn spearman_nonlinear_monotone() {
        // Spearman is invariant to monotone transforms, so rho should be 1.
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y: Vec<f64> = x.iter().map(|&v: &f64| v.powi(3)).collect();
        let result = spearman(&x, &y).expect("should compute");
        assert!((result.r - 1.0).abs() < 1e-10);
    }

    // ----- Kendall tau-b -----

    #[test]
    fn kendall_perfect_concordance() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [1.0, 2.0, 3.0, 4.0, 5.0];
        let result = kendall_tau_b(&x, &y).expect("should compute");
        assert!((result.r - 1.0).abs() < 1e-10);
    }

    #[test]
    fn kendall_perfect_discordance() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [5.0, 4.0, 3.0, 2.0, 1.0];
        let result = kendall_tau_b(&x, &y).expect("should compute");
        assert!((result.r + 1.0).abs() < 1e-10);
    }

    #[test]
    fn kendall_known_value() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [3.0, 4.0, 1.0, 2.0, 5.0];
        let result = kendall_tau_b(&x, &y).expect("should compute");
        assert!((result.r - 0.2).abs() < 0.01);
    }

    #[test]
    fn kendall_with_ties() {
        let x = [1.0, 2.0, 2.0, 4.0, 5.0];
        let y = [1.0, 3.0, 3.0, 4.0, 5.0];
        let result = kendall_tau_b(&x, &y).expect("should compute");
        assert!(result.r > 0.8);
    }

    #[test]
    fn kendall_all_ties() {
        // Every pair tied in both variables: tau-b is undefined.
        let x = [1.0, 1.0, 1.0];
        let y = [2.0, 2.0, 2.0];
        assert!(kendall_tau_b(&x, &y).is_none());
    }

    // ----- Fisher z-transformation -----

    #[test]
    fn fisher_z_zero() {
        let z = fisher_z(0.0).expect("should compute");
        assert!(z.abs() < 1e-15);
    }

    #[test]
    fn fisher_z_roundtrip() {
        for &r in &[0.0, 0.3, 0.5, 0.8, 0.95, -0.5, -0.95] {
            let z = fisher_z(r).expect("should compute");
            let r_back = fisher_z_inv(z);
            assert!((r - r_back).abs() < 1e-10, "Roundtrip failed for r={r}");
        }
    }

    #[test]
    fn fisher_z_boundary() {
        assert!(fisher_z(1.0).is_none());
        assert!(fisher_z(-1.0).is_none());
        assert!(fisher_z(1.5).is_none());
        assert!(fisher_z(f64::NAN).is_none());
    }

    // ----- Confidence intervals -----

    #[test]
    fn ci_contains_r() {
        let ci = correlation_ci(0.6, 50, 0.95).expect("should compute");
        assert!(ci.lower < 0.6);
        assert!(ci.upper > 0.6);
        assert!(ci.lower > -1.0);
        assert!(ci.upper < 1.0);
    }

    #[test]
    fn ci_wider_at_higher_confidence() {
        let ci_95 = correlation_ci(0.5, 30, 0.95).expect("should compute");
        let ci_99 = correlation_ci(0.5, 30, 0.99).expect("should compute");
        assert!(ci_99.upper - ci_99.lower > ci_95.upper - ci_95.lower);
    }

    #[test]
    fn ci_narrower_with_more_data() {
        let ci_30 = correlation_ci(0.5, 30, 0.95).expect("should compute");
        let ci_100 = correlation_ci(0.5, 100, 0.95).expect("should compute");
        assert!(ci_100.upper - ci_100.lower < ci_30.upper - ci_30.lower);
    }

    #[test]
    fn ci_edge_cases() {
        assert!(correlation_ci(0.5, 3, 0.95).is_none());
        assert!(correlation_ci(1.0, 30, 0.95).is_none());
        assert!(correlation_ci(0.5, 30, 0.0).is_none());
        assert!(correlation_ci(0.5, 30, 1.0).is_none());
    }

    // ----- Correlation matrices -----

    #[test]
    fn corr_matrix_identity() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [2.0, 4.0, 6.0, 8.0, 10.0];
        let z = [5.0, 4.0, 3.0, 2.0, 1.0];
        let mat = correlation_matrix(&[&x, &y, &z]).expect("should compute");

        assert!((mat.get(0, 0) - 1.0).abs() < 1e-10);
        assert!((mat.get(1, 1) - 1.0).abs() < 1e-10);
        assert!((mat.get(2, 2) - 1.0).abs() < 1e-10);

        assert!((mat.get(0, 1) - 1.0).abs() < 1e-10);

        assert!((mat.get(0, 2) + 1.0).abs() < 1e-10);

        assert!((mat.get(0, 1) - mat.get(1, 0)).abs() < 1e-15);
        assert!((mat.get(0, 2) - mat.get(2, 0)).abs() < 1e-15);
    }

    #[test]
    fn corr_matrix_insufficient_variables() {
        let x = [1.0, 2.0, 3.0];
        assert!(correlation_matrix(&[&x]).is_none());
    }

    #[test]
    fn corr_matrix_length_mismatch() {
        let x = [1.0, 2.0, 3.0];
        let y = [4.0, 5.0];
        assert!(correlation_matrix(&[&x, &y]).is_none());
    }

    #[test]
    fn spearman_matrix_basic() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [1.0, 4.0, 9.0, 16.0, 25.0];
        let z = [5.0, 4.0, 3.0, 2.0, 1.0];
        let mat = spearman_matrix(&[&x, &y, &z]).expect("should compute");

        assert!((mat.get(0, 1) - 1.0).abs() < 1e-10);
        assert!((mat.get(0, 2) + 1.0).abs() < 1e-10);
    }

    #[test]
    fn kendall_matrix_basic() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [2.0, 4.0, 6.0, 8.0, 10.0];
        let z = [5.0, 4.0, 3.0, 2.0, 1.0];
        let mat = kendall_matrix(&[&x, &y, &z]).expect("should compute");

        assert_eq!(mat.rows(), 3);
        assert!((mat.get(0, 0) - 1.0).abs() < 1e-15);
        assert!((mat.get(1, 1) - 1.0).abs() < 1e-15);
        assert!((mat.get(2, 2) - 1.0).abs() < 1e-15);

        assert!((mat.get(0, 1) - 1.0).abs() < 1e-10);
        assert!((mat.get(0, 2) + 1.0).abs() < 1e-10);

        assert!((mat.get(0, 1) - mat.get(1, 0)).abs() < 1e-15);
        assert!((mat.get(0, 2) - mat.get(2, 0)).abs() < 1e-15);
    }

    #[test]
    fn kendall_matrix_insufficient_variables() {
        let x = [1.0, 2.0, 3.0];
        assert!(kendall_matrix(&[&x]).is_none());
    }

    #[test]
    fn kendall_matrix_length_mismatch() {
        let x = [1.0, 2.0, 3.0, 4.0];
        let y = [4.0, 5.0];
        assert!(kendall_matrix(&[&x, &y]).is_none());
    }

    #[test]
    fn kendall_matrix_with_ties() {
        let x = [1.0, 2.0, 2.0, 3.0, 4.0];
        let y = [1.0, 2.0, 3.0, 4.0, 5.0];
        let mat = kendall_matrix(&[&x, &y]).expect("should compute");
        assert!(mat.get(0, 1) > 0.7);
    }

    // ----- Ranking -----

    #[test]
    fn rank_no_ties() {
        let ranks = rank_data(&[3.0, 1.0, 2.0]);
        assert!((ranks[0] - 3.0).abs() < 1e-10);
        assert!((ranks[1] - 1.0).abs() < 1e-10);
        assert!((ranks[2] - 2.0).abs() < 1e-10);
    }

    #[test]
    fn rank_with_ties() {
        let ranks = rank_data(&[1.0, 2.0, 2.0, 4.0]);
        assert!((ranks[0] - 1.0).abs() < 1e-10);
        assert!((ranks[1] - 2.5).abs() < 1e-10);
        assert!((ranks[2] - 2.5).abs() < 1e-10);
        assert!((ranks[3] - 4.0).abs() < 1e-10);
    }

    #[test]
    fn rank_all_same() {
        let ranks = rank_data(&[5.0, 5.0, 5.0]);
        assert!((ranks[0] - 2.0).abs() < 1e-10);
        assert!((ranks[1] - 2.0).abs() < 1e-10);
        assert!((ranks[2] - 2.0).abs() < 1e-10);
    }

    // ----- p-values -----

    #[test]
    fn pvalue_significant() {
        let x: Vec<f64> = (0..100).map(|i| i as f64).collect();
        let y: Vec<f64> = x.iter().map(|&v| v * 2.0 + 1.0).collect();
        let result = pearson(&x, &y).expect("should compute");
        assert!(result.p_value < 1e-10);
    }

    #[test]
    fn pvalue_not_significant() {
        let x = [1.0, 2.0, 3.0, 4.0, 5.0];
        let y = [5.0, 1.0, 3.0, 5.0, 1.0];
        let result = pearson(&x, &y).expect("should compute");
        assert!(result.p_value > 0.3);
    }

    // ----- ACF -----

    #[test]
    fn acf_lag_zero_is_one() {
        let data = [1.0, 3.0, 2.0, 5.0, 4.0, 7.0, 6.0, 8.0];
        let r = acf(&data, 3).expect("should compute");
        assert!((r.acf[0] - 1.0).abs() < 1e-10);
    }

    #[test]
    fn acf_linear_trend() {
        let data: Vec<f64> = (0..20).map(|i| i as f64).collect();
        let r = acf(&data, 5).expect("should compute");
        assert!(r.acf[1] > 0.8, "lag-1 ACF for linear trend should be high");
        assert!(r.acf.len() == 6);
    }

    #[test]
    fn acf_alternating() {
        let data: Vec<f64> = (0..30)
            .map(|i| if i % 2 == 0 { 1.0 } else { -1.0 })
            .collect();
        let r = acf(&data, 5).expect("should compute");
        assert!(r.acf[1] < -0.8, "alternating → negative lag-1 ACF");
        assert!(r.acf[2] > 0.8, "alternating → positive lag-2 ACF");
    }

    #[test]
    fn acf_white_noise_threshold() {
        let data: Vec<f64> = (0..100).map(|i| ((i * 7 + 3) % 13) as f64).collect();
        let r = acf(&data, 10).expect("should compute");
        assert!((r.confidence_threshold - 1.96 / 10.0).abs() < 0.01);
    }

    #[test]
    fn acf_edge_cases() {
        assert!(acf(&[1.0], 3).is_none());
        assert!(acf(&[1.0, 2.0], 0).is_none());
        assert!(acf(&[5.0, 5.0, 5.0, 5.0], 2).is_none());
        assert!(acf(&[1.0, f64::NAN, 3.0], 1).is_none());
    }

    #[test]
    fn acf_max_lag_clamped() {
        let data = [1.0, 2.0, 3.0, 4.0, 5.0];
        let r = acf(&data, 100).expect("should compute");
        assert_eq!(r.acf.len(), 5);
    }

    // ----- PACF -----

    #[test]
    fn pacf_lag1_equals_acf_lag1() {
        let data = [1.0, 3.0, 2.0, 5.0, 4.0, 7.0, 6.0, 8.0, 5.0, 3.0];
        let acf_r = acf(&data, 5).expect("should compute ACF");
        let pacf_r = pacf(&data, 5).expect("should compute PACF");
        assert!(
            (pacf_r.pacf[0] - acf_r.acf[1]).abs() < 1e-10,
            "PACF[1] should equal ACF[1]: {} vs {}",
            pacf_r.pacf[0],
            acf_r.acf[1]
        );
    }

    #[test]
    fn pacf_linear_trend() {
        let data: Vec<f64> = (0..20).map(|i| i as f64).collect();
        let r = pacf(&data, 5).expect("should compute");
        assert!(r.pacf[0].abs() > 0.5, "lag-1 PACF should be strong");
    }

    #[test]
    fn pacf_bounded() {
        let data: Vec<f64> = (0..30).map(|i| (i as f64 * 0.5).sin()).collect();
        let r = pacf(&data, 10).expect("should compute");
        for (i, &p) in r.pacf.iter().enumerate() {
            assert!(
                (-1.0..=1.0).contains(&p),
                "PACF[{}] = {} out of [-1, 1]",
                i + 1,
                p
            );
        }
    }

    #[test]
    fn pacf_edge_cases() {
        assert!(pacf(&[1.0, 2.0], 3).is_none());
        assert!(pacf(&[1.0, 2.0, 3.0], 0).is_none());
    }
}
1240
#[cfg(test)]
mod proptests {
    use super::*;
    use proptest::prelude::*;

    // Strategy: a finite f64 vector with length in [min_len, max_len].
    fn bounded_vec(min_len: usize, max_len: usize) -> BoxedStrategy<Vec<f64>> {
        proptest::collection::vec(-1e6_f64..1e6, min_len..=max_len).boxed()
    }

    proptest! {
        // r ∈ [-1, 1] and p ∈ [0, 1] must hold for any valid input.
        #[test]
        fn pearson_bounded(
            data in bounded_vec(5, 50).prop_flat_map(|x| {
                let n = x.len();
                (Just(x), proptest::collection::vec(-1e6_f64..1e6, n..=n))
            })
        ) {
            let (x, y) = data;
            if let Some(result) = pearson(&x, &y) {
                prop_assert!(result.r >= -1.0 && result.r <= 1.0, "r out of bounds: {}", result.r);
                prop_assert!(result.p_value >= 0.0 && result.p_value <= 1.0, "p out of bounds: {}", result.p_value);
            }
        }

        #[test]
        fn spearman_bounded(
            data in bounded_vec(5, 50).prop_flat_map(|x| {
                let n = x.len();
                (Just(x), proptest::collection::vec(-1e6_f64..1e6, n..=n))
            })
        ) {
            let (x, y) = data;
            if let Some(result) = spearman(&x, &y) {
                prop_assert!(result.r >= -1.0 && result.r <= 1.0, "r out of bounds: {}", result.r);
                prop_assert!(result.p_value >= 0.0 && result.p_value <= 1.0, "p out of bounds: {}", result.p_value);
            }
        }

        #[test]
        fn kendall_bounded(
            data in bounded_vec(5, 30).prop_flat_map(|x| {
                let n = x.len();
                (Just(x), proptest::collection::vec(-1e6_f64..1e6, n..=n))
            })
        ) {
            let (x, y) = data;
            if let Some(result) = kendall_tau_b(&x, &y) {
                prop_assert!(result.r >= -1.0 && result.r <= 1.0, "tau out of bounds: {}", result.r);
                prop_assert!(result.p_value >= 0.0 && result.p_value <= 1.0, "p out of bounds: {}", result.p_value);
            }
        }

        // pearson(x, y) must equal pearson(y, x).
        #[test]
        fn pearson_symmetric(
            data in bounded_vec(5, 50).prop_flat_map(|x| {
                let n = x.len();
                (Just(x), proptest::collection::vec(-1e6_f64..1e6, n..=n))
            })
        ) {
            let (x, y) = data;
            let r_xy = pearson(&x, &y);
            let r_yx = pearson(&y, &x);
            match (r_xy, r_yx) {
                (Some(a), Some(b)) => {
                    prop_assert!((a.r - b.r).abs() < 1e-10, "not symmetric: {} vs {}", a.r, b.r);
                }
                (None, None) => {}
                _ => prop_assert!(false, "one is None but not the other"),
            }
        }

        // tanh(atanh(r)) must recover r in the open interval.
        #[test]
        fn fisher_z_roundtrip_prop(r in -0.99_f64..0.99) {
            let z = fisher_z(r).expect("should compute");
            let r_back = fisher_z_inv(z);
            prop_assert!((r - r_back).abs() < 1e-10);
        }

        #[test]
        fn ci_contains_true_r(
            r in -0.99_f64..0.99,
            n in 10_usize..200
        ) {
            let ci = correlation_ci(r, n, 0.95).expect("should compute");
            prop_assert!(ci.lower < ci.upper, "CI inverted");
            prop_assert!(ci.lower >= -1.0 && ci.upper <= 1.0, "CI out of bounds");
        }

        #[test]
        fn acf_bounded_prop(
            data in proptest::collection::vec(-1e3_f64..1e3, 5..=50),
        ) {
            if let Some(r) = acf(&data, 10) {
                prop_assert!((r.acf[0] - 1.0).abs() < 1e-10, "ACF[0] must be 1.0");
                for (i, &v) in r.acf.iter().enumerate() {
                    prop_assert!((-1.0..=1.0).contains(&v), "ACF[{i}] = {v} out of [-1,1]");
                }
            }
        }

        // PACF values can exceed [-1, 1] only by floating-point noise.
        #[test]
        fn pacf_bounded_prop(
            data in proptest::collection::vec(-1e3_f64..1e3, 5..=50),
        ) {
            if let Some(r) = pacf(&data, 5) {
                for (i, &v) in r.pacf.iter().enumerate() {
                    prop_assert!((-1.0 - 1e-10..=1.0 + 1e-10).contains(&v), "PACF[{i}] = {v} out of bounds");
                }
            }
        }
    }
}