Skip to main content

proof_engine/math/
statistics.rs

1//! Statistics and probability: descriptive stats, distributions, hypothesis testing,
2//! regression, Bayesian inference, random number generation, information theory.
3
4use std::f64::consts::PI;
5
6// ============================================================
7// RANDOM NUMBER GENERATORS
8// ============================================================
9
/// Trait for random number generators.
///
/// Implementors supply `next_u64`; the floating-point and 32-bit
/// draws are derived from it by default.
pub trait Rng {
    /// Produce the next raw 64-bit value of the stream.
    fn next_u64(&mut self) -> u64;

    /// Uniform f64 in [0, 1): the top 53 bits of `next_u64`, scaled
    /// down by 2^53 so every result is exactly representable.
    fn next_f64(&mut self) -> f64 {
        let mantissa = self.next_u64() >> 11;
        mantissa as f64 / 9_007_199_254_740_992.0 // 2^53
    }

    /// Uniform u32 taken from the high half of `next_u64`.
    fn next_u32(&mut self) -> u32 {
        (self.next_u64() >> 32) as u32
    }
}
20
21/// Xorshift64 — fast, simple 64-bit RNG.
22#[derive(Clone, Debug)]
23pub struct Xorshift64 {
24    pub state: u64,
25}
26
27impl Xorshift64 {
28    pub fn new(seed: u64) -> Self { Self { state: seed.max(1) } }
29}
30
31impl Rng for Xorshift64 {
32    fn next_u64(&mut self) -> u64 {
33        let mut x = self.state;
34        x ^= x << 13;
35        x ^= x >> 7;
36        x ^= x << 17;
37        self.state = x;
38        x
39    }
40}
41
42/// PCG32 — Permuted Congruential Generator.
43#[derive(Clone, Debug)]
44pub struct Pcg32 {
45    pub state: u64,
46    pub inc: u64,
47}
48
49impl Pcg32 {
50    pub fn new(seed: u64, seq: u64) -> Self {
51        let mut rng = Self { state: 0, inc: (seq << 1) | 1 };
52        rng.state = rng.state.wrapping_add(seed);
53        rng.next_u64();
54        rng
55    }
56}
57
58impl Rng for Pcg32 {
59    fn next_u64(&mut self) -> u64 {
60        let lo = self.next_u32() as u64;
61        let hi = self.next_u32() as u64;
62        lo | (hi << 32)
63    }
64
65    fn next_u32(&mut self) -> u32 {
66        let old_state = self.state;
67        self.state = old_state
68            .wrapping_mul(6_364_136_223_846_793_005)
69            .wrapping_add(self.inc);
70        let xorshifted = (((old_state >> 18) ^ old_state) >> 27) as u32;
71        let rot = (old_state >> 59) as u32;
72        xorshifted.rotate_right(rot)
73    }
74}
75
76/// SplitMix64 — fast 64-bit generator suitable as seed scrambler.
77#[derive(Clone, Debug)]
78pub struct SplitMix64 {
79    pub state: u64,
80}
81
82impl SplitMix64 {
83    pub fn new(seed: u64) -> Self { Self { state: seed } }
84}
85
86impl Rng for SplitMix64 {
87    fn next_u64(&mut self) -> u64 {
88        self.state = self.state.wrapping_add(0x9e3779b97f4a7c15);
89        let mut z = self.state;
90        z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
91        z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
92        z ^ (z >> 31)
93    }
94}
95
96/// Linear Congruential Generator.
97#[derive(Clone, Debug)]
98pub struct Lcg {
99    pub state: u64,
100    pub a: u64,
101    pub c: u64,
102    pub m: u64,
103}
104
105impl Lcg {
106    pub fn new(seed: u64) -> Self {
107        Self {
108            state: seed,
109            a: 6_364_136_223_846_793_005,
110            c: 1_442_695_040_888_963_407,
111            m: u64::MAX,
112        }
113    }
114}
115
116impl Rng for Lcg {
117    fn next_u64(&mut self) -> u64 {
118        self.state = self.state.wrapping_mul(self.a).wrapping_add(self.c);
119        self.state
120    }
121}
122
123/// Fisher-Yates shuffle.
124pub fn shuffle<T>(data: &mut [T], rng: &mut impl Rng) {
125    let n = data.len();
126    for i in (1..n).rev() {
127        let j = (rng.next_u64() as usize) % (i + 1);
128        data.swap(i, j);
129    }
130}
131
132/// Sample k distinct indices from 0..n without replacement (Knuth's algorithm S).
133pub fn sample_without_replacement(n: usize, k: usize, rng: &mut impl Rng) -> Vec<usize> {
134    let k = k.min(n);
135    let mut result = Vec::with_capacity(k);
136    let mut needed = k;
137    let mut available = n;
138    for i in 0..n {
139        let u = rng.next_f64();
140        if u < needed as f64 / available as f64 {
141            result.push(i);
142            needed -= 1;
143            if needed == 0 { break; }
144        }
145        available -= 1;
146    }
147    result
148}
149
150/// Weighted sampling — draw one index proportional to weights.
151pub fn weighted_sample(weights: &[f64], rng: &mut impl Rng) -> usize {
152    let total: f64 = weights.iter().sum();
153    let mut r = rng.next_f64() * total;
154    for (i, &w) in weights.iter().enumerate() {
155        r -= w;
156        if r <= 0.0 { return i; }
157    }
158    weights.len() - 1
159}
160
161// ============================================================
162// DESCRIPTIVE STATISTICS
163// ============================================================
164
/// Arithmetic mean.
///
/// Returns 0.0 for an empty slice rather than NaN, matching the
/// conventions of the other descriptive statistics in this module.
pub fn mean(data: &[f64]) -> f64 {
    match data.len() {
        0 => 0.0,
        n => data.iter().sum::<f64>() / n as f64,
    }
}
170
/// Sample variance (Bessel's correction, n-1 denominator).
///
/// Returns 0.0 when fewer than two observations are available.
pub fn variance(data: &[f64]) -> f64 {
    let n = data.len();
    if n < 2 { return 0.0; }
    let center = data.iter().sum::<f64>() / n as f64;
    let sum_sq: f64 = data.iter().map(|x| (x - center) * (x - center)).sum();
    sum_sq / (n - 1) as f64
}
178
179/// Sample standard deviation.
180pub fn std_dev(data: &[f64]) -> f64 { variance(data).sqrt() }
181
/// Median (sorts the slice in place).
///
/// Even-length input averages the two middle values; empty input
/// yields 0.0. NOTE(review): the comparator unwraps `partial_cmp`,
/// so NaN input panics — unchanged from the original.
pub fn median(data: &mut [f64]) -> f64 {
    data.sort_by(|a, b| a.partial_cmp(b).unwrap());
    match data.len() {
        0 => 0.0,
        n if n % 2 == 1 => data[n / 2],
        n => (data[n / 2 - 1] + data[n / 2]) / 2.0,
    }
}
189
/// Mode(s) — returns all values that appear most frequently.
///
/// Values within 1e-12 of their sorted neighbor are treated as one
/// run. Every run sharing the maximum length contributes its last
/// element, so several modes may come back; empty input yields an
/// empty vector.
pub fn mode(data: &[f64]) -> Vec<f64> {
    let mut sorted = data.to_vec();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());

    let mut modes: Vec<f64> = Vec::new();
    let mut best_len = 0usize;
    let mut run_start = 0usize;
    // One pass over run boundaries; idx == len closes the final run.
    for idx in 1..=sorted.len() {
        let boundary = idx == sorted.len()
            || (sorted[idx] - sorted[idx - 1]).abs() >= 1e-12;
        if !boundary {
            continue;
        }
        let run_len = idx - run_start;
        if run_len > best_len {
            best_len = run_len;
            modes.clear();
            modes.push(sorted[idx - 1]);
        } else if run_len == best_len {
            modes.push(sorted[idx - 1]);
        }
        run_start = idx;
    }
    modes
}
212
/// p-th percentile (p in [0,100]), with linear interpolation between
/// the two nearest order statistics. Sorts the slice in place; empty
/// input yields 0.0.
pub fn percentile(data: &mut [f64], p: f64) -> f64 {
    data.sort_by(|a, b| a.partial_cmp(b).unwrap());
    if data.is_empty() { return 0.0; }
    let last = data.len() - 1;
    let position = (p / 100.0 * last as f64).clamp(0.0, last as f64);
    let below = position.floor() as usize;
    let above = position.ceil() as usize;
    let weight = position - below as f64;
    data[below] + weight * (data[above] - data[below])
}
224
225/// Interquartile range.
226pub fn iqr(data: &mut [f64]) -> f64 {
227    let q3 = percentile(data, 75.0);
228    let q1 = percentile(data, 25.0);
229    q3 - q1
230}
231
232/// Sample skewness.
233pub fn skewness(data: &[f64]) -> f64 {
234    let n = data.len() as f64;
235    if n < 3.0 { return 0.0; }
236    let m = mean(data);
237    let s = std_dev(data);
238    if s == 0.0 { return 0.0; }
239    let sum: f64 = data.iter().map(|x| ((x - m) / s).powi(3)).sum();
240    sum * n / ((n - 1.0) * (n - 2.0))
241}
242
243/// Sample excess kurtosis.
244pub fn kurtosis(data: &[f64]) -> f64 {
245    let n = data.len() as f64;
246    if n < 4.0 { return 0.0; }
247    let m = mean(data);
248    let s = std_dev(data);
249    if s == 0.0 { return 0.0; }
250    let sum: f64 = data.iter().map(|x| ((x - m) / s).powi(4)).sum();
251    let g2 = sum * n * (n + 1.0) / ((n - 1.0) * (n - 2.0) * (n - 3.0))
252        - 3.0 * (n - 1.0).powi(2) / ((n - 2.0) * (n - 3.0));
253    g2
254}
255
/// Sample covariance (n-1 denominator).
///
/// Pairs are truncated to the shorter slice. NOTE(review): the means
/// are still taken over each *full* slice, so unequal-length inputs
/// mix statistics from different windows — behavior unchanged from
/// the original.
pub fn covariance(x: &[f64], y: &[f64]) -> f64 {
    let n = x.len().min(y.len());
    if n < 2 { return 0.0; }
    let mx = x.iter().sum::<f64>() / x.len() as f64;
    let my = y.iter().sum::<f64>() / y.len() as f64;
    let cross: f64 = x
        .iter()
        .zip(y)
        .map(|(xi, yi)| (xi - mx) * (yi - my))
        .sum();
    cross / (n - 1) as f64
}
264
265/// Pearson correlation coefficient.
266pub fn pearson_r(x: &[f64], y: &[f64]) -> f64 {
267    let cov = covariance(x, y);
268    let sx = std_dev(x);
269    let sy = std_dev(y);
270    if sx == 0.0 || sy == 0.0 { return 0.0; }
271    cov / (sx * sy)
272}
273
/// Spearman rank correlation.
///
/// Converts both series to ranks (ties receive the average of the
/// rank positions they span) and returns the Pearson correlation of
/// those ranks. Inputs are truncated to the common length; fewer than
/// two pairs yields 0.0.
pub fn spearman_rho(x: &[f64], y: &[f64]) -> f64 {
    let n = x.len().min(y.len());
    if n < 2 { return 0.0; }
    // Maps raw values to 1-based ranks, averaging across tie runs.
    let rank = |data: &[f64]| -> Vec<f64> {
        let mut indexed: Vec<(usize, f64)> = data.iter().copied().enumerate().collect();
        indexed.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
        let mut ranks = vec![0.0f64; indexed.len()];
        let mut i = 0;
        while i < indexed.len() {
            // Advance j past every value within 1e-12 of position i (one tie run).
            let mut j = i;
            while j < indexed.len() && (indexed[j].1 - indexed[i].1).abs() < 1e-12 { j += 1; }
            // Average of the 1-based ranks i+1 ..= j, shared by the whole run.
            let avg_rank = (i + j - 1) as f64 / 2.0 + 1.0;
            for k in i..j { ranks[indexed[k].0] = avg_rank; }
            i = j;
        }
        ranks
    };
    let rx: Vec<f64> = rank(&x[..n]);
    let ry: Vec<f64> = rank(&y[..n]);
    pearson_r(&rx, &ry)
}
296
297// ============================================================
298// SPECIAL FUNCTIONS
299// ============================================================
300
/// Error function erf(x).
///
/// Abramowitz & Stegun approximation 7.1.26 (max absolute error
/// about 1.5e-7), evaluated on |x| and mirrored because erf is odd.
pub fn erf(x: f64) -> f64 {
    const A: [f64; 5] = [
        0.254829592,
        -0.284496736,
        1.421413741,
        -1.453152027,
        1.061405429,
    ];
    let t = 1.0 / (1.0 + 0.3275911 * x.abs());
    // Horner evaluation of the degree-5 polynomial in t.
    let mut poly = A[4];
    for &coeff in A[..4].iter().rev() {
        poly = poly * t + coeff;
    }
    poly *= t;
    let magnitude = 1.0 - poly * (-x * x).exp();
    if x >= 0.0 { magnitude } else { -magnitude }
}
313
314/// Complementary error function erfc(x).
315pub fn erfc(x: f64) -> f64 { 1.0 - erf(x) }
316
/// Natural log of gamma function (Lanczos approximation).
///
/// For x < 0.5 the reflection formula
/// ln Γ(x) = ln(π / sin(πx)) − ln Γ(1 − x) is applied first; note the
/// log is NaN wherever sin(πx) is negative (most negative arguments),
/// matching the original behavior.
pub fn lgamma(x: f64) -> f64 {
    // g = 7, n = 9 Lanczos coefficient set.
    const G: f64 = 7.0;
    const C: [f64; 9] = [
        0.99999999999980993,
        676.5203681218851,
        -1259.1392167224028,
        771.32342877765313,
        -176.61502916214059,
        12.507343278686905,
        -0.13857109526572012,
        9.9843695780195716e-6,
        1.5056327351493116e-7,
    ];
    if x < 0.5 {
        // Reflection into the right half-line.
        return (PI / ((PI * x).sin())).ln() - lgamma(1.0 - x);
    }
    let z = x - 1.0;
    let shifted = z + G + 0.5;
    let mut series = C[0];
    for (k, &coeff) in C.iter().enumerate().skip(1) {
        series += coeff / (z + k as f64);
    }
    0.5 * (2.0 * PI).ln() + series.ln() + (z + 0.5) * shifted.ln() - shifted
}
340
341/// Gamma function.
342pub fn gamma(x: f64) -> f64 { lgamma(x).exp() }
343
/// Regularized incomplete gamma function P(a, x) — lower.
///
/// P(a, x) = γ(a, x)/Γ(a), in [0, 1]. Uses the classic two-regime
/// split: a power series where it converges quickly (x < a + 1) and a
/// continued fraction for the complement Q(a, x) otherwise.
/// NOTE(review): assumes a > 0 — the lgamma(a) prefactor is not
/// meaningful otherwise; confirm callers never pass a <= 0.
pub fn gammainc_lower(a: f64, x: f64) -> f64 {
    if x <= 0.0 { return 0.0; }
    if x < a + 1.0 {
        // Series expansion: γ(a,x) = x^a e^{-x} Σ x^n / (a(a+1)…(a+n)).
        let mut term = 1.0 / a;
        let mut sum = term;
        for n in 1..200usize {
            term *= x / (a + n as f64);
            sum += term;
            // Stop once the next term no longer moves the sum materially.
            if term.abs() < sum.abs() * 1e-12 { break; }
        }
        // Multiply by x^a e^{-x} / Γ(a), assembled in log space.
        sum * (-x + a * x.ln() - lgamma(a)).exp()
    } else {
        // Continued fraction (Lentz's method)
        // Evaluates Q(a, x); the return value is P = 1 - Q.
        let eps = 1e-12;
        let mut b = x + 1.0 - a;
        let mut c = 1.0 / 1e-300;
        let mut d = 1.0 / b;
        let mut h = d;
        for i in 1..200i64 {
            let an = -i as f64 * (i as f64 - a);
            b += 2.0;
            d = an * d + b;
            // Clamp tiny magnitudes so the recurrence never divides by zero.
            if d.abs() < 1e-300 { d = 1e-300; }
            c = b + an / c;
            if c.abs() < 1e-300 { c = 1e-300; }
            d = 1.0 / d;
            let del = d * c;
            h *= del;
            // Converged when the multiplicative update is within eps of 1.
            if (del - 1.0).abs() < eps { break; }
        }
        1.0 - (-x + a * x.ln() - lgamma(a)).exp() * h
    }
}
379
/// Regularized incomplete beta function I_x(a,b).
///
/// Clamped to 0 below x = 0 and 1 above x = 1. The continued
/// fraction `betacf` converges fast only for x < (a+1)/(a+b+2);
/// beyond that the symmetry I_x(a,b) = 1 - I_{1-x}(b,a) is applied.
pub fn betainc(x: f64, a: f64, b: f64) -> f64 {
    if x <= 0.0 { return 0.0; }
    if x >= 1.0 { return 1.0; }
    // Prefactor x^a (1-x)^b / B(a,b), assembled in log space.
    let lbeta = lgamma(a) + lgamma(b) - lgamma(a + b);
    let factor = (a * x.ln() + b * (1.0 - x).ln() - lbeta).exp();
    // Use symmetry relation for convergence
    if x < (a + 1.0) / (a + b + 2.0) {
        factor * betacf(x, a, b) / a
    } else {
        1.0 - factor * betacf(1.0 - x, b, a) / b
    }
}
393
/// Modified Lentz evaluation of the continued fraction underlying the
/// incomplete beta function. Only intended for the fast-converging
/// side x < (a+1)/(a+b+2); `betainc` flips to the symmetric form
/// before calling otherwise. Each loop iteration applies one even
/// and one odd step of the fraction.
fn betacf(x: f64, a: f64, b: f64) -> f64 {
    let max_iter = 200;
    let eps = 1e-12;
    let qab = a + b;
    let qap = a + 1.0;
    let qam = a - 1.0;
    let mut c = 1.0;
    let mut d = 1.0 - qab * x / qap;
    // Clamp tiny magnitudes throughout so the recurrence never divides by zero.
    if d.abs() < 1e-300 { d = 1e-300; }
    d = 1.0 / d;
    let mut h = d;
    for m in 1..=max_iter {
        let m = m as f64;
        let m2 = 2.0 * m;
        // Even-numbered step of the continued fraction.
        let mut aa = m * (b - m) * x / ((qam + m2) * (a + m2));
        d = 1.0 + aa * d;
        if d.abs() < 1e-300 { d = 1e-300; }
        c = 1.0 + aa / c;
        if c.abs() < 1e-300 { c = 1e-300; }
        d = 1.0 / d;
        h *= d * c;
        // Odd-numbered step.
        aa = -(a + m) * (qab + m) * x / ((a + m2) * (qap + m2));
        d = 1.0 + aa * d;
        if d.abs() < 1e-300 { d = 1e-300; }
        c = 1.0 + aa / c;
        if c.abs() < 1e-300 { c = 1e-300; }
        d = 1.0 / d;
        let del = d * c;
        h *= del;
        // Converged when the multiplicative update is within eps of 1.
        if (del - 1.0).abs() < eps { break; }
    }
    h
}
427
/// Inverse normal CDF (probit function) via rational approximation.
///
/// Abramowitz & Stegun 26.2.23 (absolute error < 4.5e-4). The input
/// is clamped away from 0 and 1, the tail probability q <= 0.5 is
/// mapped to t = sqrt(-2 ln q), and the sign is restored at the end.
pub fn probit(p: f64) -> f64 {
    const NUM: [f64; 3] = [2.515517, 0.802853, 0.010328];
    const DEN: [f64; 3] = [1.432788, 0.189269, 0.001308];
    let p = p.clamp(1e-12, 1.0 - 1e-12);
    let (tail, sign) = if p < 0.5 { (p, -1.0) } else { (1.0 - p, 1.0) };
    let t = (-2.0 * tail.ln()).sqrt();
    let numerator = NUM[0] + NUM[1] * t + NUM[2] * t * t;
    let denominator = 1.0 + DEN[0] * t + DEN[1] * t * t + DEN[2] * t * t * t;
    sign * (t - numerator / denominator)
}
440
441/// Two-tailed p-value from t statistic with df degrees of freedom.
442pub fn p_value_from_t(t: f64, df: f64) -> f64 {
443    // CDF of t-distribution via regularized incomplete beta
444    let x = df / (df + t * t);
445    let p_one_tail = 0.5 * betainc(x, df / 2.0, 0.5);
446    (2.0 * p_one_tail).min(1.0)
447}
448
449/// p-value from chi-squared statistic with k degrees of freedom.
450pub fn p_value_from_chi2(chi2: f64, k: usize) -> f64 {
451    if chi2 <= 0.0 { return 1.0; }
452    1.0 - gammainc_lower(k as f64 / 2.0, chi2 / 2.0)
453}
454
455// ============================================================
456// PROBABILITY DISTRIBUTIONS
457// ============================================================
458
459/// Normal (Gaussian) distribution.
460#[derive(Clone, Debug)]
461pub struct NormalDist {
462    pub mean: f64,
463    pub std_dev: f64,
464}
465
466impl NormalDist {
467    pub fn pdf(&self, x: f64) -> f64 {
468        let z = (x - self.mean) / self.std_dev;
469        (-0.5 * z * z).exp() / (self.std_dev * (2.0 * PI).sqrt())
470    }
471    pub fn cdf(&self, x: f64) -> f64 {
472        0.5 * (1.0 + erf((x - self.mean) / (self.std_dev * 2.0f64.sqrt())))
473    }
474    pub fn inv_cdf(&self, p: f64) -> f64 {
475        self.mean + self.std_dev * probit(p)
476    }
477    /// Box-Muller sampling. Returns two independent samples.
478    pub fn sample_pair(&self, rng: &mut impl Rng) -> (f64, f64) {
479        let u1 = rng.next_f64().max(1e-300);
480        let u2 = rng.next_f64();
481        let r = (-2.0 * u1.ln()).sqrt();
482        let theta = 2.0 * PI * u2;
483        let z0 = r * theta.cos();
484        let z1 = r * theta.sin();
485        (self.mean + self.std_dev * z0, self.mean + self.std_dev * z1)
486    }
487    pub fn sample(&self, rng: &mut impl Rng) -> f64 { self.sample_pair(rng).0 }
488}
489
490/// Continuous uniform distribution.
491#[derive(Clone, Debug)]
492pub struct UniformDist {
493    pub min: f64,
494    pub max: f64,
495}
496
497impl UniformDist {
498    pub fn pdf(&self, x: f64) -> f64 {
499        if x >= self.min && x <= self.max { 1.0 / (self.max - self.min) } else { 0.0 }
500    }
501    pub fn cdf(&self, x: f64) -> f64 {
502        ((x - self.min) / (self.max - self.min)).clamp(0.0, 1.0)
503    }
504    pub fn inv_cdf(&self, p: f64) -> f64 { self.min + p * (self.max - self.min) }
505    pub fn sample(&self, rng: &mut impl Rng) -> f64 { self.inv_cdf(rng.next_f64()) }
506}
507
508/// Exponential distribution.
509#[derive(Clone, Debug)]
510pub struct ExponentialDist {
511    pub lambda: f64,
512}
513
514impl ExponentialDist {
515    pub fn pdf(&self, x: f64) -> f64 {
516        if x < 0.0 { 0.0 } else { self.lambda * (-self.lambda * x).exp() }
517    }
518    pub fn cdf(&self, x: f64) -> f64 {
519        if x < 0.0 { 0.0 } else { 1.0 - (-self.lambda * x).exp() }
520    }
521    pub fn inv_cdf(&self, p: f64) -> f64 { -((1.0 - p).max(1e-300)).ln() / self.lambda }
522    pub fn sample(&self, rng: &mut impl Rng) -> f64 { self.inv_cdf(rng.next_f64()) }
523}
524
525/// Poisson distribution.
526#[derive(Clone, Debug)]
527pub struct PoissonDist {
528    pub lambda: f64,
529}
530
531impl PoissonDist {
532    pub fn pmf(&self, k: u64) -> f64 {
533        (-self.lambda).exp() * self.lambda.powi(k as i32) / gamma(k as f64 + 1.0)
534    }
535    pub fn cdf(&self, k: u64) -> f64 {
536        (0..=k).map(|i| self.pmf(i)).sum()
537    }
538    /// Knuth algorithm for Poisson sampling.
539    pub fn sample(&self, rng: &mut impl Rng) -> u64 {
540        let l = (-self.lambda).exp();
541        let mut k = 0u64;
542        let mut p = 1.0;
543        loop {
544            k += 1;
545            p *= rng.next_f64();
546            if p <= l { break; }
547        }
548        k - 1
549    }
550}
551
552/// Binomial distribution.
553#[derive(Clone, Debug)]
554pub struct BinomialDist {
555    pub n: u64,
556    pub p: f64,
557}
558
559impl BinomialDist {
560    pub fn pmf(&self, k: u64) -> f64 {
561        if k > self.n { return 0.0; }
562        let log_coeff = lgamma(self.n as f64 + 1.0)
563            - lgamma(k as f64 + 1.0)
564            - lgamma((self.n - k) as f64 + 1.0);
565        (log_coeff + k as f64 * self.p.ln() + (self.n - k) as f64 * (1.0 - self.p).ln()).exp()
566    }
567    pub fn cdf(&self, k: u64) -> f64 {
568        (0..=k).map(|i| self.pmf(i)).sum()
569    }
570    pub fn sample(&self, rng: &mut impl Rng) -> u64 {
571        (0..self.n).filter(|_| rng.next_f64() < self.p).count() as u64
572    }
573}
574
575/// Beta distribution (Johnk's method for sampling).
576#[derive(Clone, Debug)]
577pub struct BetaDist {
578    pub alpha: f64,
579    pub beta: f64,
580}
581
582impl BetaDist {
583    pub fn pdf(&self, x: f64) -> f64 {
584        if x <= 0.0 || x >= 1.0 { return 0.0; }
585        let lbeta = lgamma(self.alpha) + lgamma(self.beta) - lgamma(self.alpha + self.beta);
586        ((self.alpha - 1.0) * x.ln() + (self.beta - 1.0) * (1.0 - x).ln() - lbeta).exp()
587    }
588    pub fn cdf(&self, x: f64) -> f64 { betainc(x, self.alpha, self.beta) }
589    pub fn sample(&self, rng: &mut impl Rng) -> f64 {
590        // Johnk's method
591        loop {
592            let u = rng.next_f64();
593            let v = rng.next_f64();
594            let x = u.powf(1.0 / self.alpha);
595            let y = v.powf(1.0 / self.beta);
596            if x + y <= 1.0 { return x / (x + y); }
597        }
598    }
599}
600
601/// Gamma distribution (Marsaglia-Tsang method for alpha >= 1).
602#[derive(Clone, Debug)]
603pub struct GammaDist {
604    pub shape: f64,  // alpha / k
605    pub scale: f64,  // theta
606}
607
608impl GammaDist {
609    pub fn pdf(&self, x: f64) -> f64 {
610        if x <= 0.0 { return 0.0; }
611        let log_scale = self.scale.ln();
612        ((self.shape - 1.0) * x.ln() - x / self.scale - self.shape * log_scale - lgamma(self.shape)).exp()
613    }
614    pub fn cdf(&self, x: f64) -> f64 {
615        if x <= 0.0 { return 0.0; }
616        gammainc_lower(self.shape, x / self.scale)
617    }
618    pub fn sample(&self, rng: &mut impl Rng) -> f64 {
619        let alpha = self.shape;
620        let s = if alpha >= 1.0 {
621            // Marsaglia-Tsang
622            let d = alpha - 1.0 / 3.0;
623            let c = 1.0 / (9.0 * d).sqrt();
624            let norm = NormalDist { mean: 0.0, std_dev: 1.0 };
625            loop {
626                let x = norm.sample(rng);
627                let v = (1.0 + c * x).powi(3);
628                if v <= 0.0 { continue; }
629                let u = rng.next_f64();
630                if u < 1.0 - 0.0331 * (x * x).powi(2) { break d * v; }
631                if u.ln() < 0.5 * x * x + d * (1.0 - v + v.ln()) { break d * v; }
632            }
633        } else {
634            // alpha < 1: use alpha+1 and scale
635            let d = alpha + 1.0 - 1.0 / 3.0;
636            let c = 1.0 / (9.0 * d).sqrt();
637            let norm = NormalDist { mean: 0.0, std_dev: 1.0 };
638            let s_plus1 = loop {
639                let x = norm.sample(rng);
640                let v = (1.0 + c * x).powi(3);
641                if v <= 0.0 { continue; }
642                let u = rng.next_f64();
643                if u < 1.0 - 0.0331 * (x * x).powi(2) { break d * v; }
644                if u.ln() < 0.5 * x * x + d * (1.0 - v + v.ln()) { break d * v; }
645            };
646            s_plus1 * rng.next_f64().powf(1.0 / alpha)
647        };
648        s * self.scale
649    }
650}
651
652/// Log-Normal distribution.
653#[derive(Clone, Debug)]
654pub struct LogNormalDist {
655    pub mu: f64,
656    pub sigma: f64,
657}
658
659impl LogNormalDist {
660    pub fn pdf(&self, x: f64) -> f64 {
661        if x <= 0.0 { return 0.0; }
662        let z = (x.ln() - self.mu) / self.sigma;
663        (-0.5 * z * z).exp() / (x * self.sigma * (2.0 * PI).sqrt())
664    }
665    pub fn cdf(&self, x: f64) -> f64 {
666        if x <= 0.0 { return 0.0; }
667        0.5 * (1.0 + erf((x.ln() - self.mu) / (self.sigma * 2.0f64.sqrt())))
668    }
669    pub fn sample(&self, rng: &mut impl Rng) -> f64 {
670        let norm = NormalDist { mean: self.mu, std_dev: self.sigma };
671        norm.sample(rng).exp()
672    }
673}
674
675/// Weibull distribution.
676#[derive(Clone, Debug)]
677pub struct WeibullDist {
678    pub shape: f64,   // k
679    pub scale: f64,   // lambda
680}
681
682impl WeibullDist {
683    pub fn pdf(&self, x: f64) -> f64 {
684        if x < 0.0 { return 0.0; }
685        let k = self.shape; let l = self.scale;
686        (k / l) * (x / l).powf(k - 1.0) * (-(x / l).powf(k)).exp()
687    }
688    pub fn cdf(&self, x: f64) -> f64 {
689        if x < 0.0 { return 0.0; }
690        1.0 - (-(x / self.scale).powf(self.shape)).exp()
691    }
692    pub fn inv_cdf(&self, p: f64) -> f64 {
693        self.scale * (-(1.0 - p).ln()).powf(1.0 / self.shape)
694    }
695    pub fn sample(&self, rng: &mut impl Rng) -> f64 { self.inv_cdf(rng.next_f64()) }
696}
697
698/// Cauchy distribution.
699#[derive(Clone, Debug)]
700pub struct CauchyDist {
701    pub location: f64,
702    pub scale: f64,
703}
704
705impl CauchyDist {
706    pub fn pdf(&self, x: f64) -> f64 {
707        let z = (x - self.location) / self.scale;
708        1.0 / (PI * self.scale * (1.0 + z * z))
709    }
710    pub fn cdf(&self, x: f64) -> f64 {
711        0.5 + ((x - self.location) / self.scale).atan() / PI
712    }
713    pub fn inv_cdf(&self, p: f64) -> f64 {
714        self.location + self.scale * (PI * (p - 0.5)).tan()
715    }
716    pub fn sample(&self, rng: &mut impl Rng) -> f64 { self.inv_cdf(rng.next_f64()) }
717}
718
719/// Student's t-distribution.
720#[derive(Clone, Debug)]
721pub struct StudentTDist {
722    pub degrees_of_freedom: f64,
723}
724
725impl StudentTDist {
726    pub fn pdf(&self, t: f64) -> f64 {
727        let nu = self.degrees_of_freedom;
728        let coeff = gamma((nu + 1.0) / 2.0) / (gamma(nu / 2.0) * (nu * PI).sqrt());
729        coeff * (1.0 + t * t / nu).powf(-(nu + 1.0) / 2.0)
730    }
731    pub fn cdf(&self, t: f64) -> f64 {
732        let nu = self.degrees_of_freedom;
733        let x = nu / (nu + t * t);
734        let ib = betainc(x, nu / 2.0, 0.5) / 2.0;
735        if t > 0.0 { 1.0 - ib } else { ib }
736    }
737    pub fn sample(&self, rng: &mut impl Rng) -> f64 {
738        let z = NormalDist { mean: 0.0, std_dev: 1.0 }.sample(rng);
739        let chi2 = GammaDist { shape: self.degrees_of_freedom / 2.0, scale: 2.0 }.sample(rng);
740        z / (chi2 / self.degrees_of_freedom).sqrt()
741    }
742}
743
744/// Chi-squared distribution.
745#[derive(Clone, Debug)]
746pub struct ChiSquaredDist {
747    pub k: f64,
748}
749
750impl ChiSquaredDist {
751    pub fn pdf(&self, x: f64) -> f64 {
752        GammaDist { shape: self.k / 2.0, scale: 2.0 }.pdf(x)
753    }
754    pub fn cdf(&self, x: f64) -> f64 {
755        if x <= 0.0 { return 0.0; }
756        gammainc_lower(self.k / 2.0, x / 2.0)
757    }
758    pub fn sample(&self, rng: &mut impl Rng) -> f64 {
759        GammaDist { shape: self.k / 2.0, scale: 2.0 }.sample(rng)
760    }
761}
762
763// ============================================================
764// HYPOTHESIS TESTING
765// ============================================================
766
767/// One-sample t-test against mu0.
768/// Returns (t-statistic, two-tailed p-value).
769pub fn t_test_one_sample(data: &[f64], mu0: f64) -> (f64, f64) {
770    let n = data.len() as f64;
771    if n < 2.0 { return (0.0, 1.0); }
772    let xbar = mean(data);
773    let s = std_dev(data);
774    if s == 0.0 { return (0.0, 1.0); }
775    let t = (xbar - mu0) / (s / n.sqrt());
776    let p = p_value_from_t(t, n - 1.0);
777    (t, p)
778}
779
780/// Welch's two-sample t-test.
781/// Returns (t-statistic, two-tailed p-value).
782pub fn t_test_two_sample(a: &[f64], b: &[f64]) -> (f64, f64) {
783    let na = a.len() as f64;
784    let nb = b.len() as f64;
785    if na < 2.0 || nb < 2.0 { return (0.0, 1.0); }
786    let ma = mean(a);
787    let mb = mean(b);
788    let sa2 = variance(a);
789    let sb2 = variance(b);
790    let se = (sa2 / na + sb2 / nb).sqrt();
791    if se == 0.0 { return (0.0, 1.0); }
792    let t = (ma - mb) / se;
793    // Welch-Satterthwaite degrees of freedom
794    let df = (sa2 / na + sb2 / nb).powi(2)
795        / ((sa2 / na).powi(2) / (na - 1.0) + (sb2 / nb).powi(2) / (nb - 1.0));
796    let p = p_value_from_t(t, df);
797    (t, p)
798}
799
800/// Chi-squared goodness-of-fit test.
801/// Returns (chi2-statistic, p-value).
802pub fn chi_squared_test(observed: &[f64], expected: &[f64]) -> (f64, f64) {
803    let chi2: f64 = observed
804        .iter()
805        .zip(expected.iter())
806        .map(|(o, e)| if *e > 0.0 { (o - e).powi(2) / e } else { 0.0 })
807        .sum();
808    let df = (observed.len() - 1).max(1);
809    let p = p_value_from_chi2(chi2, df);
810    (chi2, p)
811}
812
/// Kolmogorov-Smirnov test against a theoretical CDF.
/// Returns (D-statistic, approximate p-value).
///
/// D is the largest gap between the empirical CDF (checked on both
/// sides of each jump) and the theoretical CDF. The p-value uses the
/// asymptotic Kolmogorov series
/// Q(z) = 2 Σ_{k≥1} (-1)^{k-1} e^{-2k²z²}
/// with Stephens' finite-n adjustment of z.
///
/// Bug fix: the series sign was inverted (negative for odd k), which
/// made the partial sum negative, so the reported p-value clamped to
/// 0.0 for essentially every input.
pub fn ks_test(data: &[f64], cdf: impl Fn(f64) -> f64) -> (f64, f64) {
    let n = data.len();
    if n == 0 { return (0.0, 1.0); }
    let mut sorted = data.to_vec();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
    let mut d = 0.0f64;
    for (i, &x) in sorted.iter().enumerate() {
        // Empirical CDF just after and just before the jump at x.
        let empirical_upper = (i + 1) as f64 / n as f64;
        let empirical_lower = i as f64 / n as f64;
        let theoretical = cdf(x);
        d = d.max((empirical_upper - theoretical).abs());
        d = d.max((empirical_lower - theoretical).abs());
    }
    // Stephens' small-sample correction before evaluating the series.
    let sqrt_n = (n as f64).sqrt();
    let z = (sqrt_n + 0.12 + 0.11 / sqrt_n) * d;
    let p = if z <= 0.0 { 1.0 } else {
        let mut sum = 0.0;
        for k in 1..50i64 {
            // (-1)^(k-1): odd terms add, even terms subtract.
            let sign = if k % 2 == 1 { 1.0 } else { -1.0 };
            sum += sign * (-2.0 * (k as f64).powi(2) * z * z).exp();
        }
        (2.0 * sum).clamp(0.0, 1.0)
    };
    (d, p)
}
842
843/// Mann-Whitney U test (non-parametric, two-sample).
844/// Returns (U-statistic, approximate two-tailed p-value).
845pub fn mann_whitney_u(a: &[f64], b: &[f64]) -> (f64, f64) {
846    let na = a.len();
847    let nb = b.len();
848    let mut u = 0.0f64;
849    for &ai in a {
850        for &bi in b {
851            if ai > bi { u += 1.0; }
852            else if ai == bi { u += 0.5; }
853        }
854    }
855    let mean_u = na as f64 * nb as f64 / 2.0;
856    let std_u = ((na as f64 * nb as f64 * (na + nb + 1) as f64) / 12.0).sqrt();
857    if std_u == 0.0 { return (u, 1.0); }
858    let z = (u - mean_u) / std_u;
859    let norm = NormalDist { mean: 0.0, std_dev: 1.0 };
860    let p = 2.0 * (1.0 - norm.cdf(z.abs()));
861    (u, p)
862}
863
/// Approximate Shapiro-Wilk test statistic W for normality.
///
/// W = b² / Σ(x − x̄)², where b pairs the i-th smallest observation
/// against the i-th largest, weighted by differences of expected
/// normal order statistics (Blom-style plotting positions
/// (i + 0.625)/(n + 0.25) pushed through the inverse normal CDF).
/// NOTE(review): the weights are not normalized as in the exact
/// Royston algorithm, so W here is only a relative score and is not
/// guaranteed to stay <= 1 — confirm before comparing against
/// published critical values. Degenerate inputs (n < 3, or zero
/// spread) return 1.0, i.e. "looks normal".
pub fn shapiro_wilk_stat(data: &[f64]) -> f64 {
    let n = data.len();
    if n < 3 { return 1.0; }
    let mut sorted = data.to_vec();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
    let m = mean(&sorted);
    let ss: f64 = sorted.iter().map(|x| (x - m).powi(2)).sum();
    if ss == 0.0 { return 1.0; }
    // Approximate a coefficients using expected normal order statistics
    let norm = NormalDist { mean: 0.0, std_dev: 1.0 };
    let half = n / 2;
    let mut b = 0.0f64;
    for i in 0..half {
        // Expected order statistics for the i-th smallest and i-th largest.
        let expected_i = norm.inv_cdf((i as f64 + 0.625) / (n as f64 + 0.25));
        let expected_n_i = norm.inv_cdf((n as f64 - 1.0 - i as f64 + 0.625) / (n as f64 + 0.25));
        let a_i = expected_n_i - expected_i;
        b += a_i * (sorted[n - 1 - i] - sorted[i]);
    }
    b * b / ss
}
886
887// ============================================================
888// REGRESSION
889// ============================================================
890
/// Simple linear regression: y = slope * x + intercept.
///
/// Least-squares fit over pairs truncated to the shorter slice.
/// Returns (0.0, 0.0) for fewer than two points, and a horizontal
/// line through the mean of y when x has no spread. NOTE(review): as
/// in the original, the means and ss_xx are taken over each *full*
/// slice even when lengths differ.
pub fn linear_regression(x: &[f64], y: &[f64]) -> (f64, f64) {
    let n = x.len().min(y.len()) as f64;
    if n < 2.0 { return (0.0, 0.0); }
    let mx = x.iter().sum::<f64>() / x.len() as f64;
    let my = y.iter().sum::<f64>() / y.len() as f64;
    let ss_xx: f64 = x.iter().map(|xi| (xi - mx) * (xi - mx)).sum();
    let ss_xy: f64 = x.iter().zip(y).map(|(xi, yi)| (xi - mx) * (yi - my)).sum();
    if ss_xx == 0.0 { return (0.0, my); }
    let slope = ss_xy / ss_xx;
    (slope, my - slope * mx)
}
904
905/// Polynomial regression of given degree. Returns coefficients [a0, a1, ..., a_deg].
906pub fn polynomial_regression(x: &[f64], y: &[f64], degree: usize) -> Vec<f64> {
907    let n = x.len().min(y.len());
908    let d = degree + 1;
909    // Build Vandermonde matrix X
910    let mut xmat = vec![vec![0.0f64; d]; n];
911    for i in 0..n {
912        for j in 0..d {
913            xmat[i][j] = x[i].powi(j as i32);
914        }
915    }
916    // X^T X
917    let mut xtx = vec![vec![0.0f64; d]; d];
918    for r in 0..d {
919        for c in 0..d {
920            for i in 0..n { xtx[r][c] += xmat[i][r] * xmat[i][c]; }
921        }
922    }
923    // X^T y
924    let mut xty = vec![0.0f64; d];
925    for r in 0..d {
926        for i in 0..n { xty[r] += xmat[i][r] * y[i]; }
927    }
928    // Solve xtx * coeffs = xty via Gaussian elimination
929    solve_system(&mut xtx, &mut xty).unwrap_or_else(|| vec![0.0; d])
930}
931
/// Solve the linear system `a * x = b` by Gauss-Jordan elimination
/// with partial pivoting. Returns `None` when the best pivot falls
/// below 1e-12 (singular or near-singular matrix). Both inputs are
/// consumed as scratch space: `a` is reduced in place and `b` ends up
/// holding the solution, a copy of which is returned.
fn solve_system(a: &mut Vec<Vec<f64>>, b: &mut Vec<f64>) -> Option<Vec<f64>> {
    let n = b.len();
    for col in 0..n {
        // Partial pivoting: bring the largest remaining entry to the top.
        let mut best_row = col;
        for row in col + 1..n {
            if a[row][col].abs() > a[best_row][col].abs() {
                best_row = row;
            }
        }
        if a[best_row][col].abs() < 1e-12 {
            return None;
        }
        a.swap(col, best_row);
        b.swap(col, best_row);
        // Scale the pivot row so the pivot becomes exactly 1.
        let pivot = a[col][col];
        for j in col..n {
            a[col][j] /= pivot;
        }
        b[col] /= pivot;
        // Eliminate the pivot column from every other row.
        for row in 0..n {
            if row == col { continue; }
            let factor = a[row][col];
            for j in col..n {
                a[row][j] -= factor * a[col][j];
            }
            b[row] -= factor * b[col];
        }
    }
    Some(b.clone())
}
956
957/// Multiple linear regression (OLS). X is n_samples × n_features.
958/// Returns coefficient vector (including intercept as first element).
959pub fn multiple_linear_regression(x: &[Vec<f64>], y: &[f64]) -> Vec<f64> {
960    let n = x.len().min(y.len());
961    if n == 0 { return vec![]; }
962    let p = x[0].len() + 1; // +1 for intercept
963    // Build design matrix with intercept column
964    let mut xmat = vec![vec![0.0f64; p]; n];
965    for i in 0..n {
966        xmat[i][0] = 1.0;
967        for j in 1..p { xmat[i][j] = x[i][j - 1]; }
968    }
969    // X^T X
970    let mut xtx = vec![vec![0.0f64; p]; p];
971    for r in 0..p {
972        for c in 0..p {
973            for i in 0..n { xtx[r][c] += xmat[i][r] * xmat[i][c]; }
974        }
975    }
976    // X^T y
977    let mut xty = vec![0.0f64; p];
978    for r in 0..p {
979        for i in 0..n { xty[r] += xmat[i][r] * y[i]; }
980    }
981    solve_system(&mut xtx, &mut xty).unwrap_or_else(|| vec![0.0; p])
982}
983
/// R-squared coefficient of determination: 1 - SS_res / SS_tot.
///
/// Observations are paired up to the shorter of the two slices; both
/// the residual and total sums of squares are computed over that same
/// paired prefix. (Previously the mean and SS_tot used the full
/// `y_true` slice while SS_res only covered the paired prefix, giving
/// inconsistent results for mismatched lengths.)
/// Returns 0.0 for empty input and, by convention, 1.0 when `y_true`
/// is constant (SS_tot == 0).
pub fn r_squared(y_true: &[f64], y_pred: &[f64]) -> f64 {
    let n = y_true.len().min(y_pred.len());
    if n == 0 {
        return 0.0;
    }
    let t = &y_true[..n];
    let p = &y_pred[..n];
    let mean_true = t.iter().sum::<f64>() / n as f64;
    let ss_res: f64 = t.iter().zip(p.iter()).map(|(y, yh)| (y - yh).powi(2)).sum();
    let ss_tot: f64 = t.iter().map(|y| (y - mean_true).powi(2)).sum();
    // Constant target: keep the original convention of reporting 1.0.
    if ss_tot == 0.0 {
        return 1.0;
    }
    1.0 - ss_res / ss_tot
}
994
995/// Ridge regression (L2 regularized OLS). Returns coefficients.
996pub fn ridge_regression(x: &[Vec<f64>], y: &[f64], lambda: f64) -> Vec<f64> {
997    let n = x.len().min(y.len());
998    if n == 0 { return vec![]; }
999    let p = x[0].len() + 1;
1000    let mut xmat = vec![vec![0.0f64; p]; n];
1001    for i in 0..n {
1002        xmat[i][0] = 1.0;
1003        for j in 1..p { xmat[i][j] = x[i][j - 1]; }
1004    }
1005    let mut xtx = vec![vec![0.0f64; p]; p];
1006    for r in 0..p {
1007        for c in 0..p {
1008            for i in 0..n { xtx[r][c] += xmat[i][r] * xmat[i][c]; }
1009        }
1010    }
1011    // Add lambda * I (skip intercept at index 0)
1012    for j in 1..p { xtx[j][j] += lambda; }
1013    let mut xty = vec![0.0f64; p];
1014    for r in 0..p {
1015        for i in 0..n { xty[r] += xmat[i][r] * y[i]; }
1016    }
1017    solve_system(&mut xtx, &mut xty).unwrap_or_else(|| vec![0.0; p])
1018}
1019
/// Logistic regression fit by full-batch gradient descent.
/// `x` is n_samples × n_features, `y` holds boolean class labels.
/// Returns the weight vector of length n_features + 1 with the
/// intercept at index 0; empty for empty input.
pub fn logistic_regression(x: &[Vec<f64>], y: &[bool], lr: f64, epochs: usize) -> Vec<f64> {
    let samples = x.len().min(y.len());
    if samples == 0 {
        return Vec::new();
    }
    // One weight per feature plus the intercept.
    let dim = x[0].len() + 1;
    let mut weights = vec![0.0f64; dim];
    for _ in 0..epochs {
        // Accumulate the gradient of the log-loss over the whole batch.
        let mut gradient = vec![0.0f64; dim];
        for i in 0..samples {
            let row = &x[i];
            // Linear score: intercept + dot(weights[1..], features).
            let mut score = weights[0];
            for j in 1..dim {
                score += weights[j] * row[j - 1];
            }
            let prob = 1.0 / (1.0 + (-score).exp());
            // Residual of the sigmoid prediction vs the 0/1 target.
            let residual = prob - (y[i] as u8) as f64;
            gradient[0] += residual;
            for j in 1..dim {
                gradient[j] += residual * row[j - 1];
            }
        }
        // Average-gradient descent step.
        for j in 0..dim {
            weights[j] -= lr * gradient[j] / samples as f64;
        }
    }
    weights
}
1044
1045// ============================================================
1046// BAYESIAN INFERENCE
1047// ============================================================
1048
/// Beta-Bernoulli conjugate model.
///
/// A Beta(alpha, beta) distribution used as the conjugate prior (or
/// posterior) over a Bernoulli success probability; the shape
/// parameters act as pseudo-counts of successes and failures.
#[derive(Clone, Debug)]
pub struct BetaBernoulli {
    /// Beta shape parameter α — pseudo-count of successes.
    pub alpha: f64,
    /// Beta shape parameter β — pseudo-count of failures.
    pub beta: f64,
}
1055
1056/// Update Beta prior with new Bernoulli observations.
1057pub fn update_beta_bernoulli(prior: BetaBernoulli, successes: u32, failures: u32) -> BetaBernoulli {
1058    BetaBernoulli {
1059        alpha: prior.alpha + successes as f64,
1060        beta: prior.beta + failures as f64,
1061    }
1062}
1063
1064/// Posterior mean of Beta-Bernoulli model.
1065pub fn posterior_mean(dist: &BetaBernoulli) -> f64 {
1066    dist.alpha / (dist.alpha + dist.beta)
1067}
1068
1069/// Equal-tailed credible interval for Beta distribution.
1070pub fn credible_interval(dist: &BetaBernoulli, level: f64) -> (f64, f64) {
1071    let tail = (1.0 - level) / 2.0;
1072    let beta = BetaDist { alpha: dist.alpha, beta: dist.beta };
1073    // Numerical inversion of beta CDF
1074    let inv_beta_cdf = |p: f64| -> f64 {
1075        let mut lo = 0.0f64;
1076        let mut hi = 1.0f64;
1077        for _ in 0..100 {
1078            let mid = (lo + hi) * 0.5;
1079            if beta.cdf(mid) < p { lo = mid; } else { hi = mid; }
1080        }
1081        (lo + hi) * 0.5
1082    };
1083    (inv_beta_cdf(tail), inv_beta_cdf(1.0 - tail))
1084}
1085
/// Gaussian-Gaussian conjugate model (known variance).
///
/// Normal prior over an unknown mean, paired with a Normal likelihood
/// whose observation variance is treated as known.
#[derive(Clone, Debug)]
pub struct GaussianGaussian {
    /// Mean of the Normal prior on the unknown mean.
    pub prior_mean: f64,
    /// Variance of the Normal prior.
    pub prior_variance: f64,
    /// Known observation (likelihood) variance.
    pub likelihood_variance: f64,
}
1093
1094impl GaussianGaussian {
1095    /// Update posterior given n observations with sample mean.
1096    pub fn update(&self, sample_mean: f64, n: usize) -> (f64, f64) {
1097        let n = n as f64;
1098        let lv = self.likelihood_variance;
1099        let pv = self.prior_variance;
1100        let post_var = 1.0 / (1.0 / pv + n / lv);
1101        let post_mean = post_var * (self.prior_mean / pv + n * sample_mean / lv);
1102        (post_mean, post_var)
1103    }
1104}
1105
/// Bayesian Information Criterion: BIC = -2·lnL + k·ln(n).
/// Lower is better; the ln(n) factor penalizes extra parameters more
/// heavily than AIC as the sample size grows.
pub fn bayesian_information_criterion(log_likelihood: f64, n_params: usize, n_samples: usize) -> f64 {
    let penalty = n_params as f64 * (n_samples as f64).ln();
    penalty - 2.0 * log_likelihood
}
1110
/// Akaike Information Criterion: AIC = -2·lnL + 2k. Lower is better.
pub fn akaike_information_criterion(log_likelihood: f64, n_params: usize) -> f64 {
    let k = n_params as f64;
    2.0 * k - 2.0 * log_likelihood
}
1115
1116// ============================================================
1117// INFORMATION THEORY
1118// ============================================================
1119
/// Shannon entropy H(P) = -Σ p·ln(p), in nats (natural log base).
/// Zero-probability entries contribute nothing (0·ln 0 := 0).
pub fn entropy(probs: &[f64]) -> f64 {
    let mut h = 0.0;
    for &p in probs {
        if p > 0.0 {
            h -= p * p.ln();
        }
    }
    h
}
1127
/// Cross-entropy H(P, Q) = -Σ_x P(x)·ln(Q(x)), in nats.
///
/// Terms with P(x) = 0 contribute nothing (0·ln 0 := 0). If some
/// outcome has P(x) > 0 but Q(x) = 0, the cross-entropy is infinite;
/// this is reported as `f64::INFINITY`. (Previously such terms were
/// silently filtered out, understating the result.)
pub fn cross_entropy(p: &[f64], q: &[f64]) -> f64 {
    let mut h = 0.0;
    for (&pi, &qi) in p.iter().zip(q.iter()) {
        if pi > 0.0 {
            if qi > 0.0 {
                h -= pi * qi.ln();
            } else {
                // Q assigns zero mass where P has support: H(P, Q) = ∞.
                return f64::INFINITY;
            }
        }
    }
    h
}
1136
/// KL divergence D_KL(P ‖ Q) = Σ_x P(x)·ln(P(x)/Q(x)), in nats.
///
/// Terms with P(x) = 0 contribute nothing. If P(x) > 0 where
/// Q(x) = 0 (absolute continuity violated), the divergence is
/// infinite and `f64::INFINITY` is returned. (Previously such terms
/// were silently dropped, understating the divergence.)
pub fn kl_divergence(p: &[f64], q: &[f64]) -> f64 {
    let mut d = 0.0;
    for (&pi, &qi) in p.iter().zip(q.iter()) {
        if pi > 0.0 {
            if qi > 0.0 {
                d += pi * (pi / qi).ln();
            } else {
                // Q lacks support where P has mass: D_KL = ∞.
                return f64::INFINITY;
            }
        }
    }
    d
}
1145
/// Mutual information I(X;Y) in nats from a joint probability table
/// (rows index X, columns index Y). Marginals are formed by summing
/// rows and columns; zero-probability cells are skipped.
pub fn mutual_information(joint: &[Vec<f64>]) -> f64 {
    if joint.is_empty() {
        return 0.0;
    }
    let cols = joint[0].len();
    // Marginal of X: sum across each row.
    let px: Vec<f64> = joint.iter().map(|row| row.iter().sum()).collect();
    // Marginal of Y: sum down each column.
    let py: Vec<f64> = (0..cols)
        .map(|j| joint.iter().map(|row| row[j]).sum())
        .collect();
    let mut info = 0.0;
    for (row, &pxi) in joint.iter().zip(px.iter()) {
        for (&pij, &pyj) in row.iter().zip(py.iter()) {
            if pij > 0.0 && pxi > 0.0 && pyj > 0.0 {
                info += pij * (pij / (pxi * pyj)).ln();
            }
        }
    }
    info
}
1164
1165/// Jensen-Shannon divergence — symmetric, bounded [0, ln(2)].
1166pub fn jensen_shannon_divergence(p: &[f64], q: &[f64]) -> f64 {
1167    let m: Vec<f64> = p.iter().zip(q.iter()).map(|(pi, qi)| (pi + qi) * 0.5).collect();
1168    0.5 * kl_divergence(p, &m) + 0.5 * kl_divergence(q, &m)
1169}
1170
1171// ============================================================
1172// TESTS
1173// ============================================================
1174
1175#[cfg(test)]
1176mod tests {
1177    use super::*;
1178
    // Mean of 1..=5 is exactly 3.
    #[test]
    fn test_mean() {
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        assert!((mean(&data) - 3.0).abs() < 1e-10);
    }
1184
    // Classic textbook data set; expected value is the population
    // variance 32/7 — documents which variance convention `variance` uses.
    #[test]
    fn test_variance() {
        let data = vec![2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0];
        assert!((variance(&data) - 4.571428571428571).abs() < 1e-8);
    }
1190
    // Standard deviation of the same data set, ≈ sqrt(32/7) ≈ 2.138.
    #[test]
    fn test_std_dev() {
        let data = vec![2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0];
        assert!((std_dev(&data) - 2.138).abs() < 0.001);
    }
1196
    // Odd-length input: the median is the middle element after sorting.
    #[test]
    fn test_median_odd() {
        let mut data = vec![3.0, 1.0, 4.0, 1.0, 5.0];
        assert_eq!(median(&mut data), 3.0);
    }
1202
    // Even-length input: the median averages the two middle elements.
    #[test]
    fn test_median_even() {
        let mut data = vec![1.0, 2.0, 3.0, 4.0];
        assert!((median(&mut data) - 2.5).abs() < 1e-10);
    }
1208
    // 50th percentile of 1..=100 should sit near 50.5; the loose 0.5
    // tolerance accommodates different interpolation conventions.
    #[test]
    fn test_percentile() {
        let mut data: Vec<f64> = (1..=100).map(|x| x as f64).collect();
        assert!((percentile(&mut data, 50.0) - 50.5).abs() < 0.5);
    }
1214
    // Exact positive linear relationship gives Pearson r = 1.
    #[test]
    fn test_pearson_r_perfect() {
        let x = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let y: Vec<f64> = x.iter().map(|xi| 2.0 * xi + 1.0).collect();
        assert!((pearson_r(&x, &y) - 1.0).abs() < 1e-10);
    }
1221
    // Perfectly reversed ranks give Spearman rho = -1.
    #[test]
    fn test_spearman_rho() {
        let x = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let y = vec![5.0, 4.0, 3.0, 2.0, 1.0];
        assert!((spearman_rho(&x, &y) + 1.0).abs() < 1e-10);
    }
1228
    // Standard normal CDF: Φ(0) = 0.5 and Φ(1.96) ≈ 0.975.
    #[test]
    fn test_normal_dist_cdf() {
        let n = NormalDist { mean: 0.0, std_dev: 1.0 };
        assert!((n.cdf(0.0) - 0.5).abs() < 1e-6);
        assert!((n.cdf(1.96) - 0.975).abs() < 0.001);
    }
1235
    // Statistical smoke test: with a fixed seed the sample mean of 10k
    // draws should land near the distribution mean (deterministic RNG,
    // so no flakiness).
    #[test]
    fn test_normal_dist_sample() {
        let n = NormalDist { mean: 5.0, std_dev: 2.0 };
        let mut rng = Xorshift64::new(42);
        let samples: Vec<f64> = (0..10000).map(|_| n.sample(&mut rng)).collect();
        let m = mean(&samples);
        assert!((m - 5.0).abs() < 0.1, "mean {} far from 5.0", m);
    }
1244
    // Exponential(λ=2) has mean 1/λ = 0.5; seeded sampling keeps the
    // check deterministic.
    #[test]
    fn test_exponential_sample() {
        let e = ExponentialDist { lambda: 2.0 };
        let mut rng = Xorshift64::new(42);
        let samples: Vec<f64> = (0..10000).map(|_| e.sample(&mut rng)).collect();
        let m = mean(&samples);
        assert!((m - 0.5).abs() < 0.05, "mean {} far from 0.5", m);
    }
1253
    // Poisson(λ=3) has mean λ; seeded sampling keeps the check
    // deterministic.
    #[test]
    fn test_poisson_sample() {
        let p = PoissonDist { lambda: 3.0 };
        let mut rng = Xorshift64::new(42);
        let samples: Vec<f64> = (0..10000).map(|_| p.sample(&mut rng) as f64).collect();
        let m = mean(&samples);
        assert!((m - 3.0).abs() < 0.1, "mean {} far from 3.0", m);
    }
1262
    // Gamma(shape k, scale θ) has mean k·θ; seeded sampling keeps the
    // check deterministic.
    #[test]
    fn test_gamma_sample() {
        let g = GammaDist { shape: 2.0, scale: 3.0 };
        let mut rng = Xorshift64::new(99);
        let samples: Vec<f64> = (0..10000).map(|_| g.sample(&mut rng)).collect();
        let m = mean(&samples);
        // Expected mean = shape * scale = 6
        assert!((m - 6.0).abs() < 0.2, "mean {} far from 6.0", m);
    }
1272
    // Data centered on the hypothesized mean: the one-sample t-test
    // should NOT reject the null hypothesis.
    #[test]
    fn test_t_test_one_sample() {
        let data = vec![10.0, 11.0, 9.5, 10.5, 10.2, 9.8, 10.1, 9.9, 10.3, 10.4];
        let (t, p) = t_test_one_sample(&data, 10.0);
        assert!(p > 0.05, "should not reject null at 10.0; p={}", p);
        let _ = t;
    }
1280
    // Clearly separated groups: the two-sample t-test should reject
    // the null hypothesis of equal means.
    #[test]
    fn test_t_test_two_sample() {
        let a = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let b = vec![6.0, 7.0, 8.0, 9.0, 10.0];
        let (_t, p) = t_test_two_sample(&a, &b);
        assert!(p < 0.05, "should reject null; p={}", p);
    }
1288
    // Observed exactly equals expected: χ² statistic is 0 and the
    // p-value is near 1.
    #[test]
    fn test_chi_squared_test() {
        let obs = vec![10.0, 20.0, 30.0];
        let exp = vec![10.0, 20.0, 30.0];
        let (chi2, p) = chi_squared_test(&obs, &exp);
        assert!(chi2.abs() < 1e-10);
        assert!(p > 0.9);
    }
1297
    // Noise-free line y = 3x + 1 must be recovered exactly by OLS.
    #[test]
    fn test_linear_regression() {
        let x = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let y: Vec<f64> = x.iter().map(|xi| 3.0 * xi + 1.0).collect();
        let (slope, intercept) = linear_regression(&x, &y);
        assert!((slope - 3.0).abs() < 1e-10);
        assert!((intercept - 1.0).abs() < 1e-10);
    }
1306
    // Noise-free quadratic y = x² + 2x + 1: a degree-2 fit must
    // recover the coefficients [1, 2, 1] (constant term first).
    #[test]
    fn test_polynomial_regression() {
        let x = vec![0.0, 1.0, 2.0, 3.0];
        let y: Vec<f64> = x.iter().map(|xi| xi * xi + 2.0 * xi + 1.0).collect();
        let coeffs = polynomial_regression(&x, &y, 2);
        assert_eq!(coeffs.len(), 3);
        assert!((coeffs[0] - 1.0).abs() < 1e-6);
        assert!((coeffs[1] - 2.0).abs() < 1e-6);
        assert!((coeffs[2] - 1.0).abs() < 1e-6);
    }
1317
    // Perfect predictions give R² = 1.
    #[test]
    fn test_r_squared() {
        let y_true = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let y_pred = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        assert!((r_squared(&y_true, &y_pred) - 1.0).abs() < 1e-10);
    }
1324
    // Linearly separable 1-D data: after training, the fitted model
    // must place each class on the correct side of probability 0.5.
    #[test]
    fn test_logistic_regression_separable() {
        let x = vec![vec![-2.0], vec![-1.0], vec![1.0], vec![2.0]];
        let y = vec![false, false, true, true];
        let w = logistic_regression(&x, &y, 0.5, 500);
        let sigmoid = |z: f64| 1.0 / (1.0 + (-z).exp());
        let pred_neg = sigmoid(w[0] + w[1] * (-2.0));
        let pred_pos = sigmoid(w[0] + w[1] * 2.0);
        assert!(pred_neg < 0.5, "negative class should have prob < 0.5");
        assert!(pred_pos > 0.5, "positive class should have prob > 0.5");
    }
1336
    // Beta(1,1) prior plus 6 successes / 4 failures gives Beta(7,5)
    // with posterior mean 7/12.
    #[test]
    fn test_bayesian_update() {
        let prior = BetaBernoulli { alpha: 1.0, beta: 1.0 };
        let posterior = update_beta_bernoulli(prior, 6, 4);
        assert!((posterior.alpha - 7.0).abs() < 1e-10);
        assert!((posterior.beta - 5.0).abs() < 1e-10);
        assert!((posterior_mean(&posterior) - 7.0 / 12.0).abs() < 1e-10);
    }
1345
    // Uniform distribution over 4 outcomes has entropy ln(4) nats.
    #[test]
    fn test_entropy() {
        let uniform = vec![0.25, 0.25, 0.25, 0.25];
        assert!((entropy(&uniform) - (4.0f64).ln()).abs() < 1e-10);
    }
1351
    // Identical distributions have zero KL divergence.
    #[test]
    fn test_kl_divergence() {
        let p = vec![0.5, 0.5];
        let q = vec![0.5, 0.5];
        assert!(kl_divergence(&p, &q).abs() < 1e-10);
    }
1358
    // Disjoint point masses attain the JSD upper bound ln(2).
    #[test]
    fn test_jsd() {
        let p = vec![1.0, 0.0];
        let q = vec![0.0, 1.0];
        let jsd = jensen_shannon_divergence(&p, &q);
        assert!((jsd - 2.0f64.ln()).abs() < 1e-10);
    }
1366
    // Uniformity smoke test: mean of 1000 PCG32 uniforms should be
    // near 0.5 (seeded, so deterministic).
    #[test]
    fn test_pcg32() {
        let mut rng = Pcg32::new(42, 1);
        let v: Vec<f64> = (0..1000).map(|_| rng.next_f64()).collect();
        let m = mean(&v);
        assert!((m - 0.5).abs() < 0.05);
    }
1374
    // Same uniformity smoke test for the SplitMix64 generator.
    #[test]
    fn test_splitmix64() {
        let mut rng = SplitMix64::new(12345);
        let v: Vec<f64> = (0..1000).map(|_| rng.next_f64()).collect();
        let m = mean(&v);
        assert!((m - 0.5).abs() < 0.05);
    }
1382
    // Shuffling must be a permutation: the multiset of elements is
    // preserved (order itself is not asserted, as a shuffle may be
    // the identity).
    #[test]
    fn test_shuffle() {
        let mut data = vec![1, 2, 3, 4, 5, 6, 7, 8];
        let original = data.clone();
        let mut rng = Xorshift64::new(7);
        shuffle(&mut data, &mut rng);
        // Not necessarily different but should contain same elements
        let mut sorted = data.clone();
        sorted.sort();
        assert_eq!(sorted, vec![1, 2, 3, 4, 5, 6, 7, 8]);
        let _ = original;
    }
1395
    // With all mass on index 1, weighted sampling has no choice.
    #[test]
    fn test_weighted_sample() {
        let weights = vec![0.0, 1.0, 0.0]; // must pick index 1
        let mut rng = Xorshift64::new(42);
        let idx = weighted_sample(&weights, &mut rng);
        assert_eq!(idx, 1);
    }
1403
    // Drawing 10 of 100 without replacement: exactly 10 results, all
    // distinct (dedup on the sorted copy must not shrink it).
    #[test]
    fn test_sample_without_replacement() {
        let mut rng = Xorshift64::new(42);
        let sample = sample_without_replacement(100, 10, &mut rng);
        assert_eq!(sample.len(), 10);
        // All unique
        let mut s = sample.clone();
        s.sort();
        s.dedup();
        assert_eq!(s.len(), 10);
    }
1415}