simd_kernels/kernels/scientific/distributions/shared/scalar.rs

//! # **Scalar Distribution Utilities Module** - *High-Precision Scalar Statistical Functions*
//!
//! Fundamental scalar mathematical functions providing the computational building blocks for
//! statistical distribution calculations with optimal numerical precision and SIMD acceleration.
//! These utilities form the foundation for all distribution PDF, CDF, and quantile computations.

// Copyright Peter Bower 2025. All Rights Reserved.
// Licensed under Mozilla Public License (MPL) 2.0.

include!(concat!(env!("OUT_DIR"), "/simd_lanes.rs"));

use std::{
    f64::consts::PI,
    simd::{LaneCount, Simd, StdFloat, SupportedLaneCount},
};

use crate::kernels::scientific::{
    distributions::shared::constants::*,
    erf::{erfc, erfc_inv},
};

/// Natural log of the absolute value of the Gamma function, ln|Γ(x)|.
///
/// * Aims to match `scipy.special.gammaln` for all real inputs.
/// * Lanczos approximation (g = 7, n = 9) for x ≥ 0.5.
/// * Reflection formula for x < 0.5 using `ln(|sin(πx)|)`.
/// * Poles at non-positive integers return **+∞**.
/// * Propagates NaN.
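///
/// # Example
///
/// Illustrative check against exact values (Γ(5) = 4! = 24 and |Γ(-0.5)| = 2√π); the snippet
/// is marked `ignore` because the import path depends on how the crate exposes this module.
///
/// ```ignore
/// // ln Γ(5) = ln 24
/// assert!((ln_gamma(5.0) - 24.0_f64.ln()).abs() < 1e-12);
/// // Reflection branch: ln|Γ(-0.5)| = ln(2·√π)
/// assert!((ln_gamma(-0.5) - (2.0 * std::f64::consts::PI.sqrt()).ln()).abs() < 1e-12);
/// ```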
#[inline(always)]
pub fn ln_gamma(x: f64) -> f64 {
    // Propagate NaN
    if x.is_nan() {
        return f64::NAN;
    }

    // Infinity input: ln_gamma(inf) == inf
    if x.is_infinite() && x.is_sign_positive() {
        return f64::INFINITY;
    }

    // Poles: Γ(x) has simple poles at 0, −1, −2, …  ⇒  ln|Γ| → +∞
    if x <= 0.0 && (x.fract().abs() < 1e-14) {
        return f64::INFINITY;
    }

    // Reflection branch for  x < 0.5
    //
    // SciPy’s gammaln returns ln|Γ(x)|, hence the absolute value on sin(πx).
    if x < 0.5 {
        return std::f64::consts::PI.ln()
            - (std::f64::consts::PI * x).sin().abs().ln()
            - ln_gamma(1.0 - x);
    }

    // Lanczos approximation for  x ≥ 0.5
    let z = x - 1.0; // shift to minimise cancellation
    let mut a = COF[0];
    for (i, &c) in COF.iter().enumerate().skip(1) {
        a += c / (z + i as f64);
    }
    let t = z + 7.5; // g + ½  with g = 7
    HALF_LOG_TWO_PI + (z + 0.5) * t.ln() - t + a.ln()
}

/// ln(k!) = ln_gamma(k+1)
#[inline(always)]
pub fn ln_gamma_plus1(k: f64) -> f64 {
    ln_gamma(k + 1.0)
}

/// Vectorised Lanczos ln Γ for x >= 1.0 (reflection is not needed for the binomial kernels).
/// Provided here because `std::simd` does not offer a ln-gamma primitive.
#[inline(always)]
pub fn ln_gamma_simd<const N: usize>(x: Simd<f64, N>) -> Simd<f64, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    let z = x - Simd::splat(1.0); // x‐1
    let mut a = Simd::splat(COF[0]); // Σ c₀
    for (i, &c) in COF.iter().enumerate().skip(1) {
        a += Simd::splat(c) / (z + Simd::splat(i as f64));
    }
    let t = z + Simd::splat(7.5); // x-1 + g + 0.5
    let half_ln_two_pi = Simd::splat(0.9189385332046727_f64); // ½ ln(2π)
    half_ln_two_pi + (z + Simd::splat(0.5)) * t.ln() - t + a.ln()
}

/// Inverse of the regularised lower incomplete gamma:
/// finds `x` such that  P(a, x) = p  (a>0, 0≤p≤1).
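///
/// # Example
///
/// Illustrative round trip through `reg_lower_gamma`; the tolerance is indicative only and
/// the snippet is marked `ignore` because the import path depends on the crate layout.
///
/// ```ignore
/// let (a, p) = (3.5, 0.25);
/// let x = inv_reg_lower_gamma(a, p);
/// assert!((reg_lower_gamma(a, x) - p).abs() < 1e-10);
/// ```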
#[inline(always)]
pub fn inv_reg_lower_gamma(a: f64, p: f64) -> f64 {
    if !(a.is_finite() && p.is_finite()) || a <= 0.0 {
        return f64::NAN;
    }
    if p <= 0.0 {
        return 0.0;
    }
    if p >= 1.0 {
        return f64::INFINITY;
    }

    // Use correct small-x asymptotic:  P(a,x) ~ x^a / Γ(a+1)
    // This works for any a and is essential for tiny p (e.g. 1e-300).
    let mut x = if p < 1e-8 {
        (p * gamma_func(a + 1.0)).powf(1.0 / a).max(1e-300)
    } else if a > 1.0 {
        // Wilson–Hilferty style guess (no normal inverse needed).
        let t = (-2.0 * p.ln()).sqrt();
        let s = (t - (2.0 * a - 1.0).sqrt()) / (2.0 * (a - 1.0).sqrt());
        (a * (1.0 - s + (s * s) / 3.0)).max(1e-300)
    } else {
        (p * gamma_func(a + 1.0)).powf(1.0 / a).max(1e-300)
    };

    // ---- Newton refinement ----------------------------------------------
    const MAX_ITERS: usize = 80;
    const REL_TOL: f64 = 1e-15;
    const ABS_FLOOR: f64 = 5e-16;

    for _ in 0..MAX_ITERS {
        let f = reg_lower_gamma(a, x) - p; // F(x)=P(a,x)-p
        let fp = gamma_pdf_scalar(a, x); // F'(x)=f(a,x)
        if !fp.is_finite() || fp.abs() < 1e-300 {
            break;
        }
        let mut dx = f / fp;

        let max_step = x.max(1.0); // a bit looser than 0.5*x
        if dx.abs() > max_step {
            dx = max_step * dx.signum();
        }

        let x_new = (x - dx).max(1e-300);
        let tol = (REL_TOL * (1.0 + x_new.abs())).max(ABS_FLOOR);
        if (x_new - x).abs() <= tol {
            x = x_new;
            break;
        }
        x = x_new;
    }

    let f = reg_lower_gamma(a, x) - p;
    let fp = gamma_pdf_scalar(a, x);
    if fp.is_finite() && fp.abs() > 0.0 && x.is_finite() && x > 0.0 {
        let fpp = fp * ((a - 1.0) / x - 1.0); // derivative of pdf
        let h = f / fp;
        let denom = 1.0 - 0.5 * h * (fpp / fp);
        if denom.is_finite() && denom != 0.0 {
            let xh = (x - h / denom).max(1e-300);
            // only accept if it actually improves:
            if (reg_lower_gamma(a, xh) - p).abs() <= f.abs() {
                x = xh;
            }
        }
    }

    x
}

/// Inverse of the regularised upper incomplete gamma:
/// finds `x` such that  Q(a, x) = q  (a>0, 0≤q≤1), Q=1-P.
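///
/// # Example
///
/// Illustrative consistency check against the lower-tail inverse (Q = 1 − P); the tolerance
/// is indicative and the snippet is marked `ignore` because the import path depends on the
/// crate layout.
///
/// ```ignore
/// let (a, q) = (2.0, 0.3);
/// let x_upper = inv_reg_upper_gamma(a, q);
/// let x_lower = inv_reg_lower_gamma(a, 1.0 - q);
/// assert!((x_upper - x_lower).abs() < 1e-8);
/// ```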
#[inline(always)]
pub fn inv_reg_upper_gamma(a: f64, q: f64) -> f64 {
    if !(a.is_finite() && q.is_finite()) || a <= 0.0 {
        return f64::NAN;
    }
    if q <= 0.0 {
        return f64::INFINITY;
    }
    if q >= 1.0 {
        return 0.0;
    }

    let p = 1.0 - q;

    // ---- Initial guess ---------------------------------------------------
    // For the extreme right tail this seed is accurate; for moderate q, Newton corrects it.
    let mut x = if q > 1e-8 && a > 1.0 {
        let t = (-2.0 * q.ln()).sqrt();
        let s = (t - (2.0 * a - 1.0).sqrt()) / (2.0 * (a - 1.0).sqrt());
        (a * (1.0 - s + (s * s) / 3.0)).max(1e-300)
    } else {
        // Fallback seed: small-P series inverse applied to p = 1 - q (also covers a ≤ 1).
        (p * gamma_func(a + 1.0)).powf(1.0 / a).max(1e-300)
    };

    // ---- Newton refinement on F(x)=Q(a,x)-q = 1-P(a,x)-q ----------------
    const MAX_ITERS: usize = 80;
    const REL_TOL: f64 = 1e-15;
    const ABS_FLOOR: f64 = 5e-16;

    for _ in 0..MAX_ITERS {
        let f = (1.0 - reg_lower_gamma(a, x)) - q; // F(x)=Q-q
        let fp = -gamma_pdf_scalar(a, x); // F'(x)=-pdf
        if !fp.is_finite() || fp.abs() < 1e-300 {
            break;
        }
        let mut dx = f / fp;

        let max_step = x.max(1.0);
        if dx.abs() > max_step {
            dx = max_step * dx.signum();
        }

        let x_new = (x - dx).max(1e-300);
        let tol = (REL_TOL * (1.0 + x_new.abs())).max(ABS_FLOOR);
        if (x_new - x).abs() <= tol {
            x = x_new;
            break;
        }
        x = x_new;
    }

    // Halley polish
    let f = (1.0 - reg_lower_gamma(a, x)) - q;
    let fp = -gamma_pdf_scalar(a, x);
    if fp.is_finite() && fp.abs() > 0.0 && x.is_finite() && x > 0.0 {
        let fpp = fp * ((a - 1.0) / x - 1.0); // F''(x) = d/dx(-pdf) = -pdf·((a-1)/x - 1)
        let h = f / fp;
        let denom = 1.0 - 0.5 * h * (fpp / fp);
        if denom.is_finite() && denom != 0.0 {
            let xh = (x - h / denom).max(1e-300);
            if ((1.0 - reg_lower_gamma(a, xh)) - q).abs() <= f.abs() {
                x = xh;
            }
        }
    }

    x
}

/// Regularised lower incomplete gamma P(a, x)
///
/// Edge cases:
/// * `x < 0`              → NaN
/// * `a  < 0`             → NaN
/// * `a == 0` & x ≥ 0     → 1.0
/// * `x == 0` & a  > 0    → 0.0
/// * any NaN argument     → NaN
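///
/// # Example
///
/// Illustrative check of the closed form for a = 1, where P(1, x) = 1 − exp(−x); marked
/// `ignore` because the import path depends on the crate layout.
///
/// ```ignore
/// let x = 0.7_f64;
/// assert!((reg_lower_gamma(1.0, x) - (1.0 - (-x).exp())).abs() < 1e-12);
/// ```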
#[inline(always)]
pub fn reg_lower_gamma(a: f64, x: f64) -> f64 {
    // Propagate NaNs first
    if !(a.is_finite() && x.is_finite()) {
        return f64::NAN;
    }
    // Domain-error branches -----------------------------------------------
    if x < 0.0 {
        return f64::NAN;
    }
    if a < 0.0 {
        return f64::NAN;
    }
    if a == 0.0 {
        return 1.0;
    } // gammainc(0, x) == 1 for x ≥ 0
    if x == 0.0 {
        return 0.0;
    } // positive a, zero x

    if x < a + 1.0 {
        // Series representation
        let mut ap = a;
        let mut sum = 1.0 / a;
        let mut del = sum;
        for _ in 0..100 {
            ap += 1.0;
            del *= x / ap;
            sum += del;
            if del.abs() < sum.abs() * 1e-14 {
                break;
            }
        }
        (-(x) + a * x.ln() - ln_gamma(a)).exp() * sum
    } else {
        // Continued fraction (Lentz's method)
        let mut b = x + 1.0 - a;
        let mut c = 1.0 / f64::MIN_POSITIVE;
        let mut d = 1.0 / b;
        let mut h = d;
        for i in 1..100 {
            let an = -i as f64 * (i as f64 - a);
            b += 2.0;
            d = an * d + b;
            if d.abs() < 1e-30 {
                d = 1e-30
            }
            c = b + an / c;
            if c.abs() < 1e-30 {
                c = 1e-30
            }
            d = 1.0 / d;
            let delta = d * c;
            h *= delta;
            if (delta - 1.0).abs() < 1e-14 {
                break;
            }
        }
        1.0 - (-(x) + a * x.ln() - ln_gamma(a)).exp() * h
    }
}

/// Scalar gamma PDF for Newton refinement
#[inline(always)]
pub fn gamma_pdf_scalar(a: f64, x: f64) -> f64 {
    if x <= 0.0 {
        return 0.0;
    }
    ((a - 1.0) * x.ln() - x - ln_gamma(a)).exp()
}

/// Computes the Gamma function Γ(x).
///
/// Special cases:
/// * `x = 0`           → `+∞`
/// * `x ∈ ℤ⁻` (negative integer) → `NaN`
/// * `x = n`   (1 ≤ n ≤ 171, integer) → exact `(n-1)!` via lookup
/// * `x = n+½` (0 ≤ n ≤ 170)         → closed-form half-integer Gamma
/// * otherwise
///   * if `x > 0`  → Lanczos via `exp(ln_gamma(x))`
///   * if `x < 0`  → reflection  Γ(x)=π / [sin(πx) Γ(1−x)]
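///
/// # Example
///
/// Illustrative check against exact values (Γ(6) = 5! = 120 and Γ(0.5) = √π); marked
/// `ignore` because the import path depends on the crate layout.
///
/// ```ignore
/// assert!((gamma_func(6.0) - 120.0).abs() < 1e-12);                        // factorial lookup
/// assert!((gamma_func(0.5) - std::f64::consts::PI.sqrt()).abs() < 1e-14);  // half-integer path
/// ```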
#[inline(always)]
pub fn gamma_func(x: f64) -> f64 {
    // NaN propagates
    if x.is_nan() {
        return f64::NAN;
    }

    // 0 → +∞    (SciPy)
    if x == 0.0 {
        return f64::INFINITY;
    }

    // Negative integers (simple poles) → NaN   (SciPy)
    if x < 0.0 && (x.fract().abs() < 1e-14) {
        return f64::NAN;
    }

    // ----------------  Positive region fast paths  ---------------- //
    if x > 0.0 {
        // Exact factorials  Γ(n) = (n-1)!   for 1 ≤ n ≤ 171
        if x.fract().abs() < 1e-14 {
            let n = x as usize;
            if n >= 1 && n <= 171 {
                // (n-1)!  ;  lookup table holds 0! … 170!
                return factorial_lookup((n - 1) as u64);
            }
        }

        // Positive half-integers  Γ(n+½)
        if (x - 0.5).fract().abs() < 1e-14 {
            let n = (x - 0.5).round() as u64; // n ≥ 0
            if n <= 170 {
                return half_integer_gamma(n);
            }
        }

        // General positive x  — Lanczos via lnΓ
        return ln_gamma(x).exp();
    }

    // ----------------  x < 0  (non-integer)  ---------------- //
    //
    // Reflection formula:
    //     Γ(x) = π / [ sin(πx) · Γ(1 − x) ]
    let sin_pi_x = (std::f64::consts::PI * x).sin();

    // If sin(πx) == 0 the point would be a pole; but we already ruled
    // out integer arguments above, so a tiny guard is sufficient.
    if sin_pi_x.abs() < f64::EPSILON {
        return f64::NAN; // should never be hit, keeps us safe
    }

    // Γ(1−x) is called with a positive argument (because x<0 ⇒ 1−x>1),
    // so the recursion bottoms out in the positive fast-paths.
    let gamma_1_minus_x = gamma_func(1.0 - x);

    std::f64::consts::PI / (sin_pi_x * gamma_1_minus_x)
}

/// Evaluates gamma function at half-integer arguments using closed-form expression.
///
/// Computes Γ(n + 1/2) for non-negative integer n using the exact closed-form
/// formula involving factorials and powers.
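///
/// Uses the identity Γ(n + ½) = (2n)!·√π / (4ⁿ·n!).
///
/// # Example
///
/// Illustrative check for n = 1, where Γ(3/2) = √π / 2; marked `ignore` because the import
/// path depends on the crate layout.
///
/// ```ignore
/// assert!((half_integer_gamma(1) - 0.5 * std::f64::consts::PI.sqrt()).abs() < 1e-14);
/// ```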
#[inline(always)]
pub fn half_integer_gamma(n: u64) -> f64 {
    let two_n_fact = factorial_lookup(2 * n);
    let n_fact = factorial_lookup(n);
    let pow4n = 2f64.powi(2 * n as i32); // 4^n = 2^(2n)
    (PI.sqrt() * two_n_fact) / (pow4n * n_fact)
}

/// Regularised incomplete beta I_x(a, b).
///
///   * `a == 0`  →  1.0  (all mass at x = 0, so I_x = 1 for any x > 0)
///   * `b == 0`  →  0.0  (all mass at x = 1, so I_x = 0 for any x < 1)
///   * non-finite inputs propagate `NaN`
///   * x ≤ 0 → 0,  x ≥ 1 → 1
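///
/// # Example
///
/// Illustrative identities: Beta(1, 1) is the uniform distribution, so I_x(1, 1) = x, and
/// I_x(a, b) + I_{1-x}(b, a) = 1. Marked `ignore` because the import path depends on the
/// crate layout.
///
/// ```ignore
/// assert!((incomplete_beta(1.0, 1.0, 0.3) - 0.3).abs() < 1e-12);
/// let (a, b, x) = (2.5, 4.0, 0.4);
/// assert!((incomplete_beta(a, b, x) + incomplete_beta(b, a, 1.0 - x) - 1.0).abs() < 1e-12);
/// ```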
#[inline(always)]
pub fn incomplete_beta(a: f64, b: f64, x: f64) -> f64 {
    // Handle NaNs first
    if !(a.is_finite() && b.is_finite() && x.is_finite()) {
        return f64::NAN;
    }

    // Domain edges -----------------------------------------------------------
    if x <= 0.0 {
        return 0.0;
    }
    if x >= 1.0 {
        return 1.0;
    }

    // a = 0 or b = 0  (SciPy conventions)
    if a == 0.0 {
        return 1.0; // all mass at x = 0, so I_x = 1 for any x > 0
    }
    if b == 0.0 {
        return 0.0; // all mass at x = 1, so I_x = 0 for any x < 1
    }

    // -----------------------------------------------------------------------
    //    Regular computation
    // -----------------------------------------------------------------------
    // Use symmetry for better convergence
    if x > (a + 1.0) / (a + b + 2.0) {
        // I_x(a,b) = 1 - I_{1-x}(b,a)
        return 1.0 - incomplete_beta(b, a, 1.0 - x);
    }

    // front factor :  x^a (1-x)^b / (a * B(a,b))
    let ln_beta = ln_gamma(a) + ln_gamma(b) - ln_gamma(a + b);
    let front = (a * x.ln() + b * (1.0 - x).ln() - ln_beta).exp() / a;

    // Lentz's continued fraction
    const EPS: f64 = 1e-14;
    const FPMIN: f64 = 1e-300;
    const MAX_ITS: usize = 200;

    let mut c = 1.0;
    let mut d = 1.0 - (a + b) * x / (a + 1.0);
    if d.abs() < FPMIN {
        d = FPMIN;
    }
    d = 1.0 / d;
    let mut h = d;

    for m in 1..=MAX_ITS {
        let m2 = 2 * m;

        // -------- even step
        let aa = m as f64 * (b - m as f64) * x / ((a + m2 as f64 - 1.0) * (a + m2 as f64));
        d = 1.0 + aa * d;
        if d.abs() < FPMIN {
            d = FPMIN;
        }
        c = 1.0 + aa / c;
        if c.abs() < FPMIN {
            c = FPMIN;
        }
        d = 1.0 / d;
        h *= d * c;

        // -------- odd step
        let aa =
            -(a + m as f64) * (a + b + m as f64) * x / ((a + m2 as f64) * (a + m2 as f64 + 1.0));
        d = 1.0 + aa * d;
        if d.abs() < FPMIN {
            d = FPMIN;
        }
        c = 1.0 + aa / c;
        if c.abs() < FPMIN {
            c = FPMIN;
        }
        d = 1.0 / d;
        let delta = d * c;
        h *= delta;

        if (delta - 1.0).abs() < EPS {
            break;
        }
    }
    front * h
}

/// Inverse regularised incomplete beta: finds `x` such that I_x(a, b) = p.
///
/// Special cases:
///   * Any non-finite input → NaN
///   * p outside [0, 1]     → NaN
///   * `a == 0` → 1.0   (for 0 < p < 1)
///   * `b == 0` → 0.0   (for 0 < p < 1)
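///
/// # Example
///
/// Illustrative round trip through `incomplete_beta`; the tolerance is indicative and the
/// snippet is marked `ignore` because the import path depends on the crate layout.
///
/// ```ignore
/// let (a, b, p) = (3.0, 5.0, 0.42);
/// let x = incomplete_beta_inv(a, b, p);
/// assert!((incomplete_beta(a, b, x) - p).abs() < 1e-12);
/// ```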
#[inline(always)]
pub fn incomplete_beta_inv(a: f64, b: f64, p: f64) -> f64 {
    // Fast parameter & domain checks
    if !(a.is_finite() && b.is_finite() && p.is_finite()) {
        return f64::NAN;
    }
    if p < 0.0 || p > 1.0 {
        return f64::NAN;
    }
    if p == 0.0 {
        return 0.0;
    }
    if p == 1.0 {
        return 1.0;
    }

    if a == 0.0 {
        return 1.0;
    } // I_x(0, b) jumps to 1 immediately to the right of x = 0
    if b == 0.0 {
        return 0.0;
    } // I_x(a, 0) stays 0 until x = 1 (all mass at x = 1)

    // Initial Cornish-Fisher / Wilson–Hilferty seed
    let pp = if p < 0.5 { p } else { 1.0 - p };
    let t = (-2.0 * pp.ln()).sqrt();
    let mut x: f64;

    if a > 1.0 && b > 1.0 {
        // central region (both shape parameters > 1)
        let num = 2.30753 + 0.27061 * t;
        let den = 1.0 + (0.99229 + 0.04481 * t) * t;
        let mut xp = t - num / den;
        if p < 0.5 {
            xp = -xp;
        }

        let al = (xp * xp - 3.0) / 6.0;
        let h = 2.0 / (1.0 / (2.0 * a - 1.0) + 1.0 / (2.0 * b - 1.0));
        let w = xp * (al + h).sqrt() / h
            - (1.0 / (2.0 * b - 1.0) - 1.0 / (2.0 * a - 1.0)) * (al + 5.0 / 6.0 - 2.0 / (3.0 * h));
        x = a / (a + b * w.exp());
    } else {
        // one of the shapes ≤ 1 : use power-law seed
        let r = a / (a + b);
        let y = if p < r { p / r } else { (1.0 - p) / (1.0 - r) };
        x = if p < r {
            y.powf(1.0 / a)
        } else {
            1.0 - y.powf(1.0 / b)
        };
    }

    // Clamp to open interval (0,1) – avoids log/derivative blow-ups
    const EPS_X: f64 = 1e-15;
    if x <= 0.0 {
        x = EPS_X;
    }
    if x >= 1.0 {
        x = 1.0 - EPS_X;
    }

    // Newton iterations
    const NEWTON_EPS: f64 = 1e-14;
    const MAX_NEWTON: usize = 20;

    let ln_beta = ln_gamma(a) + ln_gamma(b) - ln_gamma(a + b);

    for _ in 0..MAX_NEWTON {
        let f = incomplete_beta(a, b, x) - p;

        // derivative:  x^(a-1) (1-x)^(b-1) / B(a,b)
        let df = ((a - 1.0) * x.ln() + (b - 1.0) * (1.0 - x).ln() - ln_beta).exp();

        // if derivative underflows, break to bisection
        if df == 0.0 || !df.is_finite() {
            break;
        }

        let dx = f / df;
        let mut x_new = x - dx;

        // Keep within (0,1)
        if x_new <= 0.0 {
            x_new = 0.5 * x;
        }
        if x_new >= 1.0 {
            x_new = 0.5 * (1.0 + x);
        }

        if (dx).abs() < NEWTON_EPS * x_new.max(EPS_X) {
            return x_new;
        }
        x = x_new;
    }

    // Bracketed bisection – fallback
    let mut lo = 0.0;
    let mut hi = 1.0;

    if incomplete_beta(a, b, x) > p {
        hi = x;
    } else {
        lo = x;
    }

    for _ in 0..64 {
        // 2⁻⁶⁴ ≈ 5.4e-20  ⇒ double precision satisfied
        let mid = 0.5 * (lo + hi);
        let f_mid = incomplete_beta(a, b, mid);

        if (f_mid - p).abs() < 1e-14 {
            // SciPy-level tolerance
            return mid;
        }
        if f_mid < p {
            lo = mid;
        } else {
            hi = mid;
        }
    }

    // final midpoint
    0.5 * (lo + hi)
}

/// Regularised lower incomplete gamma (series + continued fraction).
#[inline(always)]
pub fn regularised_gamma_p(s: f64, x: f64) -> f64 {
    // s > 0, x >= 0
    if x <= 0.0 {
        return 0.0;
    }
    if x < s + 1.0 {
        // Series representation
        let mut sum = 1.0 / s;
        let mut value = sum;
        let mut n = 1.0;
        while value.abs() > 1e-15 * sum {
            value *= x / (s + n);
            sum += value;
            n += 1.0;
            if n > 200.0 {
                break;
            }
        }
        (-(x) + s * x.ln() - ln_gamma(s)).exp() * sum
    } else {
        // Continued fraction representation
        let mut a = 1.0 - s;
        let mut b = a + x + 1.0;
        let mut c = 0.0;
        let mut p1 = 1.0;
        let mut q1 = x;
        let mut p2 = x + 1.0;
        let mut q2 = b * x;
        let mut ans = p2 / q2;
        let mut n = 1.0;
        while (p2 - p1).abs() > 1e-15 * ans.abs() {
            a += 1.0;
            b += 2.0;
            c += 1.0;
            let an = a * c;
            let p3 = b * p2 - an * p1;
            let q3 = b * q2 - an * q1;
            if q3.abs() < 1e-30 {
                break;
            }
            p1 = p2;
            q1 = q2;
            p2 = p3;
            q2 = q3;
            if q2 != 0.0 {
                ans = p2 / q2;
            }
            n += 1.0;
            if n > 200.0 {
                break;
            }
        }
        1.0 - (-(x) + s * x.ln() - ln_gamma(s)).exp() * ans
    }
}

/// High-performance vectorised logarithmic binomial coefficient computation.
///
/// Computes ln(C(n,k)) = ln(n! / (k!(n-k)!)) for vectors of n and k values
/// using SIMD vectorisation and optimised gamma function evaluation.
#[inline(always)]
pub fn ln_choose_v(
    n: core::simd::Simd<f64, W64>,
    k: core::simd::Simd<f64, W64>,
) -> core::simd::Simd<f64, W64> {
    ln_gamma_simd(n + core::simd::Simd::<f64, W64>::splat(1.0))
        - ln_gamma_simd(k + core::simd::Simd::<f64, W64>::splat(1.0))
        - ln_gamma_simd(n - k + core::simd::Simd::<f64, W64>::splat(1.0))
}

/// Computes logarithmic binomial coefficient for integer arguments with validation.
///
/// Evaluates ln(C(n,k)) = ln(n! / (k!(n-k)!)) for non-negative integer arguments
/// using gamma function evaluation.
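///
/// # Example
///
/// Illustrative check against a small exact coefficient, C(4, 2) = 6; marked `ignore`
/// because the import path depends on the crate layout.
///
/// ```ignore
/// assert!((ln_choose(4, 2) - 6.0_f64.ln()).abs() < 1e-12);
/// assert_eq!(ln_choose(2, 3), f64::NEG_INFINITY); // k > n
/// ```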
#[inline(always)]
pub fn ln_choose(n: u64, k: u64) -> f64 {
    if k > n {
        return f64::NEG_INFINITY;
    } // log(0) for invalid (impossible) binomials
    ln_gamma((n + 1) as f64) - ln_gamma((k + 1) as f64) - ln_gamma((n - k + 1) as f64)
}

/// Generic SIMD logarithmic binomial coefficient with compile-time lane count.
///
/// Computes ln(C(n,k)) for vectors of n and k values using SIMD vectorisation
/// with arbitrary lane counts determined at compile time.
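///
/// # Example
///
/// Illustrative lanewise use with an explicit lane count of 4 (ln C(10, 2) = ln 45); marked
/// `ignore` because the import path and the nightly `portable_simd` feature gate depend on
/// the crate setup.
///
/// ```ignore
/// use std::simd::Simd;
/// let n = Simd::<f64, 4>::splat(10.0);
/// let k = Simd::from_array([0.0, 1.0, 2.0, 3.0]);
/// let ln_c = ln_choose_simd(n, k);
/// assert!((ln_c.to_array()[2] - 45.0_f64.ln()).abs() < 1e-9);
/// ```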
#[inline(always)]
pub fn ln_choose_simd<const N: usize>(n: Simd<f64, N>, k: Simd<f64, N>) -> Simd<f64, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    ln_gamma_simd(n + Simd::splat(1.0))
        - ln_gamma_simd(k + Simd::splat(1.0))
        - ln_gamma_simd(n - k + Simd::splat(1.0))
}

/// Compute the Cornish–Fisher-based binomial quantile for a single `pi`.
/// Returns `f64::NAN` for any out-of-range or non-finite input.
/// Does not handle nulls; caller is responsible.
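///
/// # Example
///
/// Illustrative use, mirroring the symmetric case exercised in this module's tests; marked
/// `ignore` because the import path depends on the crate layout.
///
/// ```ignore
/// // Median of Binomial(n = 10, p = 0.5) is 5.
/// assert_eq!(binomial_quantile_cornish_fisher(0.5, 10, 0.5), 5.0);
/// // SciPy convention for pi <= 0.
/// assert_eq!(binomial_quantile_cornish_fisher(0.0, 20, 0.3), -1.0);
/// ```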
#[inline(always)]
pub fn binomial_quantile_cornish_fisher(pi: f64, n: u64, p_: f64) -> f64 {
    // Edge cases per scipy convention
    if !pi.is_finite() || !p_.is_finite() || n > (i64::MAX as u64) {
        return f64::NAN;
    }
    if pi <= 0.0 {
        return -1.0;
    }
    if pi >= 1.0 {
        return n as f64;
    }
    let mu = n as f64 * p_;
    let sigma = (n as f64 * p_ * (1.0 - p_)).sqrt();
    let z = normal_quantile_scalar(pi, 0.0, 1.0);
    let skew = (1.0 - 2.0 * p_) / sigma;
    let cf = z + skew * (z * z - 1.0) / 6.0;
    let mut k = (mu + sigma * cf + 0.5).floor();
    if k < 0.0 {
        k = 0.0;
    }
    if k > n as f64 {
        k = n as f64;
    }
    let mut k_int = k as i64;
    let cdf = binomial_cdf_scalar(k_int, n, p_);
    if cdf < pi {
        while k_int < (n as i64) && binomial_cdf_scalar(k_int, n, p_) < pi {
            k_int += 1;
        }
    } else {
        while k_int > 0 && binomial_cdf_scalar(k_int - 1, n, p_) >= pi {
            k_int -= 1;
        }
    }
    k_int as f64
}

/// Scalar binomial CDF
///
/// * k < 0  → 0
/// * k ≥ n  → 1
/// * p ≤ 0  → 1 for k ≥ 0
/// * p ≥ 1  → 0 for k < n, 1 for k ≥ n
///
/// Arguments:
///     k: i64 - Number of observed successes (can be negative).
///     n: u64 - Number of trials.
///     p: f64 - Probability of success (0 <= p <= 1).
///
/// Accuracy:
/// - This function aims to match the output of `scipy.stats.binom`
///   with a maximum absolute error of `1e-12` for all reasonable arguments.
/// - For most cases tested, the error is below `1e-14`.
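///
/// # Example
///
/// Illustrative check of the summation for a small exact case: for X ~ Binomial(4, 0.5),
/// P(X ≤ 2) = (1 + 4 + 6) / 16 = 0.6875. Marked `ignore` because the import path depends
/// on the crate layout.
///
/// ```ignore
/// assert!((binomial_cdf_scalar(2, 4, 0.5) - 0.6875).abs() < 1e-14);
/// ```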
#[inline(always)]
pub fn binomial_cdf_scalar(k: i64, n: u64, p: f64) -> f64 {
    // ---- Domain short-cuts ------------------------------------------------
    if k < 0 {
        return 0.0;
    } // left of support
    if k as u64 >= n {
        return 1.0;
    } // CDF reaches 1 at n
    if p <= 0.0 {
        return 1.0;
    } // all mass at 0
    if p >= 1.0 {
        return 0.0;
    } // all mass at n (but k < n here)

    // ---- Regular summation (k < n, 0 < p < 1) -----------------------------
    let k = k as u64;
    let mut cdf = 0.0_f64;
    let mut prob = (1.0 - p).powf(n as f64); // P(X = 0)
    cdf += prob;

    for i in 1..=k as usize {
        // recurrence:  P(X = i) = P(X = i-1) * (n-i+1)/i * p/(1-p)
        prob *= ((n - i as u64 + 1) as f64) * p / ((i as f64) * (1.0 - p));
        cdf += prob;
    }
    cdf
}

/// Core inverse standard normal function for left tail probabilities.
///
/// Computes Φ⁻¹(p) for probabilities p ∈ (0, 0.5] using Acklam's rational
/// approximation optimised for the left tail region.
#[inline(always)]
pub fn inv_std_normal_core(p: f64) -> f64 {
    debug_assert!(p > 0.0 && p <= 0.5);

    if p > P_LOW {
        // ---------------- central region ----------------
        let r = p - 0.5;
        let s = r * r;
        let num = (((((A[0] * s + A[1]) * s + A[2]) * s + A[3]) * s + A[4]) * s + A[5]) * r;
        let den = ((((B[0] * s + B[1]) * s + B[2]) * s + B[3]) * s + B[4]) * s + 1.0;
        num / den
    } else {
        // ---------------- lower tail --------------------
        let r = (-2.0 * p.ln()).sqrt();
        let num = ((((C[0] * r + C[1]) * r + C[2]) * r + C[3]) * r + C[4]) * r + C[5];
        let den = (((D[0] * r + D[1]) * r + D[2]) * r + D[3]) * r + 1.0;
        //  NOTE:  `num` is already negative here, so we do *not*
        //  apply an extra minus sign.
        num / den // ⇒  negative z-score
    }
}

/// Computes the inverse standard normal cumulative distribution function (quantile function).
///
/// Calculates the z-score corresponding to a given probability p such that Φ(z) = p,
/// where Φ is the standard normal CDF. Uses high-precision rational approximations
/// for numerical accuracy across the entire probability range.
///
/// # Parameters
/// - `p`: Probability value in the open interval (0, 1) for which to compute the quantile
///
/// # Returns
/// The z-score (quantile) such that the standard normal CDF evaluated at z equals p.
///
/// # Domain and Range
/// - **Domain**: p ∈ (0, 1)
/// - **Range**: z ∈ (-∞, ∞)
/// - **Special cases**:
///   - any `p` outside (0, 1), including exactly `0.0` and `1.0`, returns `NaN`
///   - `p = 0.5` returns `0.0`
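///
/// # Example
///
/// Illustrative symmetry check, which follows directly from the reduction to the left tail;
/// marked `ignore` because the import path depends on the crate layout.
///
/// ```ignore
/// assert_eq!(inv_std_normal(0.5), 0.0);
/// assert_eq!(inv_std_normal(0.2), -inv_std_normal(0.8)); // antisymmetric by construction
/// assert!(inv_std_normal(0.0).is_nan());
/// ```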
#[inline(always)]
pub fn inv_std_normal(p: f64) -> f64 {
    if !(p > 0.0 && p < 1.0) {
        return f64::NAN;
    }
    let (q, sign) = if p < 0.5 { (p, 1.0) } else { (1.0 - p, -1.0) };
    let x = if q < 0.02425 {
        let t = (-2.0 * q.ln()).sqrt();
        (((((C[0] * t + C[1]) * t + C[2]) * t + C[3]) * t + C[4]) * t + C[5])
            / ((((D[0] * t + D[1]) * t + D[2]) * t + D[3]) * t + 1.0)
    } else {
        let t = q - 0.5;
        let r = t * t;
        (((((A[0] * r + A[1]) * r + A[2]) * r + A[3]) * r + A[4]) * r + A[5]) * t
            / (((((B[0] * r + B[1]) * r + B[2]) * r + B[3]) * r + B[4]) * r + 1.0)
    };
    sign * x
}

/// Specialised Newton refinement for extreme chi-squared quantile computation.
///
/// High-precision iterative refinement method specifically optimised for
/// extreme chi-squared quantiles where standard methods may suffer from
/// numerical instability. Uses extended iteration counts and tighter
/// convergence tolerances to achieve maximum accuracy in challenging regions.
#[inline(always)]
pub fn chi2_newton_refine_extreme(mut x: f64, a: f64, p: f64) -> f64 {
    // Specialised Newton refinement for extreme chi-squared quantiles
    // Uses more iterations and tighter tolerance for extreme probabilities
    let ln_norm = -a * std::f64::consts::LN_2 - ln_gamma(a);
    let mut lo = 0.0_f64;
    let mut hi = f64::INFINITY;
    for _ in 0..16 {
        if !x.is_finite() {
            break;
        }
        let t = 0.5 * x;
        let fx = reg_lower_gamma(a, t) - p;
        if fx.abs() < 1e-15 {
            break;
        }

        let log_pdf = ln_norm + (a - 1.0) * x.ln() - 0.5 * x;
        let pdf = log_pdf.exp();

        if pdf <= 0.0 || !pdf.is_finite() {
            if fx > 0.0 {
                hi = hi.min(x);
            } else {
                lo = lo.max(x);
            }
            x = if hi.is_finite() {
                0.5 * (lo + hi)
            } else {
                (x + lo.max(0.0)) * 0.5
            };
            continue;
        }

        let mut step = fx / pdf;
        let max_step = 0.5 * (x.max(1.0));
        if step.abs() > max_step {
            step = step.signum() * max_step;
        }

        let x_new = (x - step).max(0.0);
        if fx > 0.0 {
            hi = hi.min(x);
        } else {
            lo = lo.max(x);
        }
        if x_new < lo || x_new > hi {
            x = if hi.is_finite() {
                0.5 * (lo + hi)
            } else {
                (x + lo) * 0.5
            };
        } else {
            x = x_new;
        }
    }
    x
}

/// Standard Newton refinement for chi-squared quantile computation.
///
/// Efficient iterative refinement method for chi-squared distribution quantiles
/// using safeguarded Newton's method with adaptive step damping. Provides
/// optimal balance between computational efficiency and numerical accuracy
/// for most parameter combinations encountered in statistical applications.
#[inline(always)]
pub fn chi2_newton_refine(mut x: f64, a: f64, p: f64) -> f64 {
    // Safeguarded Newton with damping; works from a decent seed
    let ln_norm = -a * std::f64::consts::LN_2 - ln_gamma(a); // for chi2 pdf log-constant
    let mut lo = 0.0_f64;
    let mut hi = f64::INFINITY;
    for _ in 0..8 {
        if !x.is_finite() {
            break;
        }
        let t = 0.5 * x;
        let fx = reg_lower_gamma(a, t) - p; // target function
        if fx.abs() < 1e-14 {
            break;
        }

        // log pdf: ln f'(x) = ln pdf_{chi2}(x)
        // pdf(x) = exp(ln_norm + (a-1)*ln(x) - x/2)
        let log_pdf = ln_norm + (a - 1.0) * x.ln() - 0.5 * x;
        let pdf = log_pdf.exp();

        if pdf <= 0.0 || !pdf.is_finite() {
            // fall back to bisection-like step if derivative unusable
            if fx > 0.0 {
                hi = hi.min(x);
            } else {
                lo = lo.max(x);
            }
            x = if hi.is_finite() {
                0.5 * (lo + hi)
            } else {
                (x + lo.max(0.0)) * 0.5
            };
            continue;
        }

        // Newton step with damping
        let mut step = fx / pdf;
        // limit overly large relative steps to keep monotone progress
        let max_step = 0.5 * (x.max(1.0));
        if step.abs() > max_step {
            step = step.signum() * max_step;
        }

        let x_new = (x - step).max(0.0); // enforce domain
        // maintain bracket based on sign of f
        if fx > 0.0 {
            hi = hi.min(x);
        } else {
            lo = lo.max(x);
        }
        x = x_new;
    }
    x
}

/// Evaluates standard normal cumulative distribution function with high accuracy.
///
/// Computes the cumulative distribution function Φ(z) of the standard normal
/// distribution N(0,1) at the specified point z using the complementary error
/// function for optimal numerical precision. This implementation provides
/// superior accuracy compared to direct integration methods.
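///
/// # Example
///
/// Illustrative checks of Φ(0) = 0.5 and the symmetry Φ(z) + Φ(−z) = 1; marked `ignore`
/// because the import path depends on the crate layout.
///
/// ```ignore
/// assert!((normal_cdf_scalar(0.0) - 0.5).abs() < 1e-15);
/// assert!((normal_cdf_scalar(1.3) + normal_cdf_scalar(-1.3) - 1.0).abs() < 1e-15);
/// ```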
#[inline(always)]
pub fn normal_cdf_scalar(z: f64) -> f64 {
    // high-accuracy CDF:  0.5·erfc(–z/√2) on the left, 1 – 0.5·erfc(z/√2) on the right
    if z < 0.0 {
        0.5 * erfc(-z / SQRT_2)
    } else {
        1.0 - 0.5 * erfc(z / SQRT_2)
    }
}

/// Evaluates standard normal probability density function at given point.
///
/// Computes the probability density function φ(z) of the standard normal
/// distribution N(0,1) at the specified point z. This function provides
/// numerically stable evaluation across the entire real domain with
/// optimal computational efficiency for statistical applications.
#[inline(always)]
pub fn normal_pdf_scalar(z: f64) -> f64 {
    // Standard normal PDF
    (-0.5 * z * z).exp() / (2.0 * PI).sqrt()
}

/// Inverse CDF Φ⁻¹(q) for the normal distribution.
///
/// Accuracy:
/// - Centre and bulk (e.g. 0.025 ≤ q ≤ 0.975): |err| < 1e-14 (equivalent to scipy.stats.norm.ppf, confirmed by unit tests).
/// - Extreme tails (q ≲ 1e-10 or q ≳ 1–1e-10): |err| < 1e-12 compared to SciPy reference values.
/// - **Reciprocal symmetry:** |Φ⁻¹(q) + Φ⁻¹(1–q)| is only guaranteed < 1e-7 in the extreme tails,
///   due to inherent limitations of the underlying algorithms and double-precision arithmetic.
///   This limitation is observed in SciPy as well as this implementation.
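///
/// # Example
///
/// Illustrative behaviour at the exact edge cases handled below; marked `ignore` because
/// the import path depends on the crate layout.
///
/// ```ignore
/// assert_eq!(normal_quantile_scalar(0.5, 10.0, 2.0), 10.0); // median equals the mean
/// assert_eq!(normal_quantile_scalar(0.0, 0.0, 1.0), f64::NEG_INFINITY);
/// assert_eq!(normal_quantile_scalar(1.0, 0.0, 1.0), f64::INFINITY);
/// ```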
pub fn normal_quantile_scalar(q: f64, mean: f64, std: f64) -> f64 {
    // Early exit edge cases
    if !q.is_finite() || !mean.is_finite() || !std.is_finite() || std <= 0.0 {
        return f64::NAN;
    }
    if q < 0.0 || q > 1.0 {
        return f64::NAN;
    }
    if q == 0.0 {
        return f64::NEG_INFINITY;
    }
    if q == 1.0 {
        return f64::INFINITY;
    }
    if q == 0.5 {
        return mean;
    }

    // symmetry reduction
    let (p_left, sign) = if q < 0.5 { (q, -1.0) } else { (1.0 - q, 1.0) };

    // extreme-tail shortcut via erfc⁻¹
    const EPS_DBL: f64 = 1.110_223_024_625_156_5e-16;
    if p_left < EPS_DBL {
        // Φ⁻¹(p) = −√2 · erfc⁻¹(2p)   (for p ≤ 0.5)
        let z_tail = -SQRT_2 * erfc_inv(2.0 * p_left);
        return mean + std * sign * -z_tail; // mirror if q > 0.5
    }

    // Acklam initial approximation
    let mut z = inv_std_normal_core(p_left); // negative

    // one Halley refinement step
    // Halley:  z_{n+1} = z_n − f/f' · (1 + ½ f · f'' / f'^2)
    // Here f = Φ(z) − p,  f' = φ(z),  f'' = −z φ(z)
    let pdf = normal_pdf_scalar(z);
    let cdf = normal_cdf_scalar(z);
    let f = cdf - p_left;
    let u = f / pdf;
    z -= u * (1.0 + 0.5 * z * u); // ≤ 1 ulp after this step

    // reflect to right tail if necessary
    let z_final = sign * -z;

    mean + std * z_final
}

#[cfg(test)]
mod tests {
    use super::*;

    // All tests below were run against SciPy v1.16.0; the code used to produce the reference
    // values is saved under /python/tests/univariate.py.
    // Each test annotates the expected SciPy result that the function matches.

    #[test]
    fn test_ln_gamma() {
        // scipy.special.gammaln(1.0) == 0.0
        assert!((ln_gamma(1.0) - 0.0).abs() < 1e-14);
        // scipy.special.gammaln(5.0) == 3.1780538303479458
        assert!((ln_gamma(5.0) - 3.1780538303479458).abs() < 1e-14);
        // scipy.special.gammaln(0.5) == 0.5723649429247
        assert!((ln_gamma(0.5) - 0.5723649429247).abs() < 1e-14);
        // scipy.special.gammaln(10.1) == 13.027526738633238
        assert!((ln_gamma(10.1) - 13.027526738633238).abs() < 1e-10);
        // scipy.special.gammaln(0.0) == inf
        assert!(ln_gamma(0.0).is_infinite() && ln_gamma(0.0).is_sign_positive());
        // scipy.special.gammaln(-1.0) == inf
        assert!(ln_gamma(-1.0).is_infinite() && ln_gamma(-1.0).is_sign_positive());
        // scipy.special.gammaln(-0.5) == 1.2655121234846454
        assert!((ln_gamma(-0.5) - 1.2655121234846454).abs() < 1e-14);
        // scipy.special.gammaln(-10.1) == -13.020973271011497
        assert!((ln_gamma(-10.1) - -13.020973271011497).abs() < 1e-12);
        // scipy.special.gammaln(np.nan) == nan
        assert!(ln_gamma(f64::NAN).is_nan());
        // scipy.special.gammaln(1e-10) == 23.025850929882733
        assert!((ln_gamma(1e-10) - 23.025850929882733).abs() < 1e-12);
        // scipy.special.gammaln(171.624) == 709.7807744366991
        assert!((ln_gamma(171.624) - 709.7807744366991).abs() < 1e-9);
    }

    #[test]
    fn test_ln_gamma_plus1() {
        // scipy.special.gammaln(6.0) == 4.787491742782046
        assert!((ln_gamma_plus1(5.0) - 4.787491742782046).abs() < 1e-14);
    }

    #[test]
    fn test_gamma_func() {
        // scipy.special.gamma(1.0) == 1.0
        assert!((gamma_func(1.0) - 1.0).abs() < 1e-14);
        // scipy.special.gamma(5.0) == 24.0
        assert!((gamma_func(5.0) - 24.0).abs() < 1e-14);
        // scipy.special.gamma(0.5) == 1.7724538509055159
        assert!((gamma_func(0.5) - 1.7724538509055159).abs() < 1e-14);
        // scipy.special.gamma(10.1) == 454760.7514415855
        assert!((gamma_func(10.1) - 454760.7514415855).abs() < 1e-7);
        // scipy.special.gamma(0.0) == inf
        assert!(gamma_func(0.0).is_infinite() && gamma_func(0.0).is_sign_positive());
        // scipy.special.gamma(-1.0) == nan
        assert!(gamma_func(-1.0).is_nan());
        // scipy.special.gamma(-0.5) == -3.5449077018110318
        assert!((gamma_func(-0.5) + 3.5449077018110318).abs() < 1e-14);
        // scipy.special.gamma(-10.1) == -2.213416583085619e-06
        assert!((gamma_func(-10.1) + 2.213416583085619e-6).abs() < 1e-14);
        // scipy.special.gamma(171.0) == 7.257415615308e+306
        assert!((gamma_func(171.0) - 7.257415615308e+306).abs() < 1e292); // tolerance due to magnitude
        // scipy.special.gamma(np.nan) == nan
        assert!(gamma_func(f64::NAN).is_nan());
        // Large argument, should not panic and should return inf.
        assert!(ln_gamma(1e308).is_infinite());
        // Negative infinity should return NaN by convention.
        assert!(ln_gamma(f64::NEG_INFINITY).is_nan());
        // Positive infinity should return inf
        assert!(ln_gamma(f64::INFINITY).is_infinite());
    }

    #[test]
    fn test_ln_choose() {
        // np.log(scipy.special.comb(5, 2, exact=False)) == 2.302585092994046
        assert!((ln_choose(5, 2) - 2.302585092994046).abs() < 1e-14);
        // k > n, should be -inf
        assert!(ln_choose(2, 3).is_infinite() && ln_choose(2, 3).is_sign_negative());
        // symmetry: C(n,k) == C(n, n-k)
        assert!((ln_choose(100, 3) - ln_choose(100, 97)).abs() < 1e-14);
        // np.log(scipy.special.comb(1000, 10, exact=False)) == 53.927997037888275
        assert!((ln_choose(1000, 10) - 53.927997037888275).abs() < 1e-10);
        // Edge combinatorics
        assert!((ln_choose(10, 0) - 0.0).abs() < 1e-14);
        assert!((ln_choose(10, 10) - 0.0).abs() < 1e-14);
        assert!((ln_choose(0, 0) - 0.0).abs() < 1e-14);
    }

    #[test]
    fn test_incomplete_beta() {
        // scipy.special.betainc(2.0, 2.0, 0.5) == 0.5
        assert!((incomplete_beta(2.0, 2.0, 0.5) - 0.5).abs() < 1e-14);
        // scipy.special.betainc(2.5, 1.5, 0.7) == 0.5843121477019746
        assert!((incomplete_beta(2.5, 1.5, 0.7) - 0.5843121477019746).abs() < 1e-12);
        // scipy.special.betainc(2.0, 2.0, 0.0) == 0.0
        assert!((incomplete_beta(2.0, 2.0, 0.0) - 0.0).abs() < 1e-14);
        // scipy.special.betainc(2.0, 2.0, 1.0) == 1.0
        assert!((incomplete_beta(2.0, 2.0, 1.0) - 1.0).abs() < 1e-14);
        // scipy.special.betainc(0.0, 2.0, 0.5) == 1.0
        assert!((incomplete_beta(0.0, 2.0, 0.5) - 1.0).abs() < 1e-14);
        // scipy.special.betainc(2.0, 0.0, 0.5) == 0.0
        assert!((incomplete_beta(2.0, 0.0, 0.5) - 0.0).abs() < 1e-14);
        // scipy.special.betainc(2.0, 2.0, np.nan) == nan
        assert!(incomplete_beta(2.0, 2.0, f64::NAN).is_nan());

        // Symmetry property Iₓ(a,b)+I_{1-x}(b,a)=1:
        let a = 2.7;
        let b = 5.3;
        let x = 0.4;
        let ix = incomplete_beta(a, b, x);
        let ix2 = incomplete_beta(b, a, 1.0 - x);
        assert!((ix + ix2 - 1.0).abs() < 1e-16);
        // scipy.special.betainc(50.0, 0.5, 0.01) == 7.998227417904836e-102
        assert!((incomplete_beta(50.0, 0.5, 0.01) - 7.998227417904836e-102).abs() < 1e-90);

        // Large a, b
        assert!(incomplete_beta(1e8, 2.0, 0.5).is_finite());
        // x extremely close to 0
        assert!((incomplete_beta(2.0, 3.0, 1e-20) - 0.0).abs() < 1e-20);
        // x extremely close to 1
        assert!((incomplete_beta(2.0, 3.0, 1.0 - 1e-20) - 1.0).abs() < 1e-14);
        // Smallest positive subnormal x
        assert!(incomplete_beta(2.0, 3.0, f64::MIN_POSITIVE).is_finite());

        // Incomplete beta round-trip
        for &a in &[2.0, 5.0, 10.0] {
            for &b in &[2.0, 5.0, 10.0] {
                for &p in &[1e-15, 1e-6, 0.1, 0.5, 0.9, 1.0 - 1e-8, 1.0 - 1e-15] {
                    let x = incomplete_beta_inv(a, b, p);
                    let p2 = incomplete_beta(a, b, x);
                    assert!((p - p2).abs() < 1e-12);
                }
            }
        }
    }

    #[test]
    fn test_incomplete_beta_inv() {
        // scipy.special.betaincinv(2.0, 2.0, 0.5) == 0.5
        assert!((incomplete_beta_inv(2.0, 2.0, 0.5) - 0.5).abs() < 1e-14);
        // scipy.special.betaincinv(2.5, 1.5, 0.8433681004114891) == 0.8595961302031141
        assert!(
            (incomplete_beta_inv(2.5, 1.5, 0.8433681004114891) - 0.8595961302031141).abs() < 1e-12
        );
        // scipy.special.betaincinv(2.0, 2.0, 0.0) == 0.0
        assert!((incomplete_beta_inv(2.0, 2.0, 0.0) - 0.0).abs() < 1e-14);
        // scipy.special.betaincinv(2.0, 2.0, 1.0) == 1.0
        assert!((incomplete_beta_inv(2.0, 2.0, 1.0) - 1.0).abs() < 1e-14);

        // incomplete_beta_inv edge cases
        // scipy.special.betaincinv(2.0, 2.0, np.nan) == nan
        assert!(incomplete_beta_inv(2.0, 2.0, f64::NAN).is_nan());
        // scipy.special.betaincinv(np.nan, 2.0, 0.5) == nan
        assert!(incomplete_beta_inv(f64::NAN, 2.0, 0.5).is_nan());

        // incomplete_beta_inv
        let a = 7.0;
        let b = 3.0;
        let p = 1e-6;
        let x = incomplete_beta_inv(a, b, p);
        let p2 = incomplete_beta(a, b, x);

        println!("{}", (p - p2).abs());
        assert!((p - p2).abs() < 1e-16);
    }

    #[test]
    fn test_reg_lower_gamma() {
        // scipy.special.gammainc(2.0, 2.0) == 0.5939941502901616
        assert!((reg_lower_gamma(2.0, 2.0) - 0.5939941502901616).abs() < 1e-12);
        // scipy.special.gammainc(5.0, 1.0) == 0.003659846827343713
        assert!((reg_lower_gamma(5.0, 1.0) - 0.003659846827343713).abs() < 1e-14);
        // scipy.special.gammainc(2.0, 0.0) == 0.0
        assert!((reg_lower_gamma(2.0, 0.0) - 0.0).abs() < 1e-14);
        // scipy.special.gammainc(0.0, 2.0) == 1.0
        assert!((reg_lower_gamma(0.0, 2.0) - 1.0).abs() < 1e-14);
        // scipy.special.gammainc(2.0, -1.0) == nan
        assert!(reg_lower_gamma(2.0, -1.0).is_nan());
        // scipy.special.gammainc(-1.0, 2.0) == nan
        assert!(reg_lower_gamma(-1.0, 2.0).is_nan());
        // scipy.special.gammainc(np.nan, 2.0) == nan
        assert!(reg_lower_gamma(f64::NAN, 2.0).is_nan());

        // Roundtrip
        let a = 3.5;
        let p = 0.123456;
        let x = inv_reg_lower_gamma(a, p);
        let p2 = reg_lower_gamma(a, x);
        assert!((p - p2).abs() < 1e-10);

        assert!(reg_lower_gamma(1e8, 1e8).is_finite());
        assert!(reg_lower_gamma(1e-20, 1e20).is_finite());
    }

    #[test]
    fn test_inv_reg_lower_gamma() {
        // scipy.special.gammaincinv(2.0, 0.5939941502901616) == 2.0
        assert!((inv_reg_lower_gamma(2.0, 0.5939941502901616) - 2.0).abs() < 1e-10);
        // scipy.special.gammaincinv(5.0, 0.003659846827343713) == 1.0000000000000002
        assert!(
            (inv_reg_lower_gamma(5.0, 0.003659846827343713) - 1.0000000000000002).abs() < 1e-10
        );
        // scipy.special.gammaincinv(2.0, np.nan) == nan
        assert!(inv_reg_lower_gamma(2.0, f64::NAN).is_nan());
        // scipy.special.gammaincinv(np.nan, 2.0) == nan
        assert!(inv_reg_lower_gamma(f64::NAN, 2.0).is_nan());
    }

    #[test]
    fn test_binomial_cdf_scalar() {
        // scipy.stats.binom.cdf(2, 4, 0.5) == 0.6875
        assert!((binomial_cdf_scalar(2, 4, 0.5) - 0.6875).abs() < 1e-14);
        // scipy.stats.binom.cdf(0, 4, 0.5) == 0.0625
        assert!((binomial_cdf_scalar(0, 4, 0.5) - 0.0625).abs() < 1e-14);
        // scipy.stats.binom.cdf(0, 4, 0.0) == 1.0
        assert!((binomial_cdf_scalar(0, 4, 0.0) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(2, 4, 0.0) == 1.0
        assert!((binomial_cdf_scalar(2, 4, 0.0) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(4, 4, 1.0) == 1.0
        assert!((binomial_cdf_scalar(4, 4, 1.0) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(2, 4, 1.0) == 0.0
        assert!((binomial_cdf_scalar(2, 4, 1.0) - 0.0).abs() < 1e-14);

        // For any k >= 0 and p == 0.0, result is 1.0
        // scipy.stats.binom.cdf(10, 4, 0.0) == 1.0
        assert!((binomial_cdf_scalar(10, 4, 0.0) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(0, 0, 0.0) == 1.0
        assert!((binomial_cdf_scalar(0, 0, 0.0) - 1.0).abs() < 1e-14);

        // For any k < n and p == 1.0, result is 0.0
        // scipy.stats.binom.cdf(0, 4, 1.0) == 0.0
        assert!((binomial_cdf_scalar(0, 4, 1.0) - 0.0).abs() < 1e-14);

        // For any k >= n and p == 1.0, result is 1.0
        // scipy.stats.binom.cdf(5, 4, 1.0) == 1.0
        assert!((binomial_cdf_scalar(5, 4, 1.0) - 1.0).abs() < 1e-14);

        // Degenerate n=0, all probability at 0
        // scipy.stats.binom.cdf(0, 0, 0.5) == 1.0
        assert!((binomial_cdf_scalar(0, 0, 0.5) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(1, 0, 0.5) == 1.0
        assert!((binomial_cdf_scalar(1, 0, 0.5) - 1.0).abs() < 1e-14);

        // scipy.stats.binom.cdf(0, 0, 0.5) == 1.0
        assert!((binomial_cdf_scalar(0, 0, 0.5) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(-1, 0, 0.5) == 0.0
        assert!((binomial_cdf_scalar(-1i64, 0, 0.5) - 0.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(0, 0, 0.0) == 1.0
        assert!((binomial_cdf_scalar(0, 0, 0.0) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(0, 0, 1.0) == 1.0
        assert!((binomial_cdf_scalar(0, 0, 1.0) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(10, 0, 0.0) == 1.0
        assert!((binomial_cdf_scalar(10, 0, 0.0) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(10, 0, 1.0) == 1.0
        assert!((binomial_cdf_scalar(10, 0, 1.0) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(10, 10, 0.0) == 1.0
        assert!((binomial_cdf_scalar(10, 10, 0.0) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(10, 10, 1.0) == 1.0
        assert!((binomial_cdf_scalar(10, 10, 1.0) - 1.0).abs() < 1e-14);

        // scipy.stats.binom.cdf(5, 10000, 1e-4) == 0.999406428148761
        assert!((binomial_cdf_scalar(5, 10000, 1e-4) - 0.999406428148761).abs() < 1e-12);

        // Monotonicity & limits:
        for n in [0_i64, 1, 10, 100] {
            for k in 0..=n {
                assert!(
                    binomial_cdf_scalar(k, n.try_into().unwrap(), 0.3)
                        <= binomial_cdf_scalar(k + 1, n.try_into().unwrap(), 0.3)
                );
            }
        }
        assert!((binomial_cdf_scalar(0, 1000000, 0.5) - (0.5f64).powi(1000000)).abs() < 1e-14);
        assert!((binomial_cdf_scalar(1000000, 1000000, 0.5) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(10, 100, sys.float_info.min) == 1.0
        assert!((binomial_cdf_scalar(10, 100, f64::MIN_POSITIVE) - 1.0).abs() < 1e-14);
        // scipy.stats.binom.cdf(10, 100, 1.0 - np.finfo(float).eps) == 0.0
        assert!((binomial_cdf_scalar(10, 100, 1.0 - f64::EPSILON) - 0.0).abs() < 1e-14);
    }

    #[test]
    fn test_binomial_quantile_cornish_fisher() {
        // scipy.stats.binom.ppf(0.5, 10, 0.5) == 5.0
        assert!((binomial_quantile_cornish_fisher(0.5, 10, 0.5) - 5.0).abs() < 1e-14);
        // scipy.stats.binom.ppf(1e-10, 10, 0.5) == 0.0
        assert!((binomial_quantile_cornish_fisher(1e-10, 10, 0.5) - 0.0).abs() < 1e-8);
        // scipy.stats.binom.ppf(1.0 - 1e-10, 10, 0.5) == 10.0
        assert!((binomial_quantile_cornish_fisher(1.0 - 1e-10, 10, 0.5) - 10.0).abs() < 1e-8);

        // scipy.stats.binom.ppf(0.0, 20, 0.3) == -1.0
        assert_eq!(binomial_quantile_cornish_fisher(0.0, 20, 0.3), -1.0);
        // scipy.stats.binom.ppf(1.0, 20, 0.3) == 20
        assert_eq!(binomial_quantile_cornish_fisher(1.0, 20, 0.3), 20.0);
    }

    #[test]
    fn test_ln_gamma_additional_edges() {
        // Poles at all non-positive integers
        for i in 0..100 {
            let x = -(i as f64);
            assert!(ln_gamma(x).is_infinite());
        }

        // Just above/below a pole (should not be infinite or NaN)
        for i in 0..10 {
            let x = -(i as f64);
            let just_above = x + 1e-12;
            let just_below = x - 1e-12;
            assert!(
                ln_gamma(just_above).is_finite(),
                "ln_gamma({}) not finite",
                just_above
            );
            assert!(
                ln_gamma(just_below).is_finite(),
                "ln_gamma({}) not finite",
                just_below
            );
        }
    }
}