// oxiphysics_core/numerics.rs

1#![allow(clippy::needless_range_loop)]
2// Copyright 2026 COOLJAPAN OU (Team KitaSan)
3// SPDX-License-Identifier: Apache-2.0
4
5//! Numerical algorithms for physics simulation.
6//!
7//! Provides root finding, quadrature, finite differences, and special functions
8//! used throughout the OxiPhysics engine.
9
10#![allow(dead_code)]
11
12use std::f64::consts::{FRAC_PI_4, PI};
13
14// ─── Root finding ─────────────────────────────────────────────────────────────
15
/// Result of a root-finding operation.
///
/// Returned by all root finders in this module (`bisect`, `regula_falsi`,
/// `newton`, `brent`, `secant`). When `converged` is `false` the remaining
/// fields still describe the best estimate found before giving up.
#[derive(Debug, Clone)]
pub struct RootResult {
    /// Approximate root location.
    pub root: f64,
    /// Number of iterations taken.
    pub iterations: usize,
    /// Residual |f(root)|.
    pub residual: f64,
    /// Whether the algorithm converged.
    pub converged: bool,
}
28
29/// Bisection root finding on interval \[a, b\].
30///
31/// Requires `f(a)` and `f(b)` to have opposite signs.
32/// Converges in at most `max_iter` iterations.
33pub fn bisect<F: Fn(f64) -> f64>(
34    f: F,
35    mut a: f64,
36    mut b: f64,
37    tol: f64,
38    max_iter: usize,
39) -> RootResult {
40    let mut fa = f(a);
41    let mut mid = a;
42    for i in 0..max_iter {
43        mid = 0.5 * (a + b);
44        let fm = f(mid);
45        if fm.abs() < tol || (b - a) < tol {
46            return RootResult {
47                root: mid,
48                iterations: i + 1,
49                residual: fm.abs(),
50                converged: true,
51            };
52        }
53        if fa * fm < 0.0 {
54            b = mid;
55        } else {
56            a = mid;
57            fa = fm;
58        }
59    }
60    RootResult {
61        root: mid,
62        iterations: max_iter,
63        residual: f(mid).abs(),
64        converged: false,
65    }
66}
67
68/// Regula falsi (Illinois variant) root finding.
69///
70/// Generally faster than bisection while maintaining bracketing.
71pub fn regula_falsi<F: Fn(f64) -> f64>(
72    f: F,
73    mut a: f64,
74    mut b: f64,
75    tol: f64,
76    max_iter: usize,
77) -> RootResult {
78    let mut fa = f(a);
79    let mut fb = f(b);
80    let mut side = 0i32; // illinois side counter
81    for i in 0..max_iter {
82        let c = (a * fb - b * fa) / (fb - fa);
83        let fc = f(c);
84        if fc.abs() < tol {
85            return RootResult {
86                root: c,
87                iterations: i + 1,
88                residual: fc.abs(),
89                converged: true,
90            };
91        }
92        if fa * fc < 0.0 {
93            b = c;
94            fb = fc;
95            if side == 1 {
96                fa *= 0.5;
97            } // Illinois
98            side = 1;
99        } else {
100            a = c;
101            fa = fc;
102            if side == -1 {
103                fb *= 0.5;
104            }
105            side = -1;
106        }
107        if (b - a).abs() < tol {
108            return RootResult {
109                root: c,
110                iterations: i + 1,
111                residual: fc.abs(),
112                converged: true,
113            };
114        }
115    }
116    let c = (a * fb - b * fa) / (fb - fa);
117    RootResult {
118        root: c,
119        iterations: max_iter,
120        residual: f(c).abs(),
121        converged: false,
122    }
123}
124
125/// Newton-Raphson root finding.
126///
127/// Requires a good initial guess and the derivative `df`.
128pub fn newton<F, DF>(f: F, df: DF, x0: f64, tol: f64, max_iter: usize) -> RootResult
129where
130    F: Fn(f64) -> f64,
131    DF: Fn(f64) -> f64,
132{
133    let mut x = x0;
134    for i in 0..max_iter {
135        let fx = f(x);
136        if fx.abs() < tol {
137            return RootResult {
138                root: x,
139                iterations: i,
140                residual: fx.abs(),
141                converged: true,
142            };
143        }
144        let dfx = df(x);
145        if dfx.abs() < f64::EPSILON * 100.0 {
146            break; // near-zero derivative
147        }
148        x -= fx / dfx;
149    }
150    let residual = f(x).abs();
151    RootResult {
152        root: x,
153        iterations: max_iter,
154        residual,
155        converged: residual < tol,
156    }
157}
158
/// Brent's method — robust root finding combining bisection, secant, and inverse quadratic interpolation.
///
/// Requires `f(a)` and `f(b)` to have opposite signs; otherwise returns
/// immediately with `converged == false`. Each iteration attempts inverse
/// quadratic interpolation (or a secant step), falling back to bisection
/// whenever the candidate step fails the standard safeguard conditions.
#[allow(unused_assignments)]
pub fn brent<F: Fn(f64) -> f64>(
    f: F,
    mut a: f64,
    mut b: f64,
    tol: f64,
    max_iter: usize,
) -> RootResult {
    let mut fa = f(a);
    let mut fb = f(b);
    // No sign change: the interval does not bracket a root.
    if fa * fb > 0.0 {
        return RootResult {
            root: a,
            iterations: 0,
            residual: fa.abs(),
            converged: false,
        };
    }
    // Keep `b` as the endpoint with the smaller residual (the best guess).
    if fa.abs() < fb.abs() {
        std::mem::swap(&mut a, &mut b);
        std::mem::swap(&mut fa, &mut fb);
    }
    // `c` is the previous value of `b`; `d` is the value before that.
    let mut c = a;
    let mut fc = fa;
    let mut s = 0.0_f64; // always overwritten before use in loop
    let mut mflag = true; // true when the last accepted step was a bisection
    let mut d = 0.0;
    for i in 0..max_iter {
        if fb.abs() < tol || (b - a).abs() < tol {
            return RootResult {
                root: b,
                iterations: i,
                residual: fb.abs(),
                converged: true,
            };
        }
        if (fa - fc).abs() > f64::EPSILON && (fb - fc).abs() > f64::EPSILON {
            // Inverse quadratic interpolation
            s = a * fb * fc / ((fa - fb) * (fa - fc))
                + b * fa * fc / ((fb - fa) * (fb - fc))
                + c * fa * fb / ((fc - fa) * (fc - fb));
        } else {
            // Two equal residuals make IQI degenerate; use a secant step.
            s = b - fb * (b - a) / (fb - fa);
        }
        // Safeguards: reject `s` outside ((3a+b)/4, b), or steps that do not
        // shrink fast enough relative to the previous one or two steps.
        let cond1 = !(s > (3.0 * a + b) / 4.0 && s < b || s < (3.0 * a + b) / 4.0 && s > b);
        let cond2 = mflag && (s - b).abs() >= (b - c).abs() / 2.0;
        let cond3 = !mflag && (s - b).abs() >= (c - d).abs() / 2.0;
        let cond4 = mflag && (b - c).abs() < tol;
        let cond5 = !mflag && (c - d).abs() < tol;
        if cond1 || cond2 || cond3 || cond4 || cond5 {
            // Fall back to plain bisection.
            s = (a + b) / 2.0;
            mflag = true;
        } else {
            mflag = false;
        }
        let fs = f(s);
        d = c;
        c = b;
        fc = fb;
        // Replace whichever endpoint keeps the root bracketed.
        if fa * fs < 0.0 {
            b = s;
            fb = fs;
        } else {
            a = s;
            fa = fs;
        }
        if fa.abs() < fb.abs() {
            std::mem::swap(&mut a, &mut b);
            std::mem::swap(&mut fa, &mut fb);
        }
    }
    RootResult {
        root: b,
        iterations: max_iter,
        residual: fb.abs(),
        converged: fb.abs() < tol,
    }
}
238
239// ─── Numerical quadrature ─────────────────────────────────────────────────────
240
/// Simpson's rule integration of f over \[a, b\] with n subintervals (n must be even).
///
/// An odd `n` is rounded up to the next even value. Exact for polynomials
/// up to degree 3.
pub fn simpson<F: Fn(f64) -> f64>(f: F, a: f64, b: f64, n: usize) -> f64 {
    // Round up to an even number of subintervals.
    let n = n + n % 2;
    let h = (b - a) / n as f64;
    let mut acc = f(a) + f(b);
    for k in 1..n {
        let xk = a + k as f64 * h;
        // Interior points alternate weights 4, 2, 4, 2, ...
        let weight = if k % 2 == 1 { 4.0 } else { 2.0 };
        acc += weight * f(xk);
    }
    acc * h / 3.0
}
252
/// Gauss-Legendre 5-point quadrature on \[-1, 1\], mapped to \[a, b\].
///
/// Exact for polynomials up to degree 9.
pub fn gauss_legendre5<F: Fn(f64) -> f64>(f: F, a: f64, b: f64) -> f64 {
    // Nodes and weights for 5-point GL on [-1, 1]
    const NODES: [f64; 5] = [
        -0.906_179_845_938_664,
        -0.538_469_310_105_683,
        0.0,
        0.538_469_310_105_683,
        0.906_179_845_938_664,
    ];
    const WEIGHTS: [f64; 5] = [
        0.236_926_885_056_189,
        0.478_628_670_499_366,
        0.568_888_888_888_889,
        0.478_628_670_499_366,
        0.236_926_885_056_189,
    ];
    let center = 0.5 * (a + b);
    let half_width = 0.5 * (b - a);
    let mut acc = 0.0;
    for i in 0..5 {
        // Affine map from [-1, 1] onto [a, b].
        acc += WEIGHTS[i] * f(center + half_width * NODES[i]);
    }
    acc * half_width
}
279
280/// Adaptive Gaussian quadrature (recursive).
281///
282/// Integrates f over \[a,b\] to relative tolerance `tol`.
283pub fn adaptive_integrate<F: Fn(f64) -> f64 + Clone>(
284    f: F,
285    a: f64,
286    b: f64,
287    tol: f64,
288    depth: usize,
289) -> f64 {
290    let whole = gauss_legendre5(f.clone(), a, b);
291    let mid = 0.5 * (a + b);
292    let left = gauss_legendre5(f.clone(), a, mid);
293    let right = gauss_legendre5(f.clone(), mid, b);
294    let error = (left + right - whole).abs();
295    if error < 15.0 * tol || depth == 0 {
296        left + right + (whole - left - right) / 15.0
297    } else {
298        adaptive_integrate(f.clone(), a, mid, tol * 0.5, depth - 1)
299            + adaptive_integrate(f, mid, b, tol * 0.5, depth - 1)
300    }
301}
302
/// Trapezoidal rule for tabulated data.
///
/// Returns 0.0 for fewer than two points.
///
/// # Panics
/// Panics when `x` and `y` have different lengths.
pub fn trapezoid_tabulated(x: &[f64], y: &[f64]) -> f64 {
    assert_eq!(x.len(), y.len());
    let mut area = 0.0;
    for i in 1..x.len() {
        // Panel width times average height.
        area += (x[i] - x[i - 1]) * (y[i - 1] + y[i]) * 0.5;
    }
    area
}
311
312// ─── Finite differences ───────────────────────────────────────────────────────
313
/// First derivative of f at x using centered differences with step h.
///
/// O(h²) accurate for smooth f.
pub fn finite_diff_central<F: Fn(f64) -> f64>(f: F, x: f64, h: f64) -> f64 {
    let forward = f(x + h);
    let backward = f(x - h);
    (forward - backward) / (2.0 * h)
}
318
/// Second derivative of f at x using centered differences.
///
/// Three-point stencil, O(h²) accurate for smooth f.
pub fn finite_diff_second<F: Fn(f64) -> f64>(f: F, x: f64, h: f64) -> f64 {
    let upper = f(x + h);
    let center = f(x);
    let lower = f(x - h);
    (upper - 2.0 * center + lower) / (h * h)
}
323
/// Gradient of f: R^3 → R using central differences.
///
/// Each component is (f(x + h·eᵢ) - f(x - h·eᵢ)) / (2h): O(h²) accurate,
/// six function evaluations total.
pub fn gradient_3d<F: Fn([f64; 3]) -> f64>(f: F, x: [f64; 3], h: f64) -> [f64; 3] {
    let mut grad = [0.0; 3];
    for axis in 0..3 {
        let mut forward = x;
        forward[axis] += h;
        let mut backward = x;
        backward[axis] -= h;
        grad[axis] = (f(forward) - f(backward)) / (2.0 * h);
    }
    grad
}
350
/// Hessian of f: R^n → R as an n×n nested `Vec` using central differences.
///
/// Diagonal entries use the three-point second-difference stencil;
/// off-diagonal entries use the four-point mixed-partial stencil.
/// Both are O(h²) accurate, and the result is symmetric by construction.
pub fn hessian<F: Fn(&[f64]) -> f64>(f: &F, x: &[f64], h: f64) -> Vec<Vec<f64>> {
    let n = x.len();
    let mut h_mat = vec![vec![0.0; n]; n];
    let f0 = f(x);
    for i in 0..n {
        for j in i..n {
            let val = if i == j {
                // ∂²f/∂xᵢ² = (f(x + h·eᵢ) - 2 f(x) + f(x - h·eᵢ)) / h²
                let mut xph = x.to_vec();
                xph[i] += h;
                let mut xmh = x.to_vec();
                xmh[i] -= h;
                (f(&xph) - 2.0 * f0 + f(&xmh)) / (h * h)
            } else {
                // ∂²f/∂xᵢ∂xⱼ via four probe points. These vectors are only
                // allocated on this branch — the previous version built all
                // four for diagonal entries as well and discarded them.
                let mut pp = x.to_vec();
                pp[i] += h;
                pp[j] += h;
                let mut pm = x.to_vec();
                pm[i] += h;
                pm[j] -= h;
                let mut mp = x.to_vec();
                mp[i] -= h;
                mp[j] += h;
                let mut mm = x.to_vec();
                mm[i] -= h;
                mm[j] -= h;
                (f(&pp) - f(&pm) - f(&mp) + f(&mm)) / (4.0 * h * h)
            };
            h_mat[i][j] = val;
            h_mat[j][i] = val;
        }
    }
    h_mat
}
385
386// ─── Special functions ────────────────────────────────────────────────────────
387
/// Gamma function approximation (Lanczos, g=7, 9 coefficients).
///
/// Arguments below 0.5 are handled with the reflection formula
/// Γ(x)·Γ(1-x) = π / sin(πx), keeping the series in its accurate range.
pub fn gamma(x: f64) -> f64 {
    if x < 0.5 {
        return PI / ((PI * x).sin() * gamma(1.0 - x));
    }
    let z = x - 1.0;
    const G: f64 = 7.0;
    const C: [f64; 9] = [
        0.999_999_999_999_809_9,
        676.520_368_121_885_1,
        -1_259.139_216_722_402_8,
        771.323_428_777_653_1,
        -176.615_029_162_140_6,
        12.507_343_278_686_905,
        -0.138_571_095_265_720_12,
        9.984_369_578_019_572e-6,
        1.505_632_735_149_312e-7,
    ];
    let t = z + G + 0.5;
    // Partial-fraction series; C[0] is added last (same order as before).
    let mut partial = 0.0;
    for (k, &coeff) in C[1..].iter().enumerate() {
        partial += coeff / (z + k as f64 + 1.0);
    }
    let ser = partial + C[0];
    (2.0 * PI).sqrt() * ser * t.powf(z + 0.5) * (-t).exp()
}
416
417/// Log gamma function.
418pub fn lgamma(x: f64) -> f64 {
419    if x <= 0.0 {
420        return gamma(x).abs().ln();
421    }
422    if x < 15.0 {
423        return gamma(x).abs().ln();
424    }
425    // For large x use Stirling's series (accurate to machine precision for x ≥ 15)
426    let z = x;
427    z.ln() * (z - 0.5) - z + 0.5 * (2.0 * std::f64::consts::PI).ln() + 1.0 / (12.0 * z)
428        - 1.0 / (360.0 * z.powi(3))
429        + 1.0 / (1260.0 * z.powi(5))
430        - 1.0 / (1680.0 * z.powi(7))
431}
432
/// Factorial n! as f64.
///
/// Computed by direct product, which is exact for n ≤ 18 (results fit in
/// 53 bits) and correctly rounded beyond — the previous gamma-based value
/// carried the Lanczos approximation error even for small integers.
/// Returns `f64::INFINITY` for n ≥ 171, matching f64 overflow of Γ(n+1).
pub fn factorial(n: u64) -> f64 {
    if n > 170 {
        return f64::INFINITY;
    }
    (1..=n).map(|k| k as f64).product()
}
437
438/// Regularized incomplete beta function I(x; a, b).
439///
440/// Uses continued fraction expansion (Lentz method).
441pub fn inc_beta(x: f64, a: f64, b: f64) -> f64 {
442    if !(0.0..=1.0).contains(&x) {
443        return 0.0;
444    }
445    if x == 0.0 {
446        return 0.0;
447    }
448    if x == 1.0 {
449        return 1.0;
450    }
451    // Use symmetry for better convergence
452    if x > (a + 1.0) / (a + b + 2.0) {
453        return 1.0 - inc_beta(1.0 - x, b, a);
454    }
455    let lbeta = lgamma(a) + lgamma(b) - lgamma(a + b);
456    let front = (x.powf(a) * (1.0 - x).powf(b) / a) * (-lbeta).exp();
457    // Lentz continued fraction
458    let mut c = 1.0;
459    let mut d = 1.0 - (a + b) * x / (a + 1.0);
460    if d.abs() < 1e-300 {
461        d = 1e-300;
462    }
463    d = 1.0 / d;
464    let mut cf = d;
465    for m in 1_usize..200 {
466        let mf = m as f64;
467        for step in 0..2 {
468            let num = if step == 0 {
469                -(a + mf) * (a + b + mf) * x / ((a + 2.0 * mf - 1.0) * (a + 2.0 * mf))
470            } else {
471                mf * (b - mf) * x / ((a + 2.0 * mf - 1.0) * (a + 2.0 * mf))
472            };
473            d = 1.0 + num * d;
474            if d.abs() < 1e-300 {
475                d = 1e-300;
476            }
477            d = 1.0 / d;
478            c = 1.0 + num / c;
479            if c.abs() < 1e-300 {
480                c = 1e-300;
481            }
482            let delta = c * d;
483            cf *= delta;
484            if (delta - 1.0).abs() < 1e-14 {
485                break;
486            }
487        }
488    }
489    front * cf
490}
491
/// Error function erf(x) using a rational approximation.
///
/// Abramowitz & Stegun formula 7.1.26; absolute error below ~1.5e-7.
/// Exactly odd by construction: erf(-x) == -erf(x).
pub fn erf(x: f64) -> f64 {
    if x == 0.0 {
        return 0.0;
    }
    let t = 1.0 / (1.0 + 0.3275911 * x.abs());
    // Horner evaluation of the degree-5 polynomial in t.
    let poly = t
        * (0.254829592
            + t * (-0.284496736 + t * (1.421413741 + t * (-1.453152027 + t * 1.061405429))));
    let magnitude = 1.0 - poly * (-x * x).exp();
    if x >= 0.0 { magnitude } else { -magnitude }
}
505
506/// Complementary error function erfc(x) = 1 - erf(x).
507pub fn erfc(x: f64) -> f64 {
508    1.0 - erf(x)
509}
510
511/// Normal CDF Φ(x).
512pub fn normal_cdf(x: f64) -> f64 {
513    0.5 * (1.0 + erf(x / std::f64::consts::SQRT_2))
514}
515
/// Bessel function J0(x) (zeroth order, first kind).
///
/// Rational approximation for |x| < 8 and an asymptotic cosine form beyond,
/// both from Numerical Recipes. NOTE(review): NR quotes roughly
/// single-precision accuracy (~1e-8) — confirm if tighter bounds are needed.
pub fn bessel_j0(x: f64) -> f64 {
    let ax = x.abs();
    if ax < 8.0 {
        // Numerical Recipes rational approximation
        let y = x * x;
        let p1 = 57568490574.0
            + y * (-13362590354.0
                + y * (651619640.7 + y * (-11214424.18 + y * (77392.33017 + y * (-184.9052456)))));
        let q1 = 57568490411.0
            + y * (1029532985.0
                + y * (9494680.718 + y * (59272.64853 + y * (267.8532712 + y * 1.0))));
        p1 / q1
    } else {
        // Asymptotic form: J0(x) ≈ sqrt(2/(πx)) [P(y)cos(xx) - z·Q(y)sin(xx)].
        let z = 8.0 / ax;
        let y = z * z;
        let xx = ax - FRAC_PI_4; // phase shift of π/4 for J0
        let p1 = 1.0
            + y * (-0.001098628627
                + y * (0.000002734511 + y * (-0.000000020761 + y * 0.000000000206)));
        let q1 = -0.01562499995
            + y * (0.000001430488
                + y * (-0.000000006911 + y * (0.000000000077 + y * -0.000000000001)));
        (2.0 / (PI * ax)).sqrt() * (xx.cos() * p1 - z * xx.sin() * q1)
    }
}
542
/// Bessel function J1(x) (first order, first kind).
///
/// Rational approximation for |x| < 8 and an asymptotic cosine form beyond,
/// both from Numerical Recipes. J1 is odd; the small-|x| branch carries the
/// sign through the leading factor `x`, the asymptotic branch flips it
/// explicitly at the end.
pub fn bessel_j1(x: f64) -> f64 {
    let ax = x.abs();
    if ax < 8.0 {
        // Numerical Recipes rational approximation
        let y = x * x;
        let p = x
            * (72362614232.0
                + y * (-7895059235.0
                    + y * (242396853.1
                        + y * (-2972611.439 + y * (15704.48260 + y * (-30.16307633))))));
        let q = 144725228442.0
            + y * (2300535178.0
                + y * (18583304.74 + y * (99447.43394 + y * (376.9991397 + y * 1.0))));
        p / q
    } else {
        let z = 8.0 / ax;
        let y = z * z;
        let xx = ax - 2.356_194_491; // phase shift of 3π/4 (truncated constant, per NR)
        let p1 =
            1.0 + y * (0.00183105 + y * (-0.0000766479 + y * (0.000000567 + y * -0.0000000058)));
        let q1 = 0.04687499995 + y * (-0.00000204 + y * (0.0000000869 + y * -0.000000000395));
        let ans = (2.0 / (PI * ax)).sqrt() * (xx.cos() * p1 - z * xx.sin() * q1);
        // Restore odd symmetry for negative arguments.
        if x < 0.0 { -ans } else { ans }
    }
}
569
570// ─── Interpolation helpers ────────────────────────────────────────────────────
571
/// Linear interpolation scalar: `a + t*(b-a)`.
///
/// `t` is not clamped, so values outside \[0, 1\] extrapolate.
#[inline]
pub fn lerp_scalar(a: f64, b: f64, t: f64) -> f64 {
    let span = b - a;
    a + t * span
}
577
/// Smooth Hermite interpolation (smoothstep).
///
/// Clamps `t` to \[0, 1\] and evaluates 3t² - 2t³ (zero first derivative
/// at both ends).
#[inline]
pub fn smoothstep(t: f64) -> f64 {
    let u = t.clamp(0.0, 1.0);
    let ramp = 3.0 - 2.0 * u;
    u * u * ramp
}
584
/// Smoother step (Ken Perlin's 6th degree).
///
/// Clamps `t` to \[0, 1\] and evaluates 6t⁵ - 15t⁴ + 10t³ (zero first and
/// second derivatives at both ends).
#[inline]
pub fn smootherstep(t: f64) -> f64 {
    let u = t.clamp(0.0, 1.0);
    let poly = u * (u * 6.0 - 15.0) + 10.0;
    u * u * u * poly
}
591
/// Bilinear interpolation on a 2×2 grid (array form).
///
/// `values` = \[f(0,0), f(1,0), f(0,1), f(1,1)\], `tx` and `ty` in \[0,1\].
/// Lerps along x on both rows, then along y between the two results.
pub fn bilinear_arr(values: [f64; 4], tx: f64, ty: f64) -> f64 {
    let lower = values[0] + tx * (values[1] - values[0]);
    let upper = values[2] + tx * (values[3] - values[2]);
    lower + ty * (upper - lower)
}
600
/// Trilinear interpolation on a 2×2×2 grid (array form).
///
/// `values` indexed as \[z0y0x0, z0y0x1, z0y1x0, z0y1x1, z1y0x0, z1y0x1, z1y1x0, z1y1x1\].
/// Lerps along x on all four edges, then along y, then along z.
pub fn trilinear_arr(values: [f64; 8], tx: f64, ty: f64, tz: f64) -> f64 {
    let e00 = values[0] + tx * (values[1] - values[0]);
    let e10 = values[2] + tx * (values[3] - values[2]);
    let e01 = values[4] + tx * (values[5] - values[4]);
    let e11 = values[6] + tx * (values[7] - values[6]);
    let face0 = e00 + ty * (e10 - e00);
    let face1 = e01 + ty * (e11 - e01);
    face0 + tz * (face1 - face0)
}
613
614// ─── Running statistics ───────────────────────────────────────────────────────
615
/// Online (streaming) mean and variance using Welford's algorithm.
///
/// Single pass, numerically stable; supports merging two accumulators.
#[derive(Debug, Clone, Default)]
pub struct OnlineStats {
    n: usize,
    mean: f64,
    m2: f64,
}

impl OnlineStats {
    /// Create a new empty statistics accumulator.
    pub fn new() -> Self {
        Self::default()
    }

    /// Update with a new data point.
    pub fn update(&mut self, x: f64) {
        self.n += 1;
        // Welford: deviation before and after the mean update.
        let before = x - self.mean;
        self.mean += before / self.n as f64;
        let after = x - self.mean;
        self.m2 += before * after;
    }

    /// Current mean (0.0 when empty).
    pub fn mean(&self) -> f64 {
        self.mean
    }

    /// Population variance (0.0 with fewer than two samples).
    pub fn variance(&self) -> f64 {
        match self.n {
            0 | 1 => 0.0,
            n => self.m2 / n as f64,
        }
    }

    /// Sample (Bessel-corrected) variance (0.0 with fewer than two samples).
    pub fn sample_variance(&self) -> f64 {
        match self.n {
            0 | 1 => 0.0,
            n => self.m2 / (n - 1) as f64,
        }
    }

    /// Standard deviation (population).
    pub fn std_dev(&self) -> f64 {
        self.variance().sqrt()
    }

    /// Number of samples.
    pub fn count(&self) -> usize {
        self.n
    }

    /// Merge another accumulator into this one (Chan's parallel update).
    pub fn merge(&mut self, other: &OnlineStats) {
        if other.n == 0 {
            return;
        }
        let combined = self.n + other.n;
        let gap = other.mean - self.mean;
        self.mean = (self.mean * self.n as f64 + other.mean * other.n as f64) / combined as f64;
        self.m2 = self.m2 + other.m2 + gap * gap * (self.n * other.n) as f64 / combined as f64;
        self.n = combined;
    }
}
684
685// ─── CFL number ───────────────────────────────────────────────────────────────
686
/// Compute the CFL (Courant–Friedrichs–Lewy) number.
///
/// CFL = max_velocity * dt / min_cell_size.
/// Returns +∞ for a (near-)zero cell size instead of dividing by zero.
pub fn cfl_number(max_velocity: f64, dt: f64, min_cell_size: f64) -> f64 {
    if min_cell_size < f64::EPSILON {
        f64::INFINITY
    } else {
        max_velocity * dt / min_cell_size
    }
}
696
/// Compute the maximum safe time step for a given CFL target.
///
/// dt = cfl_target * min_cell_size / max_velocity.
/// Returns `f64::MAX` for (near-)zero velocity: no CFL restriction applies.
pub fn dt_from_cfl(cfl_target: f64, max_velocity: f64, min_cell_size: f64) -> f64 {
    if max_velocity < f64::EPSILON {
        f64::MAX
    } else {
        cfl_target * min_cell_size / max_velocity
    }
}
704
705// ─── Polynomial evaluation ────────────────────────────────────────────────────
706
/// Evaluate a polynomial using Horner's method.
///
/// coeffs = \[a0, a1, a2, ...\] for a0 + a1*x + a2*x² + ...
/// An empty coefficient slice evaluates to 0.0.
pub fn horner(coeffs: &[f64], x: f64) -> f64 {
    let mut acc = 0.0;
    for &c in coeffs.iter().rev() {
        acc = acc * x + c;
    }
    acc
}
713
/// Evaluate a Legendre polynomial P_n(x) via recurrence.
///
/// Bonnet recurrence: k·P_k = (2k-1)·x·P_{k-1} - (k-1)·P_{k-2}.
pub fn legendre(n: usize, x: f64) -> f64 {
    match n {
        0 => 1.0,
        1 => x,
        _ => {
            let mut prev = 1.0;
            let mut curr = x;
            for k in 2..=n {
                let kf = k as f64;
                let next = ((2.0 * kf - 1.0) * x * curr - (kf - 1.0) * prev) / kf;
                prev = curr;
                curr = next;
            }
            curr
        }
    }
}
732
/// Chebyshev polynomial T_n(x) of the first kind.
///
/// Recurrence: T_k = 2x·T_{k-1} - T_{k-2}, with T_0 = 1 and T_1 = x.
pub fn chebyshev_t(n: usize, x: f64) -> f64 {
    match n {
        0 => 1.0,
        1 => x,
        _ => {
            let mut prev = 1.0;
            let mut curr = x;
            for _ in 2..=n {
                let next = 2.0 * x * curr - prev;
                prev = curr;
                curr = next;
            }
            curr
        }
    }
}
750
751// ─── Romberg integration ──────────────────────────────────────────────────────
752
/// Romberg integration of f over \[a, b\].
///
/// Uses Richardson extrapolation on the trapezoidal rule.  Returns an estimate
/// accurate to roughly 10^(-2*`order`) for smooth functions.
///
/// `order` is the number of Richardson extrapolation levels (typically 4–8);
/// values below 1 are treated as 1.
pub fn romberg<F: Fn(f64) -> f64>(f: F, a: f64, b: f64, order: usize) -> f64 {
    let levels = order.max(1);
    let mut table = vec![vec![0.0_f64; levels + 1]; levels + 1];
    let width = b - a;
    // Level 0: single trapezoid over the whole interval.
    table[0][0] = 0.5 * width * (f(a) + f(b));
    for row in 1..=levels {
        let pieces = 1usize << row; // 2^row sub-intervals
        let step = width / pieces as f64;
        // Only the new (odd-index) sample points need evaluating; the rest
        // are inherited from the previous level via the halved estimate.
        let mut odd_sum = 0.0;
        let mut k = 1;
        while k < pieces {
            odd_sum += f(a + k as f64 * step);
            k += 2;
        }
        table[row][0] = 0.5 * table[row - 1][0] + step * odd_sum;
        // Richardson extrapolation across the row.
        for col in 1..=row {
            let pow4 = (4_i64.pow(col as u32)) as f64;
            table[row][col] =
                (pow4 * table[row][col - 1] - table[row - 1][col - 1]) / (pow4 - 1.0);
        }
    }
    table[levels][levels]
}
778
779// ─── Gauss-Legendre nodes & weights (n = 2..=10) ─────────────────────────────
780
/// Gauss-Legendre nodes and weights for n-point quadrature on \[-1, 1\].
///
/// Tabulated orders: 2, 3, 4, and 6. Any other `n` — including 5 — falls
/// back to the 5-point rule, so requests for 7, 8, 10, … silently return
/// the 5-point data. Returns `(nodes, weights)`.
pub fn gauss_legendre_nw(n: usize) -> (Vec<f64>, Vec<f64>) {
    // Pre-tabulated nodes and weights from standard references
    match n {
        2 => (
            vec![-0.577_350_269_189_626, 0.577_350_269_189_626],
            vec![1.0, 1.0],
        ),
        3 => (
            vec![-0.774_596_669_241_483, 0.0, 0.774_596_669_241_483],
            vec![
                0.555_555_555_555_556,
                0.888_888_888_888_889,
                0.555_555_555_555_556,
            ],
        ),
        4 => (
            vec![
                -0.861_136_311_594_953,
                -0.339_981_043_584_856,
                0.339_981_043_584_856,
                0.861_136_311_594_953,
            ],
            vec![
                0.347_854_845_137_454,
                0.652_145_154_862_546,
                0.652_145_154_862_546,
                0.347_854_845_137_454,
            ],
        ),
        6 => (
            vec![
                -0.932_469_514_203_152,
                -0.661_209_386_466_265,
                -0.238_619_186_083_197,
                0.238_619_186_083_197,
                0.661_209_386_466_265,
                0.932_469_514_203_152,
            ],
            vec![
                0.171_324_492_379_170,
                0.360_761_573_048_139,
                0.467_913_934_572_691,
                0.467_913_934_572_691,
                0.360_761_573_048_139,
                0.171_324_492_379_170,
            ],
        ),
        _ => {
            // Fall back to 5-point
            (
                vec![
                    -0.906_179_845_938_664,
                    -0.538_469_310_105_683,
                    0.0,
                    0.538_469_310_105_683,
                    0.906_179_845_938_664,
                ],
                vec![
                    0.236_926_885_056_189,
                    0.478_628_670_499_366,
                    0.568_888_888_888_889,
                    0.478_628_670_499_366,
                    0.236_926_885_056_189,
                ],
            )
        }
    }
}
853
854/// Gauss-Legendre quadrature of order n on \[a, b\].
855pub fn gauss_legendre_n<F: Fn(f64) -> f64>(f: F, a: f64, b: f64, n: usize) -> f64 {
856    let (nodes, weights) = gauss_legendre_nw(n);
857    let mid = 0.5 * (a + b);
858    let half = 0.5 * (b - a);
859    nodes
860        .iter()
861        .zip(weights.iter())
862        .map(|(&t, &w)| w * f(mid + half * t))
863        .sum::<f64>()
864        * half
865}
866
867// ─── Richardson extrapolation for derivatives ─────────────────────────────────
868
/// Compute the first derivative of f at x using Richardson extrapolation.
///
/// Combines central differences at steps h and h/2 so the leading O(h²)
/// error term cancels; the result is O(h⁴) accurate for smooth f.
pub fn richardson_derivative<F: Fn(f64) -> f64>(f: F, x: f64, h: f64) -> f64 {
    let coarse = (f(x + h) - f(x - h)) / (2.0 * h);
    let fine = (f(x + h / 2.0) - f(x - h / 2.0)) / h;
    (4.0 * fine - coarse) / 3.0
}
880
/// Compute the second derivative of f at x using Richardson extrapolation.
///
/// Combines three-point second differences at steps h and h/2 to cancel
/// the leading O(h²) error term.
pub fn richardson_second_derivative<F: Fn(f64) -> f64>(f: F, x: f64, h: f64) -> f64 {
    let coarse = (f(x + h) - 2.0 * f(x) + f(x - h)) / (h * h);
    let fine = (f(x + h / 2.0) - 2.0 * f(x) + f(x - h / 2.0)) / ((h / 2.0) * (h / 2.0));
    (4.0 * fine - coarse) / 3.0
}
889
890// ─── Brent method improvement ────────────────────────────────────────────────
891
892/// Solve f(x) = target by translating to a zero-finding problem via Brent.
893///
894/// Convenience wrapper for common use cases.
895pub fn find_x_for_value<F: Fn(f64) -> f64>(
896    f: F,
897    target: f64,
898    a: f64,
899    b: f64,
900    tol: f64,
901    max_iter: usize,
902) -> RootResult {
903    brent(|x| f(x) - target, a, b, tol, max_iter)
904}
905
906// ─── Beta function ────────────────────────────────────────────────────────────
907
908/// Complete beta function B(a, b) = Γ(a)Γ(b)/Γ(a+b).
909pub fn beta(a: f64, b: f64) -> f64 {
910    (lgamma(a) + lgamma(b) - lgamma(a + b)).exp()
911}
912
913// ─── Digamma function ─────────────────────────────────────────────────────────
914
/// Digamma function ψ(x) = d/dx ln(Γ(x)).
///
/// Uses the recurrence ψ(x+1) = ψ(x) + 1/x to push the argument above 6,
/// then an asymptotic (Stirling) series. Returns NaN for x ≤ 0.
pub fn digamma(x: f64) -> f64 {
    if x <= 0.0 {
        return f64::NAN;
    }
    let mut acc = 0.0;
    let mut t = x;
    // Raise the argument into the asymptotic regime, accumulating -1/t.
    while t < 6.0 {
        acc -= 1.0 / t;
        t += 1.0;
    }
    // ψ(t) ≈ ln(t) - 1/(2t) - 1/(12t²) + 1/(120t⁴) - 1/(252t⁶)
    acc += t.ln() - 0.5 / t - 1.0 / (12.0 * t * t) + 1.0 / (120.0 * t.powi(4))
        - 1.0 / (252.0 * t.powi(6));
    acc
}
934
935// ─── Erf / erfinv ─────────────────────────────────────────────────────────────
936
/// Inverse error function erfinv(y) for y ∈ (-1, 1).
///
/// Winitzki's rational approximation (maximum error < 0.00035 on the erf
/// scale). Inputs are clamped just inside (-1, 1) so the log stays finite.
pub fn erfinv(y: f64) -> f64 {
    let y = y.clamp(-1.0 + 1e-15, 1.0 - 1e-15);
    let a = 0.147_f64;
    let log1m_y2 = (1.0 - y * y).ln();
    let shifted = 2.0 / (PI * a) + log1m_y2 / 2.0;
    // max(0.0) guards the radicand against tiny negative rounding error.
    let inner = (shifted * shifted - log1m_y2 / a).max(0.0).sqrt() - shifted;
    let magnitude = inner.max(0.0).sqrt();
    if y >= 0.0 { magnitude } else { -magnitude }
}
951
952// ─── Exponential integral Ei(x) ───────────────────────────────────────────────
953
/// Exponential integral Ei(x) for x > 0.
///
/// Power series for x < 40, asymptotic series beyond; returns -∞ for
/// x ≤ 0 (the principal value is not computed there).
pub fn ei(x: f64) -> f64 {
    if x <= 0.0 {
        return f64::NEG_INFINITY;
    }
    const EULER_MASCHERONI: f64 = 0.577_215_664_901_532_9;
    if x < 40.0 {
        // Power series: Ei(x) = γ + ln(x) + Σ x^n/(n * n!)
        let mut acc = 0.0;
        let mut power = x; // x^n
        let mut fact = 1.0; // n!
        for n in 1_usize..200 {
            fact *= n as f64;
            acc += power / (n as f64 * fact);
            power *= x;
            if power.abs() < 1e-15 * acc.abs() {
                break;
            }
        }
        EULER_MASCHERONI + x.ln() + acc
    } else {
        // Asymptotic series: Ei(x) ≈ exp(x)/x * (1 + 1/x + 2/x² + 6/x³ + ...)
        let mut series = 1.0;
        let mut term = 1.0;
        for k in 1_usize..30 {
            term *= k as f64 / x;
            if term.abs() < 1e-14 {
                break;
            }
            series += term;
        }
        x.exp() / x * series
    }
}
990
991// ─── Secant method ────────────────────────────────────────────────────────────
992
993/// Secant method root finding.
994///
995/// Uses two initial guesses `x0` and `x1` without requiring the derivative.
996/// Converges super-linearly (order ≈ 1.618) for smooth functions.
997pub fn secant<F: Fn(f64) -> f64>(
998    f: F,
999    mut x0: f64,
1000    mut x1: f64,
1001    tol: f64,
1002    max_iter: usize,
1003) -> RootResult {
1004    let mut fx0 = f(x0);
1005    for i in 0..max_iter {
1006        let fx1 = f(x1);
1007        if fx1.abs() < tol {
1008            return RootResult {
1009                root: x1,
1010                iterations: i + 1,
1011                residual: fx1.abs(),
1012                converged: true,
1013            };
1014        }
1015        let denom = fx1 - fx0;
1016        if denom.abs() < f64::EPSILON * 1e4 {
1017            break; // denominator too small
1018        }
1019        let x2 = x1 - fx1 * (x1 - x0) / denom;
1020        x0 = x1;
1021        fx0 = fx1;
1022        x1 = x2;
1023        if (x1 - x0).abs() < tol {
1024            let residual = f(x1).abs();
1025            return RootResult {
1026                root: x1,
1027                iterations: i + 1,
1028                residual,
1029                converged: residual < tol,
1030            };
1031        }
1032    }
1033    let residual = f(x1).abs();
1034    RootResult {
1035        root: x1,
1036        iterations: max_iter,
1037        residual,
1038        converged: residual < tol,
1039    }
1040}
1041
1042// ─── Continued fractions ──────────────────────────────────────────────────────
1043
/// Evaluate a generalised continued fraction using the Lentz algorithm.
///
/// Given sequences `a[i]` (numerators, i ≥ 1) and `b[i]` (denominators, i ≥ 0),
/// computes: b₀ + a₁/(b₁ + a₂/(b₂ + ...))
///
/// # Arguments
/// * `b0`  - Leading term.
/// * `a`   - Numerator terms a\[1\], a\[2\], ...
/// * `b`   - Denominator terms b\[1\], b\[2\], ...
///
/// # Panics
/// Panics if `a` and `b` differ in length.
pub fn continued_fraction(b0: f64, a: &[f64], b: &[f64]) -> f64 {
    assert_eq!(a.len(), b.len(), "a and b must have equal length");
    if a.is_empty() {
        return b0;
    }
    // Modified Lentz: replace near-zero intermediates with a tiny value so the
    // running ratios C and D never divide by zero.
    const TINY: f64 = 1e-300;
    let guard = |v: f64| if v.abs() < TINY { TINY } else { v };
    let mut value = guard(b0);
    let mut c = value;
    let mut d = 0.0_f64;
    for (&ai, &bi) in a.iter().zip(b) {
        d = 1.0 / guard(bi + ai * d);
        c = guard(bi + ai / c);
        value *= c * d;
    }
    value
}
1080
/// Evaluate the natural logarithm via its continued fraction representation.
///
/// Euler's continued fraction:
/// ln(1 + x) = x/(1 + 1²x/(2 + 1²x/(3 + 2²x/(4 + 2²x/(5 + 3²x/(6 + ...))))))
/// — converges for |x| < 1.
/// This is provided as a demonstration; use `f64::ln` for production code.
pub fn ln_via_cf(x: f64) -> f64 {
    // Denominators are b_k = k; the numerator above b_k (k ≥ 2) is ⌊k/2⌋²·x.
    // (The previous version used k·x numerators and a bogus "scale" factor of
    // n, which returned ≈ n·x instead of ≈ x as x → 0.)
    // 60 levels give full f64 precision for |x| ≤ 0.9.
    let n = 60_usize;
    // Evaluate from the innermost denominator outward.
    let mut tail = n as f64; // b_n
    for k in (1..n).rev() {
        let m = ((k + 1) / 2) as f64; // ⌊(k+1)/2⌋, the square root of the numerator coefficient
        tail = k as f64 + (m * m * x) / tail;
    }
    x / tail
}
1098
/// Compute √2 via its continued fraction \[1; 2, 2, 2, ...\].
///
/// Returns the rational approximant after `depth` partial quotients.
pub fn sqrt2_cf(depth: usize) -> f64 {
    // Iterating x ↦ 1 + 1/(1 + x) from x = 1 walks down the convergents
    // 1, 3/2, 7/5, 17/12, ... of the CF [1; 2, 2, 2, ...].
    (0..depth).fold(1.0_f64, |approx, _| 1.0 + 1.0 / (1.0 + approx))
}
1109
1110// ─── Bernoulli numbers ────────────────────────────────────────────────────────
1111
/// Compute the first `n` Bernoulli numbers B₀, B₁, ..., B_{n-1}.
///
/// Uses the recurrence  B_m = -1/(m+1) · Σ_{k=0}^{m-1} C(m+1, k)·B_k  with the
/// seeds B₀ = 1 and B₁ = -1/2 (odd-index values beyond B₁ come out ≈ 0).
pub fn bernoulli_numbers(n: usize) -> Vec<f64> {
    let mut coeffs = Vec::with_capacity(n);
    if n == 0 {
        return coeffs;
    }
    coeffs.push(1.0); // B0
    if n > 1 {
        coeffs.push(-0.5); // B1
    }
    for m in 2..n {
        // Accumulate Σ C(m+1, k)·B_k over the numbers found so far, updating
        // the binomial incrementally: C(m+1, k+1) = C(m+1, k)·(m+1-k)/(k+1).
        let mut total = 0.0_f64;
        let mut binom = 1.0_f64; // C(m+1, 0)
        for (k, &bk) in coeffs.iter().enumerate() {
            total += binom * bk;
            binom *= (m + 1 - k) as f64 / (k + 1) as f64;
        }
        coeffs.push(-total / (m + 1) as f64);
    }
    coeffs
}
1142
1143// ─── Stirling approximation ───────────────────────────────────────────────────
1144
/// Stirling's approximation for ln(n!).
///
/// Evaluates  n·ln(n) - n + ½·ln(2πn) + 1/(12n) - 1/(360n³) + 1/(1260n⁵)
/// (leading terms of Binet's series). Returns 0 for n ≤ 0, matching ln(0!) = 0.
/// Very accurate for large n; use `lgamma(n+1)` for small n.
pub fn stirling_ln_factorial(n: f64) -> f64 {
    if n <= 0.0 {
        return 0.0;
    }
    let leading = n * n.ln() - n + 0.5 * (2.0 * PI * n).ln();
    // Three correction terms of the Stirling (Binet) series.
    let c1 = 1.0 / (12.0 * n);
    let c2 = 1.0 / (360.0 * n.powi(3));
    let c3 = 1.0 / (1260.0 * n.powi(5));
    leading + c1 - c2 + c3
}

/// Stirling's approximation for n! as f64.
///
/// Exponentiates [`stirling_ln_factorial`].  Overflows for large n.
pub fn stirling_factorial(n: f64) -> f64 {
    f64::exp(stirling_ln_factorial(n))
}
1164
1165/// Relative error of Stirling's approximation vs exact ln(Γ(n+1)).
1166///
1167/// Returns `|stirling - exact| / |exact|`.
1168pub fn stirling_relative_error(n: f64) -> f64 {
1169    let exact = lgamma(n + 1.0);
1170    let approx = stirling_ln_factorial(n);
1171    if exact.abs() < 1e-300 {
1172        return (approx - exact).abs();
1173    }
1174    (approx - exact).abs() / exact.abs()
1175}
1176
1177// ─── Tests ────────────────────────────────────────────────────────────────────
1178
#[cfg(test)]
mod tests {
    use super::*;

    // All helpers under test come from the parent module via the glob import
    // above. Tolerances are loose wherever the underlying method is an
    // approximation (e.g. Winitzki erfinv, Stirling's series).

    #[test]
    fn test_bisect_sqrt2() {
        let result = bisect(|x| x * x - 2.0, 1.0, 2.0, 1e-12, 100);
        assert!(result.converged);
        assert!((result.root - 2.0_f64.sqrt()).abs() < 1e-10);
    }

    #[test]
    fn test_newton_sqrt2() {
        let result = newton(|x| x * x - 2.0, |x| 2.0 * x, 1.5, 1e-12, 50);
        assert!(result.converged);
        assert!((result.root - 2.0_f64.sqrt()).abs() < 1e-10);
    }

    #[test]
    fn test_brent_cubic() {
        // x^3 - x - 2 has root at x≈1.5213797...
        let result = brent(|x| x * x * x - x - 2.0, 1.0, 2.0, 1e-12, 100);
        assert!(result.converged);
        assert!((result.root - 1.5213797).abs() < 1e-5);
    }

    #[test]
    fn test_simpson_sin() {
        // ∫[0,π] sin(x) dx = 2
        let result = simpson(f64::sin, 0.0, PI, 100);
        assert!((result - 2.0).abs() < 1e-6, "Simpson: {result}");
    }

    #[test]
    fn test_gauss_legendre5_polynomial() {
        // ∫[0,1] x^4 dx = 1/5
        let result = gauss_legendre5(|x| x * x * x * x, 0.0, 1.0);
        assert!((result - 0.2).abs() < 1e-12, "GL5: {result}");
    }

    #[test]
    fn test_adaptive_integrate() {
        // ∫[0,1] x^2 dx = 1/3
        let result = adaptive_integrate(|x| x * x, 0.0, 1.0, 1e-10, 20);
        assert!((result - 1.0 / 3.0).abs() < 1e-8);
    }

    #[test]
    fn test_finite_diff_central_cos() {
        // d/dx sin(x) at x=1.0 ≈ cos(1.0)
        let d = finite_diff_central(f64::sin, 1.0, 1e-6);
        assert!((d - 1.0_f64.cos()).abs() < 1e-8);
    }

    #[test]
    fn test_erf_known_values() {
        assert!(erf(0.0).abs() < 1e-7, "erf(0)={}", erf(0.0));
        assert!((erf(1.0) - 0.842_700_792_9).abs() < 1e-5);
        assert!((erf(2.0) - 0.995_322_265_0).abs() < 1e-5);
    }

    #[test]
    fn test_normal_cdf_symmetry() {
        assert!((normal_cdf(0.0) - 0.5).abs() < 1e-10);
        assert!((normal_cdf(-1.0) + normal_cdf(1.0) - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_bessel_j0_zeros() {
        // First zero of J0 is at ≈ 2.4048
        let j0 = bessel_j0(2.4048);
        assert!(j0.abs() < 0.01, "J0 near first zero: {j0}");
    }

    #[test]
    fn test_gamma_integers() {
        // Γ(n) = (n-1)! for positive integers.
        assert!((gamma(1.0) - 1.0).abs() < 1e-10);
        assert!((gamma(2.0) - 1.0).abs() < 1e-10);
        assert!((gamma(3.0) - 2.0).abs() < 1e-10);
        assert!((gamma(4.0) - 6.0).abs() < 1e-10);
        assert!((gamma(5.0) - 24.0).abs() < 1e-8);
    }

    #[test]
    fn test_horner() {
        // 2 + 3x + 4x² at x=2 = 2 + 6 + 16 = 24
        assert!((horner(&[2.0, 3.0, 4.0], 2.0) - 24.0).abs() < 1e-10);
    }

    #[test]
    fn test_legendre_orthogonality() {
        // P0(1) = 1, P1(1) = 1, P2(1) = 1
        assert!((legendre(0, 1.0) - 1.0).abs() < 1e-10);
        assert!((legendre(1, 1.0) - 1.0).abs() < 1e-10);
        assert!((legendre(2, 0.0) + 0.5).abs() < 1e-10); // P2(0) = -1/2
    }

    #[test]
    fn test_smoothstep() {
        assert!(smoothstep(-1.0).abs() < 1e-10);
        assert!((smoothstep(1.0) - 1.0).abs() < 1e-10);
        assert!((smoothstep(0.5) - 0.5).abs() < 1e-10);
    }

    #[test]
    fn test_online_stats_welford() {
        let mut stats = OnlineStats::new();
        for x in [1.0, 2.0, 3.0, 4.0, 5.0] {
            stats.update(x);
        }
        assert!((stats.mean() - 3.0).abs() < 1e-10);
        assert!((stats.variance() - 2.0).abs() < 1e-10);
    }

    #[test]
    fn test_cfl_number() {
        let cfl = cfl_number(10.0, 0.01, 0.1);
        assert!((cfl - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_dt_from_cfl() {
        let dt = dt_from_cfl(0.5, 100.0, 0.1);
        assert!((dt - 0.0005).abs() < 1e-10);
    }

    #[test]
    fn test_trapezoid_tabulated() {
        let x = [0.0, 1.0, 2.0, 3.0];
        let y = [0.0, 1.0, 4.0, 9.0]; // x^2
        // ∫ x^2 dx ≈ trapezoid: 0.5+2.5+6.5 = 9.5 (not exact but check formula)
        let result = trapezoid_tabulated(&x, &y);
        assert!(result > 0.0, "trapezoidal result should be positive");
    }

    #[test]
    fn test_bilinear() {
        // f(0,0)=0, f(1,0)=1, f(0,1)=0, f(1,1)=1 → at (0.5, 0.5) = 0.5
        let result = bilinear_arr([0.0, 1.0, 0.0, 1.0], 0.5, 0.5);
        assert!((result - 0.5).abs() < 1e-10);
    }

    // ── New function tests ────────────────────────────────────────────────────

    #[test]
    fn test_romberg_sin() {
        // ∫[0,π] sin(x) dx = 2
        let result = romberg(f64::sin, 0.0, PI, 10);
        assert!((result - 2.0).abs() < 1e-9, "Romberg: {result}");
    }

    #[test]
    fn test_romberg_polynomial() {
        // ∫[0,1] x^3 dx = 0.25
        let result = romberg(|x| x * x * x, 0.0, 1.0, 8);
        assert!((result - 0.25).abs() < 1e-10, "Romberg x^3: {result}");
    }

    #[test]
    fn test_gauss_legendre_n_sin() {
        // ∫[0,π] sin(x) dx = 2 (GL6 is approximate for non-polynomial sin)
        let result = gauss_legendre_n(f64::sin, 0.0, PI, 6);
        assert!((result - 2.0).abs() < 1e-6, "GL6 sin: {result}");
    }

    #[test]
    fn test_gauss_legendre_n_degree9() {
        // ∫[-1,1] x^8 dx = 2/9; GL6 is exact for degree ≤ 11
        let result = gauss_legendre_n(|x| x.powi(8), -1.0, 1.0, 6);
        assert!((result - 2.0 / 9.0).abs() < 1e-10, "GL6 x^8: {result}");
    }

    #[test]
    fn test_gauss_legendre_nw_orders() {
        // n=2,3,4 should all integrate x^2 on [-1,1] = 2/3 exactly
        for n in [2, 3, 4, 6] {
            let result = gauss_legendre_n(|x| x * x, -1.0, 1.0, n);
            assert!((result - 2.0 / 3.0).abs() < 1e-8, "GL{n} x^2: {result}");
        }
    }

    #[test]
    fn test_richardson_derivative_cos() {
        // d/dx sin(x) at x=1.0 = cos(1.0)
        let d = richardson_derivative(f64::sin, 1.0, 1e-4);
        assert!(
            (d - 1.0_f64.cos()).abs() < 1e-10,
            "Richardson d/dx sin: {d}"
        );
    }

    #[test]
    fn test_richardson_derivative_polynomial() {
        // d/dx x^3 at x=2.0 = 12.0
        let d = richardson_derivative(|x: f64| x.powi(3), 2.0, 1e-4);
        assert!((d - 12.0).abs() < 1e-8, "Richardson d/dx x^3: {d}");
    }

    #[test]
    fn test_richardson_second_derivative() {
        // d²/dx² sin(x) at x=1.0 = -sin(1.0)
        let d2 = richardson_second_derivative(f64::sin, 1.0, 1e-4);
        assert!(
            (d2 + 1.0_f64.sin()).abs() < 1e-5,
            "Richardson d2/dx2 sin: {d2}"
        );
    }

    #[test]
    fn test_richardson_second_derivative_polynomial() {
        // d²/dx² x^4 at x=2.0 = 12 * 4 = 48
        let d2 = richardson_second_derivative(|x: f64| x.powi(4), 2.0, 1e-3);
        assert!((d2 - 48.0).abs() < 1e-5, "Richardson d2/dx2 x^4: {d2}");
    }

    #[test]
    fn test_find_x_for_value() {
        // find x such that x^2 = 9 in [0, 5] → x = 3
        let result = find_x_for_value(|x| x * x, 9.0, 0.0, 5.0, 1e-12, 100);
        assert!(result.converged);
        assert!((result.root - 3.0).abs() < 1e-9, "find_x: {}", result.root);
    }

    #[test]
    fn test_beta_symmetry() {
        // B(a, b) = B(b, a)
        let b1 = beta(2.0, 3.0);
        let b2 = beta(3.0, 2.0);
        assert!((b1 - b2).abs() < 1e-12, "Beta symmetry: {b1} vs {b2}");
    }

    #[test]
    fn test_beta_known_value() {
        // B(1, 1) = 1; B(2, 2) = 1/6
        assert!((beta(1.0, 1.0) - 1.0).abs() < 1e-10);
        assert!((beta(2.0, 2.0) - 1.0 / 6.0).abs() < 1e-10);
    }

    #[test]
    fn test_beta_half_integer() {
        // B(0.5, 0.5) = π
        assert!(
            (beta(0.5, 0.5) - PI).abs() < 1e-8,
            "B(1/2,1/2)={}",
            beta(0.5, 0.5)
        );
    }

    #[test]
    fn test_digamma_integer() {
        // ψ(1) = -γ ≈ -0.5772156649
        let psi1 = digamma(1.0);
        assert!((psi1 + 0.5772156649).abs() < 1e-5, "ψ(1)={psi1}");
        // ψ(2) = 1 - γ ≈ 0.4227843351
        let psi2 = digamma(2.0);
        assert!((psi2 - 0.4227843351).abs() < 1e-5, "ψ(2)={psi2}");
    }

    #[test]
    fn test_digamma_recurrence() {
        // ψ(x+1) = ψ(x) + 1/x
        let x = 3.7;
        assert!((digamma(x + 1.0) - digamma(x) - 1.0 / x).abs() < 1e-8);
    }

    #[test]
    fn test_erfinv_roundtrip() {
        // erfinv(erf(x)) ≈ x for |x| ≤ 0.5 (Winitzki approx, max error ~0.00035)
        for &x in &[0.0_f64, 0.3, -0.3, 0.5] {
            let y = erf(x);
            let x_back = erfinv(y);
            assert!((x_back - x).abs() < 5e-3, "erfinv(erf({x}))={x_back}");
        }
    }

    #[test]
    fn test_erfinv_known() {
        // erfinv(0) = 0
        assert!(erfinv(0.0).abs() < 1e-6, "erfinv(0)={}", erfinv(0.0));
    }

    #[test]
    fn test_ei_small() {
        // Ei(1) ≈ 1.8951178163559366
        let val = ei(1.0);
        assert!((val - 1.8951178163559366).abs() < 1e-6, "Ei(1)={val}");
    }

    #[test]
    fn test_ei_large() {
        // Ei(10) ≈ 2492.2289 (known value)
        let val = ei(10.0);
        assert!((val - 2492.2289).abs() < 0.5, "Ei(10)={val}");
    }

    #[test]
    fn test_ei_negative_returns_neg_inf() {
        // ei() only covers x > 0; non-positive input yields -∞.
        assert_eq!(ei(-1.0), f64::NEG_INFINITY);
        assert_eq!(ei(0.0), f64::NEG_INFINITY);
    }

    // ── Secant method ─────────────────────────────────────────────────────────

    #[test]
    fn test_secant_sqrt2() {
        let result = secant(|x| x * x - 2.0, 1.0, 2.0, 1e-12, 50);
        assert!(result.converged, "secant did not converge");
        assert!(
            (result.root - 2.0_f64.sqrt()).abs() < 1e-10,
            "root={}",
            result.root
        );
    }

    #[test]
    fn test_secant_cubic() {
        // x^3 - x - 2 root ≈ 1.5213797
        let result = secant(|x| x * x * x - x - 2.0, 1.0, 2.0, 1e-10, 50);
        assert!(result.converged);
        assert!(
            (result.root - 1.5213797).abs() < 1e-6,
            "root={}",
            result.root
        );
    }

    #[test]
    fn test_secant_sin() {
        // sin(x) = 0 near x = π
        let result = secant(f64::sin, 3.0, 4.0, 1e-12, 50);
        assert!(result.converged);
        assert!((result.root - PI).abs() < 1e-10, "root={}", result.root);
    }

    #[test]
    fn test_secant_exact_root() {
        // x - 5 = 0 → root = 5
        let result = secant(|x| x - 5.0, 4.0, 6.0, 1e-12, 50);
        assert!(result.converged);
        assert!((result.root - 5.0).abs() < 1e-10);
    }

    // ── Continued fractions ───────────────────────────────────────────────────

    #[test]
    fn test_continued_fraction_constant() {
        // b0 + a1/(b1) with a=[1], b=[1] → 1 + 1/1 = 2
        let val = continued_fraction(1.0, &[1.0], &[1.0]);
        assert!((val - 2.0).abs() < 1e-12, "cf={val}");
    }

    #[test]
    fn test_continued_fraction_golden_ratio() {
        // Golden ratio φ = 1 + 1/(1 + 1/(1 + ...)) ≈ 1.618...
        // Finite CF: b0=1, a=[1,1,1,...] b=[1,1,1,...]
        let n = 30;
        let a = vec![1.0_f64; n];
        let b = vec![1.0_f64; n];
        let phi = continued_fraction(1.0, &a, &b);
        let golden = (1.0 + 5.0_f64.sqrt()) / 2.0;
        assert!((phi - golden).abs() < 1e-6, "phi={phi}, golden={golden}");
    }

    #[test]
    fn test_continued_fraction_empty() {
        // No terms → just b0
        let val = continued_fraction(3.125, &[], &[]);
        assert!((val - 3.125).abs() < 1e-12);
    }

    #[test]
    fn test_sqrt2_cf_converges() {
        // sqrt(2) ≈ 1.41421356...
        let approx = sqrt2_cf(40);
        assert!((approx - 2.0_f64.sqrt()).abs() < 1e-12, "sqrt2_cf={approx}");
    }

    #[test]
    fn test_sqrt2_cf_improves_with_depth() {
        // Deeper truncation of [1; 2, 2, ...] must reduce the error.
        let d5 = sqrt2_cf(5);
        let d20 = sqrt2_cf(20);
        let exact = 2.0_f64.sqrt();
        assert!(
            (d20 - exact).abs() < (d5 - exact).abs(),
            "deeper CF should be more accurate"
        );
    }

    // ── Bernoulli numbers ─────────────────────────────────────────────────────

    #[test]
    fn test_bernoulli_b0_is_one() {
        let b = bernoulli_numbers(1);
        assert!((b[0] - 1.0).abs() < 1e-12, "B0={}", b[0]);
    }

    #[test]
    fn test_bernoulli_b1_is_minus_half() {
        let b = bernoulli_numbers(2);
        assert!((b[1] + 0.5).abs() < 1e-12, "B1={}", b[1]);
    }

    #[test]
    fn test_bernoulli_odd_are_zero() {
        let b = bernoulli_numbers(10);
        // B3, B5, B7, B9 should be 0 (odd index > 1)
        for k in [3, 5, 7, 9] {
            assert!(b[k].abs() < 1e-12, "B{k}={}", b[k]);
        }
    }

    #[test]
    fn test_bernoulli_b2_is_one_sixth() {
        let b = bernoulli_numbers(3);
        assert!((b[2] - 1.0 / 6.0).abs() < 1e-12, "B2={}", b[2]);
    }

    #[test]
    fn test_bernoulli_b4_is_minus_one_thirtieth() {
        let b = bernoulli_numbers(5);
        assert!((b[4] + 1.0 / 30.0).abs() < 1e-12, "B4={}", b[4]);
    }

    #[test]
    fn test_bernoulli_b6_is_one_fortysecond() {
        let b = bernoulli_numbers(7);
        assert!((b[6] - 1.0 / 42.0).abs() < 1e-12, "B6={}", b[6]);
    }

    #[test]
    fn test_bernoulli_count() {
        let b = bernoulli_numbers(8);
        assert_eq!(b.len(), 8);
    }

    #[test]
    fn test_bernoulli_empty() {
        let b = bernoulli_numbers(0);
        assert!(b.is_empty());
    }

    // ── Stirling approximation ────────────────────────────────────────────────

    #[test]
    fn test_stirling_large_n_accuracy() {
        // For n=100, Stirling should be very accurate
        let err = stirling_relative_error(100.0);
        assert!(err < 1e-10, "Stirling relative error at n=100: {err}");
    }

    #[test]
    fn test_stirling_n10() {
        // ln(10!) = ln(3628800) ≈ 15.1044...
        let exact_ln_10_fact = 15.104_412_573_075_518;
        let approx = stirling_ln_factorial(10.0);
        assert!(
            (approx - exact_ln_10_fact).abs() < 0.01,
            "Stirling ln(10!)={approx}"
        );
    }

    #[test]
    fn test_stirling_n1000_very_accurate() {
        // Stirling's approximation is very accurate for large n; relative error
        // (against lgamma(n+1)) should be well below 1e-8 at n=1000.
        let err = stirling_relative_error(1000.0);
        assert!(err < 1e-8, "Stirling relative error at n=1000: {err}");
    }

    #[test]
    fn test_stirling_factorial_50() {
        // Compare Stirling(50) vs lgamma(51)
        let exact_ln = lgamma(51.0);
        let approx_ln = stirling_ln_factorial(50.0);
        let rel_err = (approx_ln - exact_ln).abs() / exact_ln.abs();
        assert!(
            rel_err < 1e-10,
            "Stirling ln(50!)={approx_ln}, exact={exact_ln}"
        );
    }

    #[test]
    fn test_stirling_zero() {
        // 0! = 1, ln(1) = 0; function returns 0 for n≤0
        assert!(stirling_ln_factorial(0.0).abs() < 1e-12);
    }

    // ── Additional special function tests ─────────────────────────────────────

    #[test]
    fn test_bessel_j1_at_zero() {
        // J1(0) = 0
        assert!(bessel_j1(0.0).abs() < 1e-10, "J1(0)={}", bessel_j1(0.0));
    }

    #[test]
    fn test_bessel_j1_negative_antisymmetry() {
        // J1(-x) = -J1(x)
        let x = 2.5;
        assert!((bessel_j1(-x) + bessel_j1(x)).abs() < 1e-12);
    }

    #[test]
    fn test_bessel_j0_at_large_x() {
        // J0 decays for large x; |J0(100)| < 1
        assert!(bessel_j0(100.0).abs() < 1.0);
    }

    #[test]
    fn test_erfc_complement() {
        // erfc(x) + erf(x) = 1
        for &x in &[0.0_f64, 0.5, 1.0, 2.0] {
            let sum = erf(x) + erfc(x);
            assert!((sum - 1.0).abs() < 1e-10, "erf+erfc≠1 at x={x}: {sum}");
        }
    }

    #[test]
    fn test_gamma_half() {
        // Γ(1/2) = √π
        let g = gamma(0.5);
        assert!((g - PI.sqrt()).abs() < 1e-8, "Γ(0.5)={g}");
    }

    #[test]
    fn test_brent_sin_root() {
        // sin(x) = 0 in [3, 4]: root at π
        let result = brent(f64::sin, 3.0, 4.0, 1e-12, 100);
        assert!(result.converged);
        assert!((result.root - PI).abs() < 1e-10);
    }

    #[test]
    fn test_chebyshev_t_known() {
        // T0(x) = 1, T1(x) = x, T2(x) = 2x²-1
        assert!((chebyshev_t(0, 0.7) - 1.0).abs() < 1e-12);
        assert!((chebyshev_t(1, 0.7) - 0.7).abs() < 1e-12);
        assert!((chebyshev_t(2, 0.7) - (2.0 * 0.7 * 0.7 - 1.0)).abs() < 1e-12);
    }

    #[test]
    fn test_finite_diff_second_sin() {
        // d²/dx² sin(x) = -sin(x)
        let d2 = finite_diff_second(f64::sin, 1.0, 1e-4);
        assert!((d2 + 1.0_f64.sin()).abs() < 1e-5, "d2={d2}");
    }

    #[test]
    fn test_hessian_quadratic() {
        // f(x,y) = x² + 2y²; Hessian = [[2, 0], [0, 4]]
        let f = |v: &[f64]| v[0] * v[0] + 2.0 * v[1] * v[1];
        let h = hessian(&f, &[1.0, 1.0], 1e-4);
        assert!((h[0][0] - 2.0).abs() < 1e-6, "H[0][0]={}", h[0][0]);
        assert!((h[1][1] - 4.0).abs() < 1e-6, "H[1][1]={}", h[1][1]);
        assert!(h[0][1].abs() < 1e-6, "H[0][1]={}", h[0][1]);
    }

    #[test]
    fn test_regula_falsi_sqrt3() {
        let result = regula_falsi(|x| x * x - 3.0, 1.0, 2.0, 1e-12, 100);
        assert!(result.converged);
        assert!((result.root - 3.0_f64.sqrt()).abs() < 1e-10);
    }

    #[test]
    fn test_online_stats_merge() {
        // Merged stats must match the concatenated samples 1..=5 (mean 3).
        let mut s1 = OnlineStats::new();
        let mut s2 = OnlineStats::new();
        for &x in &[1.0_f64, 2.0, 3.0] {
            s1.update(x);
        }
        for &x in &[4.0_f64, 5.0] {
            s2.update(x);
        }
        s1.merge(&s2);
        assert_eq!(s1.count(), 5);
        assert!((s1.mean() - 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_gradient_3d_sphere() {
        // f(x,y,z) = x²+y²+z², ∇f = [2x, 2y, 2z]
        let f = |v: [f64; 3]| v[0] * v[0] + v[1] * v[1] + v[2] * v[2];
        let g = gradient_3d(f, [1.0, 2.0, 3.0], 1e-5);
        assert!((g[0] - 2.0).abs() < 1e-8);
        assert!((g[1] - 4.0).abs() < 1e-8);
        assert!((g[2] - 6.0).abs() < 1e-8);
    }

    #[test]
    fn test_inc_beta_symmetry() {
        // I(x; a, b) + I(1-x; b, a) = 1
        let x = 0.3;
        let a = 2.0;
        let b = 3.0;
        assert!((inc_beta(x, a, b) + inc_beta(1.0 - x, b, a) - 1.0).abs() < 1e-8);
    }

    #[test]
    fn test_trilinear_arr_corners() {
        let vals = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        assert!((trilinear_arr(vals, 0.0, 0.0, 0.0) - 1.0).abs() < 1e-12);
        assert!((trilinear_arr(vals, 1.0, 1.0, 1.0) - 8.0).abs() < 1e-12);
    }

    #[test]
    fn test_smootherstep_boundary() {
        assert!((smootherstep(0.0)).abs() < 1e-12);
        assert!((smootherstep(1.0) - 1.0).abs() < 1e-12);
        // derivative at endpoints should be 0 (smootherstep has zero first and second derivative)
        let d = (smootherstep(0.001) - smootherstep(0.0)) / 0.001;
        assert!(d < 0.01, "derivative at 0 should be near 0: {d}");
    }
}