// oxilean_std/numerical_analysis/types.rs
1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use super::functions::*;
6/// Tikhonov-regularized least-squares solver: minimizes ||Ax - b||² + λ||x||².
7///
8/// Solves the normal equations (AᵀA + λI)x = Aᵀb using Gaussian elimination.
9#[allow(dead_code)]
10pub struct TikhonovSolver {
11    pub lambda: f64,
12}
13#[allow(dead_code)]
14impl TikhonovSolver {
15    /// Create a new `TikhonovSolver` with regularization parameter `lambda`.
16    pub fn new(lambda: f64) -> Self {
17        Self { lambda }
18    }
19    /// Solve the regularized least-squares problem.
20    ///
21    /// `a` is the m×n coefficient matrix (row-major), `b` is the rhs of length m.
22    /// Returns the solution vector of length n, or `None` if the system is singular.
23    pub fn solve(&self, a: &[Vec<f64>], b: &[f64]) -> Option<Vec<f64>> {
24        let m = a.len();
25        let n = if m == 0 { return None } else { a[0].len() };
26        let mut ata = vec![vec![0.0f64; n]; n];
27        for i in 0..n {
28            for j in 0..n {
29                let mut s = 0.0;
30                for k in 0..m {
31                    s += a[k][i] * a[k][j];
32                }
33                ata[i][j] = s;
34            }
35            ata[i][i] += self.lambda;
36        }
37        let mut atb = vec![0.0f64; n];
38        for i in 0..n {
39            let mut s = 0.0;
40            for k in 0..m {
41                s += a[k][i] * b[k];
42            }
43            atb[i] = s;
44        }
45        gaussian_elimination(ata, atb)
46    }
47}
/// Bisection root-finder with configurable tolerance and iteration limit.
pub struct BisectionSolver {
    /// Tolerance applied to both |f(mid)| and the half-width of the bracket.
    pub tol: f64,
    /// Maximum number of bisection iterations.
    pub max_iter: u32,
}
impl BisectionSolver {
    /// Create a new `BisectionSolver`.
    pub fn new(tol: f64, max_iter: u32) -> Self {
        Self { tol, max_iter }
    }
    /// Find a root of `f` in `[a, b]`.  Returns `None` if the sign condition
    /// fails (`f(a)` and `f(b)` must not have the same sign); otherwise returns
    /// the best estimate after convergence or `max_iter` iterations.
    pub fn solve(&self, f: &dyn Fn(f64) -> f64, mut a: f64, mut b: f64) -> Option<f64> {
        // Cache f(a) so each iteration evaluates f once (at the midpoint)
        // instead of twice; the iterates are identical to re-evaluating.
        let mut fa = f(a);
        if fa * f(b) > 0.0 {
            return None;
        }
        for _ in 0..self.max_iter {
            let mid = (a + b) / 2.0;
            let fm = f(mid);
            // Stop when the residual or the bracketing half-width is small enough.
            if fm.abs() < self.tol || (b - a) / 2.0 < self.tol {
                return Some(mid);
            }
            if fa * fm < 0.0 {
                // Sign change in [a, mid]: shrink from the right.
                b = mid;
            } else {
                // Sign change in [mid, b]: mid becomes the new left endpoint.
                a = mid;
                fa = fm;
            }
        }
        Some((a + b) / 2.0)
    }
}
/// Newton–Raphson solver that also reports whether convergence was achieved.
pub struct NewtonRaphsonSolver {
    /// Residual tolerance: convergence means |f(x)| < tol.
    pub tol: f64,
    /// Maximum number of Newton updates.
    pub max_iter: u32,
}
impl NewtonRaphsonSolver {
    /// Create a new `NewtonRaphsonSolver`.
    pub fn new(tol: f64, max_iter: u32) -> Self {
        Self { tol, max_iter }
    }
    /// Attempt to find a root of `f` starting from `x0`.
    /// Returns `(root, converged)`.
    pub fn solve(
        &self,
        f: &dyn Fn(f64) -> f64,
        df: &dyn Fn(f64) -> f64,
        mut x: f64,
    ) -> (f64, bool) {
        let mut remaining = self.max_iter;
        while remaining > 0 {
            let residual = f(x);
            // Residual already below tolerance: converged.
            if residual.abs() < self.tol {
                return (x, true);
            }
            let slope = df(x);
            // A (near-)zero derivative would blow up the Newton step; bail out.
            if slope.abs() < 1e-15 {
                return (x, false);
            }
            x -= residual / slope;
            remaining -= 1;
        }
        // Budget exhausted: report whether the final iterate happens to
        // satisfy the tolerance anyway.
        (x, f(x).abs() < self.tol)
    }
}
/// Sparse matrix in Compressed Sparse Row (CSR) format.
///
/// `row_ptr[i]..row_ptr[i+1]` gives the range of column indices/values for row `i`.
pub struct SparseMatrix {
    /// Number of rows.
    pub nrows: usize,
    /// Number of columns.
    pub ncols: usize,
    /// Row pointers (length nrows + 1).
    pub row_ptr: Vec<usize>,
    /// Column indices of non-zero entries (parallel to `values`).
    pub col_idx: Vec<usize>,
    /// Non-zero values.
    pub values: Vec<f64>,
}
impl SparseMatrix {
    /// Build a `SparseMatrix` from a list of `(row, col, value)` triplets.
    ///
    /// Entries within a row keep the order in which they appear in `triplets`.
    pub fn from_triplets(nrows: usize, ncols: usize, triplets: &[(usize, usize, f64)]) -> Self {
        // Histogram of entries per row.
        let mut per_row = vec![0usize; nrows];
        for &(row, _, _) in triplets {
            per_row[row] += 1;
        }
        // Exclusive prefix sum turns the histogram into row pointers.
        let mut row_ptr = Vec::with_capacity(nrows + 1);
        let mut running = 0usize;
        row_ptr.push(0);
        for &count in &per_row {
            running += count;
            row_ptr.push(running);
        }
        // Scatter each triplet into its row's next free slot.
        let nnz = triplets.len();
        let mut col_idx = vec![0usize; nnz];
        let mut values = vec![0.0f64; nnz];
        let mut next_free = row_ptr.clone();
        for &(row, col, val) in triplets {
            let slot = next_free[row];
            col_idx[slot] = col;
            values[slot] = val;
            next_free[row] += 1;
        }
        Self {
            nrows,
            ncols,
            row_ptr,
            col_idx,
            values,
        }
    }
    /// Sparse matrix-vector product: y = A * x.
    pub fn matvec(&self, x: &[f64]) -> Vec<f64> {
        assert_eq!(x.len(), self.ncols, "x length must equal ncols");
        (0..self.nrows)
            .map(|row| {
                let span = self.row_ptr[row]..self.row_ptr[row + 1];
                self.col_idx[span.clone()]
                    .iter()
                    .zip(&self.values[span])
                    .map(|(&col, &val)| val * x[col])
                    .sum()
            })
            .collect()
    }
    /// Return the number of non-zero entries.
    pub fn nnz(&self) -> usize {
        self.values.len()
    }
}
/// A closed real interval [lo, hi] used for validated/verified computation.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Interval {
    /// Lower endpoint.
    pub lo: f64,
    /// Upper endpoint.
    pub hi: f64,
}
#[allow(dead_code)]
impl Interval {
    /// Construct the interval [lo, hi].  Panics if lo > hi.
    pub fn new(lo: f64, hi: f64) -> Self {
        assert!(lo <= hi, "Interval::new: lo ({lo}) must be <= hi ({hi})");
        Self { lo, hi }
    }
    /// Construct a point interval [x, x].
    pub fn point(x: f64) -> Self {
        Self { lo: x, hi: x }
    }
    /// Interval addition: [a,b] + [c,d] = [a+c, b+d].
    pub fn add(self, other: Self) -> Self {
        let lo = self.lo + other.lo;
        let hi = self.hi + other.hi;
        Self { lo, hi }
    }
    /// Interval subtraction: [a,b] - [c,d] = [a-d, b-c].
    pub fn sub(self, other: Self) -> Self {
        let lo = self.lo - other.hi;
        let hi = self.hi - other.lo;
        Self { lo, hi }
    }
    /// Interval multiplication: min/max over all four endpoint products.
    pub fn mul(self, other: Self) -> Self {
        let candidates = [
            self.lo * other.lo,
            self.lo * other.hi,
            self.hi * other.lo,
            self.hi * other.hi,
        ];
        Self {
            // Folding from ±infinity keeps the original NaN-skipping behavior
            // of f64::min / f64::max.
            lo: candidates.iter().copied().fold(f64::INFINITY, f64::min),
            hi: candidates.iter().copied().fold(f64::NEG_INFINITY, f64::max),
        }
    }
    /// Interval width hi - lo.
    pub fn width(self) -> f64 {
        self.hi - self.lo
    }
    /// Midpoint of the interval.
    pub fn mid(self) -> f64 {
        (self.lo + self.hi) / 2.0
    }
    /// Check whether a real value is contained in [lo, hi].
    pub fn contains(self, x: f64) -> bool {
        (self.lo..=self.hi).contains(&x)
    }
    /// Interval enclosure of `sqrt` (valid for non-negative intervals).
    pub fn sqrt(self) -> Self {
        assert!(
            self.lo >= 0.0,
            "Interval::sqrt requires non-negative interval"
        );
        Self {
            lo: self.lo.sqrt(),
            hi: self.hi.sqrt(),
        }
    }
}
/// Power iteration to find the dominant eigenvalue of a dense matrix.
pub struct PowerIterationSolver {
    /// Convergence tolerance on successive eigenvalue estimates.
    pub tol: f64,
    /// Maximum number of iterations before giving up.
    pub max_iter: u32,
}
impl PowerIterationSolver {
    /// Create a new `PowerIterationSolver`.
    pub fn new(tol: f64, max_iter: u32) -> Self {
        Self { tol, max_iter }
    }
    /// Find the dominant eigenvalue and eigenvector of `a` (row-major, n×n).
    ///
    /// Returns `(eigenvalue, eigenvector)` or `None` if the matrix is empty,
    /// an iterate collapses to (near) zero, or the estimates do not settle
    /// within `max_iter` iterations.
    pub fn solve(&self, a: &[Vec<f64>]) -> Option<(f64, Vec<f64>)> {
        let n = a.len();
        if n == 0 {
            return None;
        }
        // Start from the all-ones vector, normalized to unit length.
        let mut v: Vec<f64> = vec![1.0; n];
        let initial_norm = (n as f64).sqrt();
        for entry in v.iter_mut() {
            *entry /= initial_norm;
        }
        let mut previous = 0.0f64;
        for _ in 0..self.max_iter {
            // w = A * v
            let w: Vec<f64> = a
                .iter()
                .map(|row| row.iter().zip(&v).map(|(aij, vj)| aij * vj).sum())
                .collect();
            // Rayleigh-quotient style estimate vᵀ(Av) (v has unit length).
            let estimate: f64 = w.iter().zip(&v).map(|(wi, vi)| wi * vi).sum();
            let magnitude = w.iter().map(|c| c * c).sum::<f64>().sqrt();
            if magnitude < 1e-15 {
                // v is (numerically) annihilated by A; cannot continue.
                return None;
            }
            let normalized: Vec<f64> = w.iter().map(|c| c / magnitude).collect();
            if (estimate - previous).abs() < self.tol {
                return Some((estimate, normalized));
            }
            previous = estimate;
            v = normalized;
        }
        None
    }
}
/// Classic 4th-order Runge-Kutta ODE stepper for scalar ODEs y' = f(t, y).
pub struct RungeKutta4Solver {
    /// Nominal step size used by `step` and `integrate`.
    pub h: f64,
}
impl RungeKutta4Solver {
    /// Create a new RK4 solver with step size `h`.
    pub fn new(h: f64) -> Self {
        Self { h }
    }
    /// One classic RK4 update of `y` at time `t` with step `h`.
    ///
    /// Shared by `step` and `integrate` so the tableau is defined in one place
    /// (previously the same five lines were duplicated in both methods).
    fn rk4_update(f: &dyn Fn(f64, f64) -> f64, t: f64, y: f64, h: f64) -> f64 {
        let k1 = f(t, y);
        let k2 = f(t + h / 2.0, y + h / 2.0 * k1);
        let k3 = f(t + h / 2.0, y + h / 2.0 * k2);
        let k4 = f(t + h, y + h * k3);
        y + h / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)
    }
    /// Advance the solution by one step of size `self.h`.
    pub fn step(&self, f: &dyn Fn(f64, f64) -> f64, t: f64, y: f64) -> f64 {
        Self::rk4_update(f, t, y, self.h)
    }
    /// Integrate from `t0` to `t_end`, returning `(t, y)` pairs at every step
    /// (including the initial condition).  The final step is shortened so the
    /// last sample lands exactly on `t_end`.
    pub fn integrate(
        &self,
        f: &dyn Fn(f64, f64) -> f64,
        t0: f64,
        y0: f64,
        t_end: f64,
    ) -> Vec<(f64, f64)> {
        // A non-positive span yields zero steps (the float->u64 cast saturates
        // at 0), so the result is just the initial condition.
        let steps = ((t_end - t0) / self.h).ceil() as u64;
        let mut result = Vec::with_capacity(steps as usize + 1);
        let mut t = t0;
        let mut y = y0;
        result.push((t, y));
        for _ in 0..steps {
            // Clamp the final step so we never overshoot t_end.
            let t_next = (t + self.h).min(t_end);
            let h_actual = t_next - t;
            y = Self::rk4_update(f, t, y, h_actual);
            t = t_next;
            result.push((t, y));
            if (t - t_end).abs() < 1e-14 {
                break;
            }
        }
        result
    }
}
/// Gradient-descent minimizer for a smooth function f : Rⁿ → R.
///
/// Uses a fixed step size (learning rate); no line-search is performed.
#[allow(dead_code)]
pub struct GradientDescentOptimizer {
    /// Fixed step size applied to every gradient update.
    pub learning_rate: f64,
    /// Convergence threshold on the Euclidean norm of the gradient.
    pub tol: f64,
    /// Maximum number of gradient steps.
    pub max_iter: u32,
}
#[allow(dead_code)]
impl GradientDescentOptimizer {
    /// Create a new optimizer.
    pub fn new(learning_rate: f64, tol: f64, max_iter: u32) -> Self {
        Self {
            learning_rate,
            tol,
            max_iter,
        }
    }
    /// Euclidean norm of a gradient vector (shared convergence criterion).
    fn grad_norm(g: &[f64]) -> f64 {
        g.iter().map(|v| v * v).sum::<f64>().sqrt()
    }
    /// Minimize `f` starting from `x0` using `grad` to supply ∇f.
    ///
    /// Returns `(minimizer, num_iters, converged)`, where `converged` means the
    /// gradient norm fell below `tol`.
    pub fn minimize(&self, grad: &dyn Fn(&[f64]) -> Vec<f64>, x0: &[f64]) -> (Vec<f64>, u32, bool) {
        let mut x = x0.to_vec();
        for iter in 0..self.max_iter {
            let g = grad(&x);
            if Self::grad_norm(&g) < self.tol {
                return (x, iter, true);
            }
            // Fixed-step descent: x <- x - lr * g.
            for i in 0..x.len() {
                x[i] -= self.learning_rate * g[i];
            }
        }
        // Budget exhausted: report whether the final point happens to satisfy
        // the tolerance anyway.
        let converged = Self::grad_norm(&grad(&x)) < self.tol;
        (x, self.max_iter, converged)
    }
}
/// Crank-Nicolson finite-difference solver for the 1-D heat equation u_t = κ u_xx
/// on the spatial domain [0, L] with Dirichlet boundary conditions u(0,t) = u(L,t) = 0.
///
/// The scheme is unconditionally stable (A-stable).  The grid spacing is
/// dx = L / (nx + 1); state vectors hold the nx interior node values.
#[allow(dead_code)]
pub struct CrankNicolsonSolver {
    /// Diffusivity coefficient κ.
    pub kappa: f64,
    /// Spatial domain length.
    pub domain_length: f64,
    /// Number of internal spatial nodes.
    pub nx: usize,
    /// Time step.
    pub dt: f64,
}
#[allow(dead_code)]
impl CrankNicolsonSolver {
    /// Create a new `CrankNicolsonSolver`.
    pub fn new(kappa: f64, domain_length: f64, nx: usize, dt: f64) -> Self {
        Self {
            kappa,
            domain_length,
            nx,
            dt,
        }
    }
    /// Advance the solution `u` (length `nx`, interior nodes) by one time step.
    ///
    /// Builds the explicit half-step right-hand side, then solves the implicit
    /// tridiagonal system with the Thomas algorithm.
    ///
    /// Returns the updated interior values, or `None` if the tridiagonal solve
    /// hits a (near-)zero pivot.  Panics if `u.len() != nx`.
    pub fn step(&self, u: &[f64]) -> Option<Vec<f64>> {
        let n = self.nx;
        assert_eq!(u.len(), n, "u must have length nx");
        // A zero-node grid has nothing to update; previously this panicked
        // with an out-of-bounds index on the empty c_prime vector.
        if n == 0 {
            return Some(Vec::new());
        }
        let dx = self.domain_length / (n + 1) as f64;
        // Diffusion number r = κ Δt / Δx².
        let r = self.kappa * self.dt / (dx * dx);
        // Implicit-side tridiagonal coefficients: off-diagonal α, diagonal β.
        let alpha = -r / 2.0;
        let beta = 1.0 + r;
        // Explicit half step; boundary neighbors are the zero Dirichlet values.
        let mut rhs = vec![0.0f64; n];
        for i in 0..n {
            let ul = if i > 0 { u[i - 1] } else { 0.0 };
            let uc = u[i];
            let ur = if i < n - 1 { u[i + 1] } else { 0.0 };
            rhs[i] = (r / 2.0) * ul + (1.0 - r) * uc + (r / 2.0) * ur;
        }
        // Thomas algorithm: forward sweep.
        let mut c_prime = vec![0.0f64; n];
        let mut d_prime = vec![0.0f64; n];
        c_prime[0] = alpha / beta;
        d_prime[0] = rhs[0] / beta;
        for i in 1..n {
            let denom = beta - alpha * c_prime[i - 1];
            if denom.abs() < 1e-15 {
                return None;
            }
            c_prime[i] = alpha / denom;
            d_prime[i] = (rhs[i] - alpha * d_prime[i - 1]) / denom;
        }
        // Back substitution.
        let mut u_new = vec![0.0f64; n];
        u_new[n - 1] = d_prime[n - 1];
        for i in (0..n - 1).rev() {
            u_new[i] = d_prime[i] - c_prime[i] * u_new[i + 1];
        }
        Some(u_new)
    }
    /// Integrate from `t=0` to `t=t_end`, returning the solution at each stored step.
    ///
    /// Integration stops early if a step fails (singular tridiagonal solve).
    pub fn integrate(&self, u0: &[f64], t_end: f64) -> Vec<Vec<f64>> {
        let steps = (t_end / self.dt).ceil() as u64;
        let mut u = u0.to_vec();
        let mut history = Vec::with_capacity(steps as usize + 1);
        history.push(u.clone());
        for _ in 0..steps {
            if let Some(u_new) = self.step(&u) {
                u = u_new;
            } else {
                break;
            }
            history.push(u.clone());
        }
        history
    }
}
/// Monte Carlo integrator over [a, b] using a control variate to reduce variance.
///
/// The control variate `cv` must have a known exact integral `cv_integral` over [a,b].
/// Uses a simple pseudo-random sequence via a linear congruential generator (no external deps).
#[allow(dead_code)]
pub struct MonteCarloIntegrator {
    /// Number of samples drawn per integral estimate.
    pub n_samples: u64,
    /// Seed for the internal linear congruential generator.
    pub seed: u64,
}
#[allow(dead_code)]
impl MonteCarloIntegrator {
    /// Create a new `MonteCarloIntegrator`.
    pub fn new(n_samples: u64, seed: u64) -> Self {
        Self { n_samples, seed }
    }
    /// Generate `n` pseudo-random floats in [0,1) from a 32-bit LCG
    /// (a = 1664525, c = 1013904223, m = 2³²).
    fn lcg_samples(&self, n: usize) -> Vec<f64> {
        const MULTIPLIER: u64 = 1664525;
        const INCREMENT: u64 = 1013904223;
        const MODULUS: u64 = 1 << 32;
        // State starts at seed + 1 (wrapping), matching the original stream.
        let mut state = self.seed.wrapping_add(1);
        (0..n)
            .map(|_| {
                state = (MULTIPLIER.wrapping_mul(state).wrapping_add(INCREMENT)) % MODULUS;
                state as f64 / MODULUS as f64
            })
            .collect()
    }
    /// Estimate ∫_a^b f(x) dx using crude Monte Carlo.
    pub fn integrate(&self, f: &dyn Fn(f64) -> f64, a: f64, b: f64) -> f64 {
        let n = self.n_samples as usize;
        let total: f64 = self
            .lcg_samples(n)
            .into_iter()
            .map(|u| f(a + u * (b - a)))
            .sum();
        (b - a) * total / self.n_samples as f64
    }
    /// Estimate ∫_a^b f(x) dx using a control variate `cv` with known integral `cv_integral`.
    ///
    /// Chooses optimal coefficient β = Cov(f, cv) / Var(cv) empirically from the same samples.
    pub fn integrate_with_control_variate(
        &self,
        f: &dyn Fn(f64) -> f64,
        cv: &dyn Fn(f64) -> f64,
        cv_integral: f64,
        a: f64,
        b: f64,
    ) -> f64 {
        let n = self.n_samples as usize;
        // Evaluate f and cv on one shared set of sample points.
        let xs: Vec<f64> = self
            .lcg_samples(n)
            .into_iter()
            .map(|u| a + u * (b - a))
            .collect();
        let f_vals: Vec<f64> = xs.iter().map(|&x| f(x)).collect();
        let cv_vals: Vec<f64> = xs.iter().map(|&x| cv(x)).collect();
        let mean = |v: &[f64]| v.iter().sum::<f64>() / n as f64;
        let f_mean = mean(&f_vals);
        let cv_mean = mean(&cv_vals);
        // Empirical (biased, /n) covariance and variance.
        let cov = f_vals
            .iter()
            .zip(cv_vals.iter())
            .map(|(&fi, &ci)| (fi - f_mean) * (ci - cv_mean))
            .sum::<f64>()
            / n as f64;
        let var_cv = cv_vals.iter().map(|&ci| (ci - cv_mean).powi(2)).sum::<f64>() / n as f64;
        // Guard against a (numerically) constant control variate.
        let beta = if var_cv.abs() < 1e-15 { 0.0 } else { cov / var_cv };
        // Subtract the centred control variate; cv_integral/(b-a) is its mean value.
        let adjusted: f64 = f_vals
            .iter()
            .zip(cv_vals.iter())
            .map(|(&fi, &ci)| fi - beta * (ci - cv_integral / (b - a)))
            .sum();
        (b - a) * adjusted / n as f64
    }
}