// oxilean_std/numerical_analysis/types.rs
use super::functions::*;
6#[allow(dead_code)]
10pub struct TikhonovSolver {
11 pub lambda: f64,
12}
13#[allow(dead_code)]
14impl TikhonovSolver {
15 pub fn new(lambda: f64) -> Self {
17 Self { lambda }
18 }
19 pub fn solve(&self, a: &[Vec<f64>], b: &[f64]) -> Option<Vec<f64>> {
24 let m = a.len();
25 let n = if m == 0 { return None } else { a[0].len() };
26 let mut ata = vec![vec![0.0f64; n]; n];
27 for i in 0..n {
28 for j in 0..n {
29 let mut s = 0.0;
30 for k in 0..m {
31 s += a[k][i] * a[k][j];
32 }
33 ata[i][j] = s;
34 }
35 ata[i][i] += self.lambda;
36 }
37 let mut atb = vec![0.0f64; n];
38 for i in 0..n {
39 let mut s = 0.0;
40 for k in 0..m {
41 s += a[k][i] * b[k];
42 }
43 atb[i] = s;
44 }
45 gaussian_elimination(ata, atb)
46 }
47}
/// Bracketing root finder using the bisection method.
#[derive(Debug, Clone, Copy)]
pub struct BisectionSolver {
    /// Absolute tolerance on |f(mid)| and on the half-width of the bracket.
    pub tol: f64,
    /// Maximum number of bisection steps before giving up.
    pub max_iter: u32,
}

impl BisectionSolver {
    /// Creates a solver with the given tolerance and iteration cap.
    pub fn new(tol: f64, max_iter: u32) -> Self {
        Self { tol, max_iter }
    }

    /// Finds a root of `f` inside the bracket `[a, b]`.
    ///
    /// Returns `None` when `f(a)` and `f(b)` have the same sign (no
    /// guaranteed sign change). Otherwise returns the midpoint once either
    /// |f(mid)| or the bracket half-width drops below `tol`, or the final
    /// midpoint after `max_iter` halvings.
    pub fn solve(&self, f: &dyn Fn(f64) -> f64, mut a: f64, mut b: f64) -> Option<f64> {
        // Cache f(a): the bracket test and every sign test below reuse it,
        // so `f` is evaluated once per iteration instead of twice.
        let mut fa = f(a);
        if fa * f(b) > 0.0 {
            return None;
        }
        for _ in 0..self.max_iter {
            let mid = (a + b) / 2.0;
            let fm = f(mid);
            if fm.abs() < self.tol || (b - a) / 2.0 < self.tol {
                return Some(mid);
            }
            if fa * fm < 0.0 {
                // Root lies in [a, mid].
                b = mid;
            } else {
                // Root lies in [mid, b]; mid becomes the new left endpoint.
                a = mid;
                fa = fm;
            }
        }
        Some((a + b) / 2.0)
    }
}
/// Newton–Raphson root finder for scalar functions.
pub struct NewtonRaphsonSolver {
    /// Convergence tolerance on |f(x)|.
    pub tol: f64,
    /// Maximum number of Newton steps.
    pub max_iter: u32,
}

impl NewtonRaphsonSolver {
    /// Creates a solver with the given tolerance and iteration cap.
    pub fn new(tol: f64, max_iter: u32) -> Self {
        Self { tol, max_iter }
    }

    /// Iterates x ← x − f(x)/f'(x) from the starting guess `x`.
    ///
    /// Returns the final iterate together with a flag that is `true` when
    /// |f(x)| < tol. Bails out with `false` when the derivative becomes
    /// numerically zero (|f'(x)| < 1e-15), since the step is undefined.
    pub fn solve(
        &self,
        f: &dyn Fn(f64) -> f64,
        df: &dyn Fn(f64) -> f64,
        mut x: f64,
    ) -> (f64, bool) {
        let mut remaining = self.max_iter;
        while remaining > 0 {
            let residual = f(x);
            if residual.abs() < self.tol {
                return (x, true);
            }
            let slope = df(x);
            if slope.abs() < 1e-15 {
                // Flat spot: a Newton step would divide by ~zero.
                return (x, false);
            }
            x -= residual / slope;
            remaining -= 1;
        }
        // Budget exhausted: report whether we happened to land on a root.
        (x, f(x).abs() < self.tol)
    }
}
/// Sparse matrix in CSR (compressed sparse row) format.
#[derive(Debug, Clone, PartialEq)]
pub struct SparseMatrix {
    pub nrows: usize,
    pub ncols: usize,
    /// row_ptr[i]..row_ptr[i+1] indexes the entries of row i; len = nrows+1.
    pub row_ptr: Vec<usize>,
    /// Column index of each stored entry; len = nnz.
    pub col_idx: Vec<usize>,
    /// Value of each stored entry, parallel to `col_idx`.
    pub values: Vec<f64>,
}

impl SparseMatrix {
    /// Builds a CSR matrix from `(row, col, value)` triplets.
    ///
    /// Entries within a row keep the order in which they appear in
    /// `triplets`; duplicate positions are stored separately (their values
    /// effectively add during `matvec`).
    ///
    /// # Panics
    /// Panics when a triplet's row or column index is out of range.
    pub fn from_triplets(nrows: usize, ncols: usize, triplets: &[(usize, usize, f64)]) -> Self {
        // Validate up front: an out-of-range column would otherwise be
        // stored silently and only blow up later inside `matvec` with an
        // opaque slice-index panic.
        for &(r, c, _) in triplets {
            assert!(r < nrows, "from_triplets: row index {r} out of range (nrows = {nrows})");
            assert!(c < ncols, "from_triplets: col index {c} out of range (ncols = {ncols})");
        }
        // First pass: count entries per row, then prefix-sum into row_ptr.
        let mut counts = vec![0usize; nrows];
        for &(r, _, _) in triplets {
            counts[r] += 1;
        }
        let mut row_ptr = vec![0usize; nrows + 1];
        for i in 0..nrows {
            row_ptr[i + 1] = row_ptr[i] + counts[i];
        }
        let nnz = triplets.len();
        let mut col_idx = vec![0usize; nnz];
        let mut values = vec![0.0f64; nnz];
        // Second pass: scatter each triplet into its row's next free slot.
        let mut pos = row_ptr.clone();
        for &(r, c, v) in triplets {
            let p = pos[r];
            col_idx[p] = c;
            values[p] = v;
            pos[r] += 1;
        }
        Self {
            nrows,
            ncols,
            row_ptr,
            col_idx,
            values,
        }
    }

    /// Computes y = A·x.
    ///
    /// # Panics
    /// Panics when `x.len() != ncols`.
    pub fn matvec(&self, x: &[f64]) -> Vec<f64> {
        assert_eq!(x.len(), self.ncols, "x length must equal ncols");
        let mut y = vec![0.0f64; self.nrows];
        for i in 0..self.nrows {
            let start = self.row_ptr[i];
            let end = self.row_ptr[i + 1];
            for k in start..end {
                y[i] += self.values[k] * x[self.col_idx[k]];
            }
        }
        y
    }

    /// Number of explicitly stored entries.
    pub fn nnz(&self) -> usize {
        self.values.len()
    }
}
/// Closed interval [lo, hi] supporting basic interval arithmetic.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Interval {
    pub lo: f64,
    pub hi: f64,
}

#[allow(dead_code)]
impl Interval {
    /// Builds [lo, hi]; panics unless lo <= hi.
    pub fn new(lo: f64, hi: f64) -> Self {
        assert!(lo <= hi, "Interval::new: lo ({lo}) must be <= hi ({hi})");
        Self { lo, hi }
    }

    /// Degenerate interval [x, x].
    pub fn point(x: f64) -> Self {
        Self { lo: x, hi: x }
    }

    /// Interval sum: endpoints add component-wise.
    pub fn add(self, other: Self) -> Self {
        Self {
            lo: self.lo + other.lo,
            hi: self.hi + other.hi,
        }
    }

    /// Interval difference: [a,b] − [c,d] = [a−d, b−c].
    pub fn sub(self, other: Self) -> Self {
        Self {
            lo: self.lo - other.hi,
            hi: self.hi - other.lo,
        }
    }

    /// Interval product: the hull of all four endpoint products.
    pub fn mul(self, other: Self) -> Self {
        let ll = self.lo * other.lo;
        let lh = self.lo * other.hi;
        let hl = self.hi * other.lo;
        let hh = self.hi * other.hi;
        Self {
            lo: ll.min(lh).min(hl).min(hh),
            hi: ll.max(lh).max(hl).max(hh),
        }
    }

    /// Width hi − lo.
    pub fn width(self) -> f64 {
        self.hi - self.lo
    }

    /// Midpoint (lo + hi) / 2.
    pub fn mid(self) -> f64 {
        (self.lo + self.hi) / 2.0
    }

    /// Whether x lies inside the closed interval.
    pub fn contains(self, x: f64) -> bool {
        self.lo <= x && x <= self.hi
    }

    /// Elementwise square root; panics when the interval dips below zero.
    pub fn sqrt(self) -> Self {
        assert!(
            self.lo >= 0.0,
            "Interval::sqrt requires non-negative interval"
        );
        Self {
            lo: self.lo.sqrt(),
            hi: self.hi.sqrt(),
        }
    }
}
/// Dominant-eigenpair estimation via power iteration.
pub struct PowerIterationSolver {
    /// Convergence tolerance on successive eigenvalue estimates.
    pub tol: f64,
    /// Maximum number of power-iteration steps.
    pub max_iter: u32,
}

impl PowerIterationSolver {
    /// Creates a solver with the given tolerance and iteration cap.
    pub fn new(tol: f64, max_iter: u32) -> Self {
        Self { tol, max_iter }
    }

    /// Estimates the dominant eigenvalue and a unit eigenvector of the
    /// dense square matrix `a`.
    ///
    /// Returns `None` for an empty matrix, when the iterate collapses to
    /// (numerically) zero, or when the eigenvalue estimate fails to settle
    /// within `max_iter` iterations.
    pub fn solve(&self, a: &[Vec<f64>]) -> Option<(f64, Vec<f64>)> {
        let n = a.len();
        if n == 0 {
            return None;
        }
        // Start from the normalized all-ones vector.
        let scale = (n as f64).sqrt();
        let mut v: Vec<f64> = vec![1.0 / scale; n];
        let mut lambda = 0.0;
        for _ in 0..self.max_iter {
            // w = A v
            let mut w = vec![0.0f64; n];
            for (wi, row) in w.iter_mut().zip(a) {
                *wi = row.iter().zip(&v).map(|(aij, vj)| aij * vj).sum();
            }
            // Rayleigh-quotient-style estimate: λ ≈ vᵀ A v (v has unit norm).
            let estimate: f64 = w.iter().zip(&v).map(|(wi, vi)| wi * vi).sum();
            let w_norm = w.iter().map(|x| x * x).sum::<f64>().sqrt();
            if w_norm < 1e-15 {
                // Iterate vanished: v is (numerically) in the null space.
                return None;
            }
            let next: Vec<f64> = w.iter().map(|x| x / w_norm).collect();
            if (estimate - lambda).abs() < self.tol {
                return Some((estimate, next));
            }
            lambda = estimate;
            v = next;
        }
        None
    }
}
/// Classical fourth-order Runge–Kutta integrator for scalar ODEs y' = f(t, y).
#[derive(Debug, Clone, Copy)]
pub struct RungeKutta4Solver {
    /// Nominal step size (the final step of `integrate` may be shorter).
    pub h: f64,
}

impl RungeKutta4Solver {
    /// Creates an integrator with step size `h`.
    pub fn new(h: f64) -> Self {
        Self { h }
    }

    /// One RK4 stage evaluation of size `h` from (t, y).
    /// Shared by `step` and `integrate` so the stage formula lives in one place.
    fn rk4_step(f: &dyn Fn(f64, f64) -> f64, t: f64, y: f64, h: f64) -> f64 {
        let k1 = f(t, y);
        let k2 = f(t + h / 2.0, y + h / 2.0 * k1);
        let k3 = f(t + h / 2.0, y + h / 2.0 * k2);
        let k4 = f(t + h, y + h * k3);
        y + h / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)
    }

    /// Advances y by one step of the configured size `self.h` from time `t`.
    pub fn step(&self, f: &dyn Fn(f64, f64) -> f64, t: f64, y: f64) -> f64 {
        Self::rk4_step(f, t, y, self.h)
    }

    /// Integrates from (t0, y0) to `t_end`, returning every (t, y) sample
    /// including the initial condition. The last step is clamped so the
    /// final sample lands exactly on `t_end`.
    pub fn integrate(
        &self,
        f: &dyn Fn(f64, f64) -> f64,
        t0: f64,
        y0: f64,
        t_end: f64,
    ) -> Vec<(f64, f64)> {
        let steps = ((t_end - t0) / self.h).ceil() as u64;
        let mut result = Vec::with_capacity(steps as usize + 1);
        let mut t = t0;
        let mut y = y0;
        result.push((t, y));
        for _ in 0..steps {
            // Clamp the final step so we hit t_end exactly.
            let t_next = (t + self.h).min(t_end);
            y = Self::rk4_step(f, t, y, t_next - t);
            t = t_next;
            result.push((t, y));
            if (t - t_end).abs() < 1e-14 {
                break;
            }
        }
        result
    }
}
/// Fixed-step gradient descent on a differentiable objective.
#[allow(dead_code)]
pub struct GradientDescentOptimizer {
    pub learning_rate: f64,
    pub tol: f64,
    pub max_iter: u32,
}

#[allow(dead_code)]
impl GradientDescentOptimizer {
    /// Creates an optimizer with the given step size, tolerance and budget.
    pub fn new(learning_rate: f64, tol: f64, max_iter: u32) -> Self {
        Self {
            learning_rate,
            tol,
            max_iter,
        }
    }

    /// Minimizes a function given its gradient, starting from `x0`.
    ///
    /// Returns `(x, iterations_used, converged)` where `converged` means
    /// the Euclidean norm of the gradient fell below `tol`.
    pub fn minimize(&self, grad: &dyn Fn(&[f64]) -> Vec<f64>, x0: &[f64]) -> (Vec<f64>, u32, bool) {
        let n = x0.len();
        let mut x = x0.to_vec();
        for iter in 0..self.max_iter {
            let g = grad(&x);
            if Self::norm(&g) < self.tol {
                return (x, iter, true);
            }
            // x ← x − η·∇f(x)
            for i in 0..n {
                x[i] -= self.learning_rate * g[i];
            }
        }
        // Budget exhausted: report whether the final point converged anyway.
        let converged = Self::norm(&grad(&x)) < self.tol;
        (x, self.max_iter, converged)
    }

    /// Euclidean (L2) norm of a vector.
    fn norm(v: &[f64]) -> f64 {
        v.iter().map(|c| c * c).sum::<f64>().sqrt()
    }
}
/// Crank–Nicolson time stepper for the 1-D heat equation u_t = κ·u_xx on
/// (0, L) with homogeneous Dirichlet boundaries u(0) = u(L) = 0.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct CrankNicolsonSolver {
    /// Thermal diffusivity κ.
    pub kappa: f64,
    /// Length L of the spatial domain.
    pub domain_length: f64,
    /// Number of interior grid points (boundaries are held at zero).
    pub nx: usize,
    /// Time step size.
    pub dt: f64,
}

#[allow(dead_code)]
impl CrankNicolsonSolver {
    /// Creates a solver for the given physical and discretization parameters.
    pub fn new(kappa: f64, domain_length: f64, nx: usize, dt: f64) -> Self {
        Self {
            kappa,
            domain_length,
            nx,
            dt,
        }
    }

    /// Advances the interior solution `u` by one time step.
    ///
    /// Forms the explicit Crank–Nicolson right-hand side, then solves the
    /// implicit tridiagonal system with the Thomas algorithm. Returns
    /// `None` when the elimination hits a (numerically) zero pivot.
    ///
    /// # Panics
    /// Panics when `u.len() != nx`.
    pub fn step(&self, u: &[f64]) -> Option<Vec<f64>> {
        let n = self.nx;
        assert_eq!(u.len(), n, "u must have length nx");
        // Guard the degenerate grid: the Thomas sweep below indexes [0] and
        // [n - 1], which would panic/underflow for n == 0.
        if n == 0 {
            return Some(Vec::new());
        }
        let dx = self.domain_length / (n + 1) as f64;
        let r = self.kappa * self.dt / (dx * dx);
        // Implicit-side coefficients: sub/super-diagonal alpha, diagonal beta.
        let alpha = -r / 2.0;
        let beta = 1.0 + r;
        // Explicit half: rhs = (I - r/2·T) u, with zero Dirichlet neighbours
        // outside the grid.
        let mut rhs = vec![0.0f64; n];
        for i in 0..n {
            let ul = if i > 0 { u[i - 1] } else { 0.0 };
            let uc = u[i];
            let ur = if i < n - 1 { u[i + 1] } else { 0.0 };
            rhs[i] = (r / 2.0) * ul + (1.0 - r) * uc + (r / 2.0) * ur;
        }
        // Thomas algorithm: forward elimination...
        let mut c_prime = vec![0.0f64; n];
        let mut d_prime = vec![0.0f64; n];
        c_prime[0] = alpha / beta;
        d_prime[0] = rhs[0] / beta;
        for i in 1..n {
            let denom = beta - alpha * c_prime[i - 1];
            if denom.abs() < 1e-15 {
                return None; // zero pivot: system is numerically singular
            }
            c_prime[i] = alpha / denom;
            d_prime[i] = (rhs[i] - alpha * d_prime[i - 1]) / denom;
        }
        // ...then back substitution.
        let mut u_new = vec![0.0f64; n];
        u_new[n - 1] = d_prime[n - 1];
        for i in (0..n - 1).rev() {
            u_new[i] = d_prime[i] - c_prime[i] * u_new[i + 1];
        }
        Some(u_new)
    }

    /// Repeatedly applies `step` for ceil(t_end / dt) steps, returning the
    /// full history including the initial state. Stops early when a step
    /// fails (singular system).
    pub fn integrate(&self, u0: &[f64], t_end: f64) -> Vec<Vec<f64>> {
        let steps = (t_end / self.dt).ceil() as u64;
        let mut u = u0.to_vec();
        let mut history = Vec::with_capacity(steps as usize + 1);
        history.push(u.clone());
        for _ in 0..steps {
            match self.step(&u) {
                Some(next) => u = next,
                None => break,
            }
            history.push(u.clone());
        }
        history
    }
}
/// Plain Monte Carlo quadrature driven by a deterministic LCG sample stream.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
pub struct MonteCarloIntegrator {
    /// Number of samples drawn per integral estimate.
    pub n_samples: u64,
    /// Seed for the internal linear congruential generator.
    pub seed: u64,
}

#[allow(dead_code)]
impl MonteCarloIntegrator {
    /// Creates an integrator with the given sample count and seed.
    pub fn new(n_samples: u64, seed: u64) -> Self {
        Self { n_samples, seed }
    }

    /// Generates `n` pseudo-uniform samples in [0, 1) with the classic
    /// Numerical-Recipes LCG (a = 1664525, c = 1013904223, m = 2^32).
    /// Deterministic for a fixed seed.
    fn lcg_samples(&self, n: usize) -> Vec<f64> {
        let mut state = self.seed.wrapping_add(1);
        let mut out = Vec::with_capacity(n);
        let a: u64 = 1664525;
        let c: u64 = 1013904223;
        let m: u64 = 1 << 32;
        for _ in 0..n {
            state = (a.wrapping_mul(state).wrapping_add(c)) % m;
            out.push(state as f64 / m as f64);
        }
        out
    }

    /// Estimates ∫ₐᵇ f(x) dx as (b − a) · mean(f(xᵢ)).
    ///
    /// Returns 0.0 when `n_samples` is zero (previously this divided by
    /// zero and produced NaN).
    pub fn integrate(&self, f: &dyn Fn(f64) -> f64, a: f64, b: f64) -> f64 {
        if self.n_samples == 0 {
            return 0.0;
        }
        let samples = self.lcg_samples(self.n_samples as usize);
        let sum: f64 = samples.iter().map(|&u| f(a + u * (b - a))).sum();
        (b - a) * sum / self.n_samples as f64
    }

    /// Control-variate estimator: uses `cv` — whose exact integral over
    /// [a, b] is `cv_integral` — to cancel variance, with the coefficient
    /// β = cov(f, cv) / var(cv) estimated from the same sample set.
    ///
    /// Returns 0.0 when `n_samples` is zero (previously NaN).
    pub fn integrate_with_control_variate(
        &self,
        f: &dyn Fn(f64) -> f64,
        cv: &dyn Fn(f64) -> f64,
        cv_integral: f64,
        a: f64,
        b: f64,
    ) -> f64 {
        let n = self.n_samples as usize;
        if n == 0 {
            return 0.0;
        }
        let samples = self.lcg_samples(n);
        let xs: Vec<f64> = samples.iter().map(|&u| a + u * (b - a)).collect();
        let fv: Vec<f64> = xs.iter().map(|&x| f(x)).collect();
        let cv_v: Vec<f64> = xs.iter().map(|&x| cv(x)).collect();
        let f_mean = fv.iter().sum::<f64>() / n as f64;
        let cv_mean = cv_v.iter().sum::<f64>() / n as f64;
        // Sample covariance and variance (population normalization by n).
        let cov: f64 = fv
            .iter()
            .zip(cv_v.iter())
            .map(|(&fi, &ci)| (fi - f_mean) * (ci - cv_mean))
            .sum::<f64>()
            / n as f64;
        let var_cv: f64 = cv_v.iter().map(|&ci| (ci - cv_mean).powi(2)).sum::<f64>() / n as f64;
        // Guard a (numerically) constant control variate: β would be 0/0.
        let beta = if var_cv.abs() < 1e-15 {
            0.0
        } else {
            cov / var_cv
        };
        // Mean of f − β·(cv − E[cv]), rescaled by the interval length;
        // E[cv] under the uniform sampling density is cv_integral / (b − a).
        let corrected_sum: f64 = fv
            .iter()
            .zip(cv_v.iter())
            .map(|(&fi, &ci)| fi - beta * (ci - cv_integral / (b - a)))
            .sum::<f64>();
        (b - a) * corrected_sum / n as f64
    }
}