use crate::error::{IntegrateError, IntegrateResult};
use super::embedded_rk::OdeResult;
/// Solve the dense linear system `A x = b` by Gaussian elimination with
/// partial pivoting.
///
/// `a` holds the `n x n` coefficient matrix in row-major order and `b` the
/// right-hand side (`n = b.len()`). Returns the solution vector, or a
/// `LinearSolveError` when a pivot magnitude falls below `1e-300`
/// (singular or near-singular system).
fn gaussian_solve(a: &[f64], b: &[f64]) -> Result<Vec<f64>, IntegrateError> {
    let n = b.len();
    debug_assert_eq!(a.len(), n * n, "matrix/vector size mismatch");
    // Work on mutable copies so the caller's data is left untouched.
    let mut lhs: Vec<f64> = a.to_vec();
    let mut rhs: Vec<f64> = b.to_vec();
    // Forward elimination.
    for k in 0..n {
        // Partial pivoting: pick the row at or below `k` whose entry in
        // column `k` has the largest magnitude.
        let piv = (k..n)
            .max_by(|&r1, &r2| {
                lhs[r1 * n + k]
                    .abs()
                    .partial_cmp(&lhs[r2 * n + k].abs())
                    .unwrap_or(std::cmp::Ordering::Equal)
            })
            .ok_or_else(|| IntegrateError::LinearSolveError("empty row range".to_string()))?;
        if lhs[piv * n + k].abs() < 1e-300 {
            return Err(IntegrateError::LinearSolveError(format!(
                "singular or near-singular matrix at column {k}"
            )));
        }
        // Bring the pivot row into position (matrix row plus RHS entry).
        if piv != k {
            for j in 0..n {
                lhs.swap(k * n + j, piv * n + j);
            }
            rhs.swap(k, piv);
        }
        // Zero out column `k` below the pivot.
        let pivot_val = lhs[k * n + k];
        for r in (k + 1)..n {
            let scale = lhs[r * n + k] / pivot_val;
            for j in k..n {
                let above = lhs[k * n + j];
                lhs[r * n + j] -= scale * above;
            }
            rhs[r] -= scale * rhs[k];
        }
    }
    // Back substitution on the now upper-triangular system.
    let mut x = vec![0.0_f64; n];
    for r in (0..n).rev() {
        let mut acc = rhs[r];
        for j in (r + 1)..n {
            acc -= lhs[r * n + j] * x[j];
        }
        x[r] = acc / lhs[r * n + r];
    }
    Ok(x)
}
/// Build the Newton iteration matrix `I - h * J` as a flat row-major
/// `n * n` buffer suitable for `gaussian_solve`.
///
/// `jac` is the Jacobian given as `n` rows of length `n`.
fn identity_minus_hj(h: f64, jac: &[Vec<f64>], n: usize) -> Vec<f64> {
    (0..n * n)
        .map(|idx| {
            let (i, j) = (idx / n, idx % n);
            // Kronecker delta supplies the identity part.
            let eye = if i == j { 1.0 } else { 0.0 };
            eye - h * jac[i][j]
        })
        .collect()
}
/// Maximum number of Newton iterations per implicit step before the
/// solver reports a convergence failure.
const MAX_NEWTON: usize = 50;
/// Tolerance used for both Newton stopping tests: absolute bound on the
/// residual norm, and relative scale for the update-norm test.
const NEWTON_TOL: f64 = 1e-10;
/// Solve the implicit-Euler stage equation `y = y_old + h * f(t_new, y)`
/// for `y` using Newton's method, starting from `y_guess`.
///
/// Returns the converged solution together with the number of `f`/`jac`
/// evaluations performed, or a `ConvergenceError` if the iteration does
/// not converge within `MAX_NEWTON` steps.
fn newton_implicit_euler<F, J>(
    f: &F,
    jac: &J,
    t_new: f64,
    y_old: &[f64],
    h: f64,
    y_guess: &[f64],
) -> IntegrateResult<(Vec<f64>, usize)>
where
    F: Fn(f64, &[f64]) -> Vec<f64>,
    J: Fn(f64, &[f64]) -> Vec<Vec<f64>>,
{
    let n = y_old.len();
    let mut y = y_guess.to_vec();
    let mut n_evals: usize = 0;
    for _iter in 0..MAX_NEWTON {
        let fy = f(t_new, &y);
        n_evals += 1;
        // Residual of the implicit-Euler equation:
        // G(y) = y - y_old - h * f(t_new, y).
        let residual: Vec<f64> = (0..n).map(|i| y[i] - y_old[i] - h * fy[i]).collect();
        let res_norm: f64 = residual.iter().map(|r| r * r).sum::<f64>().sqrt();
        if res_norm < NEWTON_TOL {
            // Converged on the residual test: skip the Jacobian evaluation
            // that would otherwise be wasted on this final iteration.
            return Ok((y, n_evals));
        }
        let jy = jac(t_new, &y);
        n_evals += 1;
        // Newton matrix: G'(y) = I - h * J.
        let a = identity_minus_hj(h, &jy, n);
        let neg_residual: Vec<f64> = residual.iter().map(|r| -r).collect();
        let delta = gaussian_solve(&a, &neg_residual)?;
        let delta_norm: f64 = delta.iter().map(|d| d * d).sum::<f64>().sqrt();
        for i in 0..n {
            y[i] += delta[i];
        }
        // Secondary stopping test: update small relative to solution scale.
        if delta_norm < NEWTON_TOL * (1.0 + y.iter().map(|v| v.abs()).fold(0.0_f64, f64::max)) {
            return Ok((y, n_evals));
        }
    }
    Err(IntegrateError::ConvergenceError(format!(
        "Newton iteration did not converge within {MAX_NEWTON} iterations at t={t_new}"
    )))
}
/// Solve the trapezoidal-rule stage equation
/// `y = y_old + (h/2) * (f_old + f(t_new, y))` for `y` with Newton's method.
///
/// `f_old` is `f(t_old, y_old)`, already evaluated by the caller (so it is
/// not recomputed here). Returns the solution and the number of `f`/`jac`
/// evaluations, or a `ConvergenceError` after `MAX_NEWTON` iterations.
fn newton_trapezoidal<F, J>(
    f: &F,
    jac: &J,
    _t_old: f64,
    t_new: f64,
    y_old: &[f64],
    f_old: &[f64],
    h: f64,
    y_guess: &[f64],
) -> IntegrateResult<(Vec<f64>, usize)>
where
    F: Fn(f64, &[f64]) -> Vec<f64>,
    J: Fn(f64, &[f64]) -> Vec<Vec<f64>>,
{
    let n = y_old.len();
    let mut y = y_guess.to_vec();
    let mut n_evals: usize = 0;
    let h2 = h / 2.0;
    for _iter in 0..MAX_NEWTON {
        let fy = f(t_new, &y);
        n_evals += 1;
        // Residual: G(y) = y - y_old - (h/2) * (f_old + f(t_new, y)).
        let residual: Vec<f64> = (0..n)
            .map(|i| y[i] - y_old[i] - h2 * (f_old[i] + fy[i]))
            .collect();
        let res_norm: f64 = residual.iter().map(|r| r * r).sum::<f64>().sqrt();
        if res_norm < NEWTON_TOL {
            // Converged on the residual test: skip the Jacobian evaluation
            // that would otherwise be wasted on this final iteration.
            return Ok((y, n_evals));
        }
        let jy = jac(t_new, &y);
        n_evals += 1;
        // Newton matrix: G'(y) = I - (h/2) * J.
        let a = identity_minus_hj(h2, &jy, n);
        let neg_residual: Vec<f64> = residual.iter().map(|r| -r).collect();
        let delta = gaussian_solve(&a, &neg_residual)?;
        let delta_norm: f64 = delta.iter().map(|d| d * d).sum::<f64>().sqrt();
        for i in 0..n {
            y[i] += delta[i];
        }
        // Secondary stopping test: update small relative to solution scale.
        if delta_norm < NEWTON_TOL * (1.0 + y.iter().map(|v| v.abs()).fold(0.0_f64, f64::max)) {
            return Ok((y, n_evals));
        }
    }
    Err(IntegrateError::ConvergenceError(format!(
        "Newton (trapezoidal) did not converge within {MAX_NEWTON} iterations at t={t_new}"
    )))
}
/// Integrate `y' = f(t, y)` from `t0` to `t_end` with the backward
/// (implicit) Euler method at a fixed step size `h`.
///
/// `jac` must return the Jacobian `df/dy` as `n` rows of length `n`.
/// First-order accurate and L-stable, so suitable for stiff problems.
/// The final step is clamped so the last output node lands exactly on
/// `t_end`.
///
/// # Errors
/// - `ValueError` for `h <= 0`, `t_end < t0`, or empty `y0`.
/// - `ConvergenceError` / `LinearSolveError` propagated from the Newton
///   solve. A fixed-step method cannot retry with a smaller step, so a
///   failed step aborts the integration (hence `n_rejected` is always 0).
pub fn implicit_euler<F, J>(
    f: F,
    jac: J,
    t0: f64,
    y0: &[f64],
    t_end: f64,
    h: f64,
) -> IntegrateResult<OdeResult>
where
    F: Fn(f64, &[f64]) -> Vec<f64>,
    J: Fn(f64, &[f64]) -> Vec<Vec<f64>>,
{
    if h <= 0.0 {
        return Err(IntegrateError::ValueError(format!(
            "step size h={h} must be positive"
        )));
    }
    if t_end < t0 {
        return Err(IntegrateError::ValueError(
            "t_end must be >= t0 for implicit_euler".to_string(),
        ));
    }
    if y0.is_empty() {
        return Err(IntegrateError::ValueError(
            "y0 must be non-empty".to_string(),
        ));
    }
    let mut t = t0;
    let mut y = y0.to_vec();
    let mut t_out = vec![t0];
    let mut y_out = vec![y0.to_vec()];
    let mut n_steps: usize = 0;
    let mut n_evals: usize = 0;
    // Tolerance guard so accumulated rounding in `t` cannot add a spurious
    // tiny extra step at the end of the interval.
    while t < t_end - h * 1e-10 {
        // Clamp the final step so integration lands exactly on t_end.
        let h_actual = h.min(t_end - t);
        let t_new = t + h_actual;
        let f_cur = f(t, &y);
        n_evals += 1;
        // Explicit-Euler predictor used as the Newton starting guess.
        let y_guess: Vec<f64> = y
            .iter()
            .zip(f_cur.iter())
            .map(|(yi, fi)| yi + h_actual * fi)
            .collect();
        let (y_new, evals) = newton_implicit_euler(&f, &jac, t_new, &y, h_actual, &y_guess)?;
        n_evals += evals;
        n_steps += 1;
        t = t_new;
        y = y_new;
        t_out.push(t);
        y_out.push(y.clone());
    }
    Ok(OdeResult {
        t: t_out,
        y: y_out,
        n_steps,
        // Fixed-step scheme: a failed step aborts with an error instead of
        // being rejected and retried, so no rejections are ever counted.
        n_rejected: 0,
        n_evals,
    })
}
/// Integrate `y' = f(t, y)` from `t0` to `t_end` with the implicit
/// trapezoidal rule (Crank–Nicolson) at a fixed step size `h`.
///
/// Each step solves
/// `y_{n+1} = y_n + (h/2) * (f(t_n, y_n) + f(t_{n+1}, y_{n+1}))`
/// with Newton's method; second-order accurate and A-stable. The final
/// step is clamped so the last output node lands exactly on `t_end`.
///
/// # Errors
/// - `ValueError` for `h <= 0`, `t_end < t0`, or empty `y0`.
/// - `ConvergenceError` / `LinearSolveError` propagated from the Newton
///   solve. A fixed-step method cannot retry with a smaller step, so a
///   failed step aborts the integration (hence `n_rejected` is always 0).
pub fn trapezoidal<F, J>(
    f: F,
    jac: J,
    t0: f64,
    y0: &[f64],
    t_end: f64,
    h: f64,
) -> IntegrateResult<OdeResult>
where
    F: Fn(f64, &[f64]) -> Vec<f64>,
    J: Fn(f64, &[f64]) -> Vec<Vec<f64>>,
{
    if h <= 0.0 {
        return Err(IntegrateError::ValueError(format!(
            "step size h={h} must be positive"
        )));
    }
    if t_end < t0 {
        return Err(IntegrateError::ValueError(
            "t_end must be >= t0 for trapezoidal".to_string(),
        ));
    }
    if y0.is_empty() {
        return Err(IntegrateError::ValueError(
            "y0 must be non-empty".to_string(),
        ));
    }
    let mut t = t0;
    let mut y = y0.to_vec();
    let mut t_out = vec![t0];
    let mut y_out = vec![y0.to_vec()];
    let mut n_steps: usize = 0;
    let mut n_evals: usize = 0;
    // Tolerance guard so accumulated rounding in `t` cannot add a spurious
    // tiny extra step at the end of the interval.
    while t < t_end - h * 1e-10 {
        // Clamp the final step so integration lands exactly on t_end.
        let h_actual = h.min(t_end - t);
        let t_new = t + h_actual;
        // f at the left endpoint is also reused inside the trapezoidal
        // residual, so it is evaluated once here and passed down.
        let f_cur = f(t, &y);
        n_evals += 1;
        // Explicit-Euler predictor used as the Newton starting guess.
        let y_guess: Vec<f64> = y
            .iter()
            .zip(f_cur.iter())
            .map(|(yi, fi)| yi + h_actual * fi)
            .collect();
        let (y_new, evals) =
            newton_trapezoidal(&f, &jac, t, t_new, &y, &f_cur, h_actual, &y_guess)?;
        n_evals += evals;
        n_steps += 1;
        t = t_new;
        y = y_new;
        t_out.push(t);
        y_out.push(y.clone());
    }
    Ok(OdeResult {
        t: t_out,
        y: y_out,
        n_steps,
        // Fixed-step scheme: a failed step aborts with an error instead of
        // being rejected and retried, so no rejections are ever counted.
        n_rejected: 0,
        n_evals,
    })
}
/// Integrate `y' = f(t, y)` from `t0` to `t_end` with the two-step BDF-2
/// method at a fixed step size `h`, bootstrapping the first step with
/// implicit Euler.
///
/// BDF-2 needs the two most recent solution points. The final step is
/// clamped to land exactly on `t_end`; because that makes the last two
/// step sizes unequal, the step uses variable-step BDF-2 coefficients
/// (obtained by differentiating the quadratic interpolant through the
/// three nodes) — the fixed-step coefficients 3/2, -2, 1/2 are only
/// consistent when both steps are equal. On uniform steps the exact
/// literal coefficients are used, preserving the uniform-step arithmetic.
///
/// # Errors
/// - `ValueError` for `h <= 0`, `t_end < t0`, or empty `y0`.
/// - `ConvergenceError` / `LinearSolveError` when a Newton solve fails.
///   A fixed-step method cannot retry, so a failed step aborts the
///   integration (hence `n_rejected` is always 0).
pub fn bdf2<F, J>(
    f: F,
    jac: J,
    t0: f64,
    y0: &[f64],
    t_end: f64,
    h: f64,
) -> IntegrateResult<OdeResult>
where
    F: Fn(f64, &[f64]) -> Vec<f64>,
    J: Fn(f64, &[f64]) -> Vec<Vec<f64>>,
{
    if h <= 0.0 {
        return Err(IntegrateError::ValueError(format!(
            "step size h={h} must be positive"
        )));
    }
    if t_end < t0 {
        return Err(IntegrateError::ValueError(
            "t_end must be >= t0 for bdf2".to_string(),
        ));
    }
    if y0.is_empty() {
        return Err(IntegrateError::ValueError(
            "y0 must be non-empty".to_string(),
        ));
    }
    let n = y0.len();
    let mut t = t0;
    let mut y = y0.to_vec();
    let mut t_out = vec![t0];
    let mut y_out = vec![y0.to_vec()];
    let mut n_steps: usize = 0;
    let mut n_evals: usize = 0;
    let mut y_prev: Vec<f64> = y.clone();
    // Size of the step that produced the current history pair (y_prev -> y);
    // needed for the variable-step coefficients on a clamped final step.
    let mut h_prev = h;
    let mut have_history = false;
    while t < t_end - h * 1e-10 {
        // Clamp the final step so integration lands exactly on t_end.
        let h_actual = h.min(t_end - t);
        let t_new = t + h_actual;
        let f_cur = f(t, &y);
        n_evals += 1;
        // Explicit-Euler predictor used as the Newton starting guess.
        let y_guess: Vec<f64> = y
            .iter()
            .zip(f_cur.iter())
            .map(|(yi, fi)| yi + h_actual * fi)
            .collect();
        if !have_history {
            // Bootstrap: only one history point so far — take a single
            // implicit-Euler step to create the second one.
            let (y_new, evals) = newton_implicit_euler(&f, &jac, t_new, &y, h_actual, &y_guess)?;
            n_evals += evals;
            n_steps += 1;
            y_prev = y.clone();
            h_prev = h_actual;
            t = t_new;
            y = y_new;
            t_out.push(t);
            y_out.push(y.clone());
            have_history = true;
        } else {
            // BDF-2 residual coefficients for
            //   alpha * z + beta * y_n + gamma * y_{n-1} - h_actual * f(t_new, z) = 0.
            // Uniform steps give exactly (3/2, -2, 1/2); otherwise use the
            // variable-step formula with h1 = h_prev, h2 = h_actual.
            let (alpha, beta, gamma) = if (h_actual - h_prev).abs() <= h_prev * f64::EPSILON {
                (1.5, -2.0, 0.5)
            } else {
                let hs = h_prev + h_actual;
                (
                    (h_prev + 2.0 * h_actual) / hs,
                    -hs / h_prev,
                    h_actual * h_actual / (h_prev * hs),
                )
            };
            let y_prev_snap = y_prev.clone();
            let y_snap = y.clone();
            let mut z = y_guess.clone();
            let mut converged = false;
            for _iter in 0..MAX_NEWTON {
                let fz = f(t_new, &z);
                n_evals += 1;
                let residual: Vec<f64> = (0..n)
                    .map(|i| {
                        alpha * z[i] + beta * y_snap[i] + gamma * y_prev_snap[i]
                            - h_actual * fz[i]
                    })
                    .collect();
                let res_norm: f64 = residual.iter().map(|r| r * r).sum::<f64>().sqrt();
                if res_norm < NEWTON_TOL {
                    converged = true;
                    break;
                }
                // Jacobian only evaluated when a Newton update is taken.
                let jz = jac(t_new, &z);
                n_evals += 1;
                // Newton matrix: alpha * I - h_actual * J.
                let mut a = vec![0.0_f64; n * n];
                for i in 0..n {
                    for j in 0..n {
                        let diag = if i == j { alpha } else { 0.0 };
                        a[i * n + j] = diag - h_actual * jz[i][j];
                    }
                }
                let neg_residual: Vec<f64> = residual.iter().map(|r| -r).collect();
                let delta = gaussian_solve(&a, &neg_residual)?;
                let delta_norm: f64 = delta.iter().map(|d| d * d).sum::<f64>().sqrt();
                for i in 0..n {
                    z[i] += delta[i];
                }
                if delta_norm
                    < NEWTON_TOL * (1.0 + z.iter().map(|v| v.abs()).fold(0.0_f64, f64::max))
                {
                    converged = true;
                    break;
                }
            }
            if !converged {
                return Err(IntegrateError::ConvergenceError(format!(
                    "BDF-2 Newton iteration did not converge at t={t_new}"
                )));
            }
            y_prev = y.clone();
            h_prev = h_actual;
            n_steps += 1;
            t = t_new;
            y = z;
            t_out.push(t);
            y_out.push(y.clone());
        }
    }
    Ok(OdeResult {
        t: t_out,
        y: y_out,
        n_steps,
        // Fixed-step scheme: a failed step aborts with an error instead of
        // being rejected and retried, so no rejections are ever counted.
        n_rejected: 0,
        n_evals,
    })
}
#[cfg(test)]
mod tests {
    use super::*;

    // Stiff scalar test problem y' = -1000 y; exact solution e^{-1000 t}.
    fn stiff_decay(t: f64, y: &[f64]) -> Vec<f64> {
        let _ = t;
        vec![-1000.0 * y[0]]
    }
    // Constant Jacobian of `stiff_decay`.
    fn stiff_decay_jac(t: f64, y: &[f64]) -> Vec<Vec<f64>> {
        let _ = (t, y);
        vec![vec![-1000.0]]
    }
    // Non-stiff scalar test problem y' = -y; exact solution e^{-t}.
    fn gentle_decay(t: f64, y: &[f64]) -> Vec<f64> {
        let _ = t;
        vec![-y[0]]
    }
    // Constant Jacobian of `gentle_decay`.
    fn gentle_decay_jac(t: f64, y: &[f64]) -> Vec<Vec<f64>> {
        let _ = (t, y);
        vec![vec![-1.0]]
    }
    // Upper-triangular 2x2 linear system; the second component is decoupled
    // and decays as e^{-t}, which is what the 2-D test checks.
    fn linear_2d(t: f64, y: &[f64]) -> Vec<f64> {
        let _ = t;
        vec![-10.0 * y[0] + y[1], -y[1]]
    }
    // Constant Jacobian of `linear_2d`.
    fn linear_2d_jac(t: f64, y: &[f64]) -> Vec<Vec<f64>> {
        let _ = (t, y);
        vec![vec![-10.0, 1.0], vec![0.0, -1.0]]
    }

    // Implicit Euler is only first-order, so a loose absolute tolerance
    // against e^{-5} is used here.
    #[test]
    fn implicit_euler_gentle_decay() {
        let result = implicit_euler(gentle_decay, gentle_decay_jac, 0.0, &[1.0], 5.0, 0.1)
            .expect("implicit_euler failed");
        let y_end = result.y.last().expect("empty result")[0];
        let exact = (-5.0_f64).exp();
        assert!(
            (y_end - exact).abs() < 0.05,
            "y_end={y_end}, exact={exact}"
        );
        assert!(result.n_steps > 0);
    }

    // Stiff problem integrated to t=0.01 (exact e^{-10}) with a step small
    // enough for 10% relative accuracy from a first-order method.
    #[test]
    fn implicit_euler_stiff_decay() {
        let result =
            implicit_euler(stiff_decay, stiff_decay_jac, 0.0, &[1.0], 0.01, 1e-5)
                .expect("implicit_euler stiff failed");
        let y_end = result.y.last().expect("empty result")[0];
        let exact = (-10.0_f64).exp();
        assert!(
            (y_end - exact).abs() < exact * 0.10,
            "y_end={y_end}, exact={exact}"
        );
    }

    // Trapezoidal rule is second-order, so a much tighter relative
    // tolerance than implicit Euler is expected on the same problem.
    #[test]
    fn trapezoidal_gentle_decay() {
        let result =
            trapezoidal(gentle_decay, gentle_decay_jac, 0.0, &[1.0], 5.0, 0.1)
                .expect("trapezoidal failed");
        let y_end = result.y.last().expect("empty result")[0];
        let exact = (-5.0_f64).exp();
        assert!(
            (y_end - exact).abs() / exact < 0.005,
            "rel_err={}",
            (y_end - exact).abs() / exact
        );
    }

    // 2-D system: only the decoupled second component (exact e^{-1}) is
    // compared, since it has a simple closed form.
    #[test]
    fn trapezoidal_2d() {
        let result = trapezoidal(
            linear_2d,
            linear_2d_jac,
            0.0,
            &[1.0, 1.0],
            1.0,
            0.05,
        )
        .expect("trapezoidal 2d failed");
        let y_end = result.y.last().expect("empty result");
        let exact_y1 = (-1.0_f64).exp();
        assert!(
            (y_end[1] - exact_y1).abs() / exact_y1 < 0.01,
            "y1_end={}, exact={}",
            y_end[1],
            exact_y1
        );
    }

    // BDF-2 is second-order (after the first-order implicit-Euler
    // bootstrap step), hence the 2% relative tolerance.
    #[test]
    fn bdf2_gentle_decay() {
        let result =
            bdf2(gentle_decay, gentle_decay_jac, 0.0, &[1.0], 5.0, 0.1)
                .expect("bdf2 failed");
        let y_end = result.y.last().expect("empty result")[0];
        let exact = (-5.0_f64).exp();
        assert!(
            (y_end - exact).abs() / exact < 0.02,
            "rel_err={}",
            (y_end - exact).abs() / exact
        );
    }

    // BDF-2 on the stiff problem; h=1e-4 divides the interval exactly, so
    // every step is uniform.
    #[test]
    fn bdf2_stiff() {
        let result =
            bdf2(stiff_decay, stiff_decay_jac, 0.0, &[1.0], 0.01, 1e-4)
                .expect("bdf2 stiff failed");
        let y_end = result.y.last().expect("empty result")[0];
        let exact = (-10.0_f64).exp();
        assert!(
            (y_end - exact).abs() < exact * 0.10,
            "y_end={y_end}, exact={exact}"
        );
    }

    // Input validation: negative step, t_end < t0, and empty state vector
    // must all be rejected with an error.
    #[test]
    fn implicit_euler_validates_inputs() {
        assert!(implicit_euler(gentle_decay, gentle_decay_jac, 0.0, &[1.0], 1.0, -0.1).is_err());
        assert!(implicit_euler(gentle_decay, gentle_decay_jac, 1.0, &[1.0], 0.0, 0.1).is_err());
        assert!(implicit_euler(gentle_decay, gentle_decay_jac, 0.0, &[], 1.0, 0.1).is_err());
    }
}