use crate::error::{IntegrateError, IntegrateResult};
use scirs2_core::ndarray::{Array1, Array2};
/// Solves the dense linear system `a * x = b` in place via Gaussian
/// elimination with partial (row) pivoting.
///
/// Both `a` and `b` are overwritten by the factorization. Returns the
/// solution vector, or `IntegrateError::LinearSolveError` when the pivot
/// magnitude underflows (matrix is numerically singular).
fn gauss_solve(a: &mut Array2<f64>, b: &mut Array1<f64>) -> IntegrateResult<Array1<f64>> {
    let n = b.len();
    // Forward elimination with row pivoting.
    for k in 0..n {
        // Select the row with the largest-magnitude entry in column k.
        let mut pivot_row = k;
        let mut pivot_mag = a[[k, k]].abs();
        for r in (k + 1)..n {
            let mag = a[[r, k]].abs();
            if mag > pivot_mag {
                pivot_mag = mag;
                pivot_row = r;
            }
        }
        if pivot_mag < 1e-300 {
            return Err(IntegrateError::LinearSolveError(
                "Singular matrix in integral equation solve".to_string(),
            ));
        }
        // Bring the pivot row into position k.
        if pivot_row != k {
            for c in k..n {
                let held = a[[k, c]];
                a[[k, c]] = a[[pivot_row, c]];
                a[[pivot_row, c]] = held;
            }
            b.swap(k, pivot_row);
        }
        // Eliminate column k below the diagonal.
        let pivot = a[[k, k]];
        for r in (k + 1)..n {
            let scale = a[[r, k]] / pivot;
            for c in k..n {
                a[[r, c]] -= scale * a[[k, c]];
            }
            b[r] -= scale * b[k];
        }
    }
    // Back substitution on the upper-triangular system.
    let mut x = Array1::<f64>::zeros(n);
    for r in (0..n).rev() {
        let mut acc = b[r];
        for c in (r + 1)..n {
            acc -= a[[r, c]] * x[c];
        }
        x[r] = acc / a[[r, r]];
    }
    Ok(x)
}
/// Returns the nodes and weights of the `npts`-point Gauss-Legendre
/// quadrature rule on the reference interval [-1, 1].
///
/// Nodes are returned in ascending order; the weights sum to 2 (the length
/// of the reference interval). The rule integrates polynomials of degree
/// `2 * npts - 1` exactly.
///
/// Nodes are computed as the roots of the Legendre polynomial P_n via
/// Newton's method (as in Numerical Recipes' `gauleg`), so any `npts >= 1`
/// is supported. Previously only npts in {1, 2, 3, 4, 5, 8} were tabulated
/// and every other request silently fell back to the 5-point rule — which
/// in particular degraded the default `FredholmConfig { n_quad: 20, .. }`.
/// `npts == 0` yields an empty rule.
fn gauss_legendre(npts: usize) -> (Vec<f64>, Vec<f64>) {
    let n = npts;
    let mut nodes = vec![0.0_f64; n];
    let mut weights = vec![0.0_f64; n];
    if n == 0 {
        return (nodes, weights);
    }
    let nf = n as f64;
    // Roots are symmetric about 0, so only half of them must be found.
    let half = (n + 1) / 2;
    for i in 0..half {
        // Chebyshev-based initial guess for the i-th root from the top.
        let mut z = (std::f64::consts::PI * (i as f64 + 0.75) / (nf + 0.5)).cos();
        let mut dp = 1.0_f64;
        // Newton iteration on P_n(z); converges in a handful of steps.
        for _ in 0..100 {
            // Three-term recurrence: p1 = P_n(z), p0 = P_{n-1}(z).
            let mut p0 = 1.0_f64;
            let mut p1 = z;
            for k in 2..=n {
                let kf = k as f64;
                let p2 = ((2.0 * kf - 1.0) * z * p1 - (kf - 1.0) * p0) / kf;
                p0 = p1;
                p1 = p2;
            }
            // P'_n(z) = n * (z * P_n - P_{n-1}) / (z^2 - 1); all roots are
            // interior, so the denominator stays away from zero.
            dp = nf * (z * p1 - p0) / (z * z - 1.0);
            let step = p1 / dp;
            z -= step;
            if step.abs() < 1e-15 {
                break;
            }
        }
        // Mirror the root into both halves; output is ascending overall.
        let w = 2.0 / ((1.0 - z * z) * dp * dp);
        nodes[i] = -z;
        nodes[n - 1 - i] = z;
        weights[i] = w;
        weights[n - 1 - i] = w;
    }
    (nodes, weights)
}
/// Result of a Volterra second-kind solve.
#[derive(Debug, Clone)]
pub struct VolterraResult {
    /// Uniform grid points x_0 = 0, ..., x_n = b.
    pub x: Vec<f64>,
    /// Approximate solution values u(x_i) at the grid points.
    pub u: Vec<f64>,
    /// Error estimate; currently always 0.0 (placeholder, not computed).
    pub error_estimate: f64,
    /// Number of marching steps taken (equals the configured `n`).
    pub n_steps: usize,
}
/// Configuration for the Volterra second-kind solver.
#[derive(Debug, Clone)]
pub struct VolterraConfig {
    /// Number of uniform steps on [0, b] (the grid has n + 1 points).
    pub n: usize,
    /// Right endpoint of the integration interval [0, b].
    pub b: f64,
    /// Quadrature rule used to discretize the running integral.
    pub quadrature: QuadratureRule,
}
/// Quadrature rule used for the Volterra integral term.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum QuadratureRule {
    /// Composite trapezoidal rule at every step.
    Trapezoidal,
    /// Composite Simpson rule on even-numbered steps, with a trapezoidal
    /// fallback on odd-numbered steps (Simpson needs an even panel count).
    Simpson,
}
impl Default for VolterraConfig {
    /// Defaults: 100 steps on [0, 1] with trapezoidal quadrature.
    fn default() -> Self {
        Self {
            n: 100,
            b: 1.0,
            quadrature: QuadratureRule::Trapezoidal,
        }
    }
}
/// Marching solver for Volterra integral equations of the second kind:
/// u(x) = f(x) + lambda * ∫_0^x K(x, t) u(t) dt.
pub struct VolterraIE2ndKind;
impl VolterraIE2ndKind {
    /// Solves the Volterra integral equation of the second kind
    ///
    ///   u(x) = f(x) + lambda * ∫_0^x K(x, t) u(t) dt,   x in [0, cfg.b],
    ///
    /// by marching over a uniform grid: at each step the discretized
    /// integral couples u(x_i) only to already-computed values, so each
    /// u(x_i) is obtained from a single scalar equation.
    ///
    /// # Arguments
    /// * `f` - right-hand side f(x)
    /// * `kernel` - kernel K(x, t)
    /// * `lambda` - coupling parameter multiplying the integral term
    /// * `cfg` - step count, interval endpoint, and quadrature rule
    ///
    /// # Errors
    /// * `InvalidInput` when `cfg.n == 0` or `cfg.b` is not positive/finite.
    /// * `ComputationError` when the scalar diagonal factor
    ///   1 - lambda * w_ii * K(x_i, x_i) is numerically zero.
    pub fn solve<FRhs, FKern>(
        f: &FRhs,
        kernel: &FKern,
        lambda: f64,
        cfg: &VolterraConfig,
    ) -> IntegrateResult<VolterraResult>
    where
        FRhs: Fn(f64) -> f64,
        FKern: Fn(f64, f64) -> f64,
    {
        let n = cfg.n;
        let b = cfg.b;
        // Guard against a zero-step grid (division by zero in h) and a
        // degenerate interval.
        if n == 0 {
            return Err(IntegrateError::InvalidInput(
                "VolterraConfig.n must be at least 1".to_string(),
            ));
        }
        if !b.is_finite() || b <= 0.0 {
            return Err(IntegrateError::InvalidInput(format!(
                "VolterraConfig.b = {} must be positive and finite",
                b
            )));
        }
        let h = b / n as f64;
        let xs: Vec<f64> = (0..=n).map(|i| i as f64 * h).collect();
        let mut u = vec![0.0_f64; n + 1];
        // At x = 0 the integral vanishes, so u(0) = f(0).
        u[0] = f(xs[0]);
        match cfg.quadrature {
            QuadratureRule::Trapezoidal => {
                for i in 1..=n {
                    let xi = xs[i];
                    // Trapezoidal sum over the known values u_0..u_{i-1};
                    // the unknown endpoint term (weight h/2) is moved to
                    // the left-hand side via `diag`.
                    let mut sum = 0.5 * kernel(xi, xs[0]) * u[0];
                    for j in 1..i {
                        sum += kernel(xi, xs[j]) * u[j];
                    }
                    let diag = 1.0 - lambda * h * 0.5 * kernel(xi, xi);
                    if diag.abs() < 1e-15 {
                        return Err(IntegrateError::ComputationError(
                            format!("Near-zero diagonal at step {}: the Volterra equation may be ill-conditioned", i)
                        ));
                    }
                    u[i] = (f(xi) + lambda * h * sum) / diag;
                }
            }
            QuadratureRule::Simpson => {
                for i in 1..=n {
                    let xi = xs[i];
                    // Composite Simpson requires an even number of panels,
                    // so odd steps fall back to the trapezoidal rule.
                    // (Even i >= 1 implies i >= 2, so the old `i >= 2`
                    // guard inside this branch was dead code.)
                    let even = i % 2 == 0;
                    let sum = if even {
                        // Simpson: (h/3)[g_0 + 4 g_1 + 2 g_2 + ... + g_i];
                        // the unknown g_i endpoint is handled in `diag_coeff`.
                        let mut s = kernel(xi, xs[0]) * u[0];
                        for j in 1..i {
                            let coeff = if j % 2 == 1 { 4.0 } else { 2.0 };
                            s += coeff * kernel(xi, xs[j]) * u[j];
                        }
                        s * (h / 3.0)
                    } else {
                        let mut s = 0.5 * kernel(xi, xs[0]) * u[0];
                        for j in 1..i {
                            s += kernel(xi, xs[j]) * u[j];
                        }
                        s * h
                    };
                    let k_diag = kernel(xi, xi);
                    // Weight of the unknown endpoint value u_i: h/3 for
                    // Simpson (fixed — the previous code used h/2, the
                    // trapezoidal endpoint weight, in both arms of an
                    // identical if/else) and h/2 for the trapezoidal
                    // fallback on odd steps.
                    let diag_coeff = if even {
                        1.0 - lambda * (h / 3.0) * k_diag
                    } else {
                        1.0 - lambda * h * 0.5 * k_diag
                    };
                    if diag_coeff.abs() < 1e-15 {
                        return Err(IntegrateError::ComputationError(
                            format!("Near-zero diagonal at step {}", i)
                        ));
                    }
                    u[i] = (f(xi) + lambda * sum) / diag_coeff;
                }
            }
        }
        Ok(VolterraResult {
            x: xs,
            u,
            // No a-posteriori error estimate is computed yet.
            error_estimate: 0.0,
            n_steps: n,
        })
    }
}
/// Result of a Fredholm second-kind solve (Nystrom method).
#[derive(Debug, Clone)]
pub struct FredholmResult {
    /// Gauss-Legendre quadrature nodes mapped onto [a, b] (ascending).
    pub x: Vec<f64>,
    /// Solution values u(x_i) at the quadrature nodes.
    pub u: Vec<f64>,
    /// Crude conditioning indicator: ratio of largest to smallest absolute
    /// diagonal entry of the Nystrom matrix (not a true condition number).
    pub condition_estimate: f64,
}
/// Configuration for the Fredholm second-kind solver.
#[derive(Debug, Clone)]
pub struct FredholmConfig {
    /// Left endpoint of the integration interval.
    pub a: f64,
    /// Right endpoint of the integration interval.
    pub b: f64,
    /// Requested number of Gauss-Legendre quadrature points.
    pub n_quad: usize,
    /// Coupling parameter lambda multiplying the integral term.
    pub lambda: f64,
}
impl Default for FredholmConfig {
    /// Defaults: interval [0, 1], 20 quadrature points, lambda = 1.
    fn default() -> Self {
        Self {
            a: 0.0,
            b: 1.0,
            n_quad: 20,
            lambda: 1.0,
        }
    }
}
/// Nystrom (quadrature collocation) solver for Fredholm integral equations
/// of the second kind: u(x) = f(x) + lambda * ∫_a^b K(x, t) u(t) dt.
pub struct FredholmIE2ndKind;
impl FredholmIE2ndKind {
pub fn solve<FRhs, FKern>(
f: &FRhs,
kernel: &FKern,
cfg: &FredholmConfig,
) -> IntegrateResult<FredholmResult>
where
FRhs: Fn(f64) -> f64,
FKern: Fn(f64, f64) -> f64,
{
let a = cfg.a;
let b = cfg.b;
let lam = cfg.lambda;
let (xi_ref, wi_ref) = gauss_legendre(cfg.n_quad);
let n = xi_ref.len();
let half_len = (b - a) * 0.5;
let mid = (a + b) * 0.5;
let nodes: Vec<f64> = xi_ref.iter().map(|&xi| mid + half_len * xi).collect();
let weights: Vec<f64> = wi_ref.iter().map(|&wi| wi * half_len).collect();
let mut a_mat = Array2::<f64>::zeros((n, n));
let mut rhs = Array1::<f64>::zeros(n);
for i in 0..n {
rhs[i] = f(nodes[i]);
for j in 0..n {
let k_ij = kernel(nodes[i], nodes[j]);
a_mat[[i, j]] = if i == j {
1.0 - lam * weights[j] * k_ij
} else {
-lam * weights[j] * k_ij
};
}
}
let diag_max = (0..n).fold(f64::NEG_INFINITY, |m, i| m.max(a_mat[[i, i]].abs()));
let diag_min = (0..n).fold(f64::INFINITY, |m, i| m.min(a_mat[[i, i]].abs()));
let condition_estimate = if diag_min > 1e-300 { diag_max / diag_min } else { f64::INFINITY };
let u_nodes = gauss_solve(&mut a_mat, &mut rhs)?;
Ok(FredholmResult {
x: nodes,
u: u_nodes.to_vec(),
condition_estimate,
})
}
pub fn evaluate<FRhs, FKern>(
x: f64,
f: &FRhs,
kernel: &FKern,
result: &FredholmResult,
lambda: f64,
) -> f64
where
FRhs: Fn(f64) -> f64,
FKern: Fn(f64, f64) -> f64,
{
let n = result.x.len();
if n == 0 { return f(x); }
let a = result.x[0];
let b = result.x[n - 1];
let w = (b - a) / n as f64;
let sum: f64 = result.x.iter().zip(result.u.iter())
.map(|(&tj, &uj)| kernel(x, tj) * uj * w)
.sum();
f(x) + lambda * sum
}
}
/// Result of an Abel integral equation inversion.
#[derive(Debug, Clone)]
pub struct AbelResult {
    /// Uniform grid points on [0, b].
    pub x: Vec<f64>,
    /// Recovered solution values u(x_i) at the grid points.
    pub u: Vec<f64>,
    /// RMS residual of `u` substituted back into the Abel forward operator
    /// (crude trapezoid-style evaluation).
    pub residual: f64,
}
/// Configuration for the Abel equation solver.
#[derive(Debug, Clone)]
pub struct AbelConfig {
    /// Number of uniform grid intervals on [0, b].
    pub n: usize,
    /// Right endpoint of the domain [0, b].
    pub b: f64,
    /// Singularity exponent of the kernel (x - t)^(-alpha); must lie in
    /// the open interval (0, 1).
    pub alpha: f64,
    /// Tikhonov smoothing strength applied before differentiation;
    /// 0.0 disables smoothing.
    pub regularization: f64,
}
impl Default for AbelConfig {
    /// Defaults: 50 intervals on [0, 1], classical alpha = 1/2, no
    /// regularization.
    fn default() -> Self {
        Self {
            n: 50,
            b: 1.0,
            alpha: 0.5,
            regularization: 0.0,
        }
    }
}
/// Solver for the generalized Abel integral equation of the first kind:
/// ∫_0^x u(t) / (x - t)^alpha dt = f(x).
pub struct AbelEquation;
impl AbelEquation {
    /// Solves the generalized Abel integral equation of the first kind
    ///
    ///   ∫_0^x u(t) / (x - t)^alpha dt = f(x),   0 < alpha < 1,
    ///
    /// via the classical inversion formula
    ///
    ///   u(x) = sin(pi*alpha)/pi * d/dx ∫_0^x f(t) / (x - t)^(1 - alpha) dt.
    ///
    /// The inner (weakly singular) integral is evaluated by product
    /// integration on a uniform grid: f is treated as piecewise constant
    /// on midpoint cells and the kernel factor is integrated exactly on
    /// each cell. The outer derivative is taken by finite differences,
    /// optionally after Tikhonov smoothing to damp noise amplification.
    ///
    /// # Errors
    /// * `InvalidInput` when `alpha` is outside (0, 1) or `cfg.n == 0`.
    pub fn solve<FRhs>(
        f: &FRhs,
        cfg: &AbelConfig,
    ) -> IntegrateResult<AbelResult>
    where
        FRhs: Fn(f64) -> f64,
    {
        let alpha = cfg.alpha;
        if !(0.0 < alpha && alpha < 1.0) {
            return Err(IntegrateError::InvalidInput(format!(
                "Abel exponent alpha={} must be in (0, 1)",
                alpha
            )));
        }
        // n == 0 would divide by zero below and index out of bounds in the
        // finite-difference step.
        if cfg.n == 0 {
            return Err(IntegrateError::InvalidInput(
                "AbelConfig.n must be at least 1".to_string(),
            ));
        }
        let n = cfg.n;
        let b = cfg.b;
        let h = b / n as f64;
        let xs: Vec<f64> = (0..=n).map(|i| i as f64 * h).collect();
        let fs: Vec<f64> = xs.iter().map(|&x| f(x)).collect();
        // i_beta[i] approximates ∫_0^{x_i} f(t) (x_i - t)^(alpha - 1) dt.
        // On a cell [lo, hi] the kernel factor integrates exactly:
        // ∫ (x - t)^(alpha - 1) dt = ((x - lo)^alpha - (x - hi)^alpha) / alpha.
        let mut i_beta = vec![0.0_f64; n + 1];
        i_beta[0] = 0.0;
        for i in 1..=n {
            let xi = xs[i];
            let mut sum = 0.0_f64;
            for j in 0..i {
                let tj = xs[j];
                // Node t_j owns the midpoint cell [t_j - h/2, t_j + h/2],
                // clipped to the domain at t = 0 and extended up to x_i for
                // the last node, so the cells partition [0, x_i] exactly.
                // (Previously the last cell started at t_{i-1} + h/2 rather
                // than t_{i-1} - h/2, leaving a gap of width h around
                // t_{i-1} that was never integrated.)
                let cell_lo = if j == 0 { 0.0 } else { tj - h * 0.5 };
                let cell_hi = if j == i - 1 { xi } else { tj + h * 0.5 };
                // Exact integral of the kernel factor over the cell; for
                // the last cell (x_i - cell_hi) is 0 and 0^alpha = 0.
                let wj = ((xi - cell_lo).powf(alpha) - (xi - cell_hi).powf(alpha)) / alpha;
                sum += wj * fs[j];
            }
            i_beta[i] = sum;
        }
        // Abel inversion prefactor sin(pi * alpha) / pi.
        let prefactor = (alpha * std::f64::consts::PI).sin() / std::f64::consts::PI;
        let mut u = vec![0.0_f64; n + 1];
        let reg = cfg.regularization;
        // Optional Tikhonov smoothing of the fractional integral before
        // numerical differentiation (differentiation amplifies noise).
        let i_smooth = if reg > 0.0 {
            tikhonov_smooth(&i_beta, reg, h)
        } else {
            i_beta.clone()
        };
        // One-sided differences at the endpoints, central inside.
        u[0] = prefactor * (i_smooth[1] - i_smooth[0]) / h;
        for i in 1..n {
            u[i] = prefactor * (i_smooth[i + 1] - i_smooth[i - 1]) / (2.0 * h);
        }
        u[n] = prefactor * (i_smooth[n] - i_smooth[n - 1]) / h;
        let residual = compute_abel_residual(&xs, &u, &fs, alpha, h);
        Ok(AbelResult { x: xs, u, residual })
    }
}
/// Applies Tikhonov smoothing to `v` by solving the tridiagonal system
/// (I + reg * L) s = v with the Thomas algorithm, where L has 2 on the
/// diagonal and -1 off it (a second-difference penalty; note the boundary
/// rows use the same coefficients as the interior rows).
///
/// Returns `v` unchanged when it is too short to smooth (< 3 entries),
/// when `reg` is non-positive, or when elimination hits a numerically
/// zero pivot.
///
/// Fixes over the previous version: the `if i < n - 1` guard inside a loop
/// already bounded by `n - 1` was dead code, and the extra `d.clone()` for
/// back substitution was unnecessary (it can run in place on `d`).
fn tikhonov_smooth(v: &[f64], reg: f64, _h: f64) -> Vec<f64> {
    let n = v.len();
    if n < 3 || reg <= 0.0 {
        return v.to_vec();
    }
    let diag_val = 1.0 + 2.0 * reg;
    let off_val = -reg;
    // Forward sweep: c holds the modified super-diagonal, d the modified
    // right-hand side.
    let mut c = vec![0.0_f64; n - 1];
    let mut d = v.to_vec();
    c[0] = off_val / diag_val;
    d[0] /= diag_val;
    for i in 1..n - 1 {
        let denom = diag_val - off_val * c[i - 1];
        if denom.abs() < 1e-300 {
            return v.to_vec();
        }
        c[i] = off_val / denom;
        d[i] = (d[i] - off_val * d[i - 1]) / denom;
    }
    // Last row handled separately (it has no super-diagonal entry).
    let denom = diag_val - off_val * c[n - 2];
    if denom.abs() < 1e-300 {
        return v.to_vec();
    }
    d[n - 1] = (d[n - 1] - off_val * d[n - 2]) / denom;
    // Back substitution in place on d.
    for i in (0..n - 1).rev() {
        let correction = c[i] * d[i + 1];
        d[i] -= correction;
    }
    d
}
/// RMS residual of the forward Abel operator applied to the recovered `u`:
/// for each grid point x_i (i >= 1) the integral
/// ∫_0^{x_i} u(t) / (x_i - t)^alpha dt is approximated with trapezoid-style
/// weights over the nodes t_0..t_{i-1} (the singular endpoint t = x_i is
/// excluded), compared against fs[i], and the squared errors averaged.
fn compute_abel_residual(
    xs: &[f64],
    u: &[f64],
    fs: &[f64],
    alpha: f64,
    h: f64,
) -> f64 {
    let n = xs.len();
    let sq_sum: f64 = (1..n)
        .map(|i| {
            let xi = xs[i];
            let approx: f64 = (0..i)
                .filter_map(|j| {
                    let gap = xi - xs[j];
                    if gap <= 0.0 {
                        return None;
                    }
                    // Half weight at the first and last included nodes.
                    let weight = if j == 0 || j == i - 1 { 0.5 * h } else { h };
                    Some(weight * u[j] / gap.powf(alpha))
                })
                .sum();
            let diff = approx - fs[i];
            diff * diff
        })
        .sum();
    (sq_sum / (n - 1).max(1) as f64).sqrt()
}
#[cfg(test)]
mod tests {
    use super::*;
    /// u(x) = 1 + ∫_0^x u(t) dt has the exact solution u(x) = e^x;
    /// trapezoidal marching with 200 steps should be within a few percent.
    #[test]
    fn test_volterra_exponential() {
        let f = |_x: f64| 1.0_f64;
        let kernel = |_x: f64, _t: f64| 1.0_f64;
        let cfg = VolterraConfig {
            n: 200,
            b: 1.0,
            quadrature: QuadratureRule::Trapezoidal,
        };
        let result = VolterraIE2ndKind::solve(&f, &kernel, 1.0, &cfg)
            .expect("Volterra solve failed");
        let points = [0.0, 0.25, 0.5, 0.75, 1.0];
        for &xp in &points {
            // Nearest grid index for the sample abscissa.
            let idx = ((xp / cfg.b) * cfg.n as f64).round() as usize;
            if idx < result.u.len() {
                let exact = xp.exp();
                let err = (result.u[idx] - exact).abs();
                assert!(
                    err < 0.05,
                    "Volterra u({}) = {} but exact = {} (err = {})",
                    xp, result.u[idx], exact, err
                );
            }
        }
    }
    /// A zero kernel removes the integral term entirely, so u == f == 1.
    #[test]
    fn test_volterra_trivial() {
        let f = |_x: f64| 1.0_f64;
        let kernel = |_x: f64, _t: f64| 0.0_f64;
        let cfg = VolterraConfig {
            n: 50,
            b: 2.0,
            quadrature: QuadratureRule::Trapezoidal,
        };
        let result = VolterraIE2ndKind::solve(&f, &kernel, 1.0, &cfg)
            .expect("Trivial Volterra solve failed");
        for &u_val in &result.u {
            assert!((u_val - 1.0).abs() < 1e-12, "u should be 1 everywhere");
        }
    }
    /// Zero kernel, Fredholm flavor: the Nystrom system degenerates to the
    /// identity, so u at the nodes must reproduce f to round-off.
    #[test]
    fn test_fredholm_identity_kernel() {
        let f = |x: f64| (std::f64::consts::PI * x).sin();
        let kernel = |_x: f64, _t: f64| 0.0_f64;
        let cfg = FredholmConfig {
            a: 0.0,
            b: 1.0,
            n_quad: 10,
            lambda: 1.0,
        };
        let result = FredholmIE2ndKind::solve(&f, &kernel, &cfg)
            .expect("Fredholm solve failed");
        for (xi, ui) in result.x.iter().zip(result.u.iter()) {
            let exact = f(*xi);
            assert!(
                (ui - exact).abs() < 1e-10,
                "u({}) = {} != f({}) = {}",
                xi, ui, xi, exact
            );
        }
    }
    /// Separable kernel K(x, t) = x t: the exact solution is
    /// u(x) = sin(pi x) + lam * C * x with C = (1/pi) / (1 - lam/3),
    /// obtained by solving the scalar equation for C = ∫_0^1 t u(t) dt.
    #[test]
    fn test_fredholm_separable_kernel() {
        let lam = 0.5_f64;
        let c_exact = 1.0 / (std::f64::consts::PI * (1.0 - lam / 3.0));
        let f = |x: f64| (std::f64::consts::PI * x).sin();
        let kernel = |x: f64, t: f64| x * t;
        let cfg = FredholmConfig {
            a: 0.0,
            b: 1.0,
            n_quad: 16,
            lambda: lam,
        };
        let result = FredholmIE2ndKind::solve(&f, &kernel, &cfg)
            .expect("Fredholm separable solve failed");
        for (xi, ui) in result.x.iter().zip(result.u.iter()) {
            let exact = (std::f64::consts::PI * xi).sin() + lam * c_exact * xi;
            assert!(
                (ui - exact).abs() < 0.01,
                "u({:.3}) = {:.6} != exact {:.6}",
                xi, ui, exact
            );
        }
    }
    /// f(x) = 2 sqrt(x) with alpha = 1/2 corresponds to the exact solution
    /// u(x) = 1. Only the interior mean is checked (the endpoints use
    /// one-sided differences), with a deliberately loose tolerance.
    #[test]
    fn test_abel_constant() {
        let f = |x: f64| 2.0 * x.sqrt();
        let cfg = AbelConfig {
            n: 100,
            b: 1.0,
            alpha: 0.5,
            regularization: 1e-4,
        };
        let result = AbelEquation::solve(&f, &cfg).expect("Abel solve failed");
        let interior_start = cfg.n / 5;
        let interior_end = cfg.n * 4 / 5;
        let interior_u: Vec<f64> = result.u[interior_start..interior_end].to_vec();
        let mean_u: f64 = interior_u.iter().sum::<f64>() / interior_u.len() as f64;
        assert!(
            (mean_u - 1.0).abs() < 0.3,
            "Abel u mean in interior = {} (expected ~1.0)",
            mean_u
        );
    }
}