use crate::error::{InterpolateError, InterpolateResult};
use scirs2_core::ndarray::{Array1, Array2};
use std::f64::consts::PI;
/// Computes the Newton divided-difference coefficients for nodes `x` and
/// sampled values `y`.
///
/// The returned vector `dd` satisfies `dd[k] = f[x_0, ..., x_k]`, the form
/// consumed by [`newton_polynomial`].
///
/// # Errors
/// - `InsufficientData` when `x` is empty.
/// - `DimensionMismatch` when `x` and `y` have different lengths.
/// - `InvalidInput` when two nodes coincide to within `1e-14`.
pub fn newton_divided_differences(x: &[f64], y: &[f64]) -> InterpolateResult<Vec<f64>> {
    let n = x.len();
    if n == 0 {
        return Err(InterpolateError::InsufficientData(
            "newton_divided_differences requires at least one node".to_string(),
        ));
    }
    if n != y.len() {
        return Err(InterpolateError::DimensionMismatch(format!(
            "x has {n} elements but y has {} elements",
            y.len()
        )));
    }
    // Divided differences require pairwise-distinct nodes; reject duplicates
    // up front so the recurrence below never divides by (near-)zero.
    for (i, &xi) in x.iter().enumerate() {
        for (j, &xj) in x.iter().enumerate().skip(i + 1) {
            if (xi - xj).abs() < 1e-14 {
                return Err(InterpolateError::InvalidInput {
                    message: format!(
                        "Duplicate nodes x[{i}] = x[{j}] = {xi} detected in Newton divided differences"
                    ),
                });
            }
        }
    }
    // In-place table update: after pass `order`, coeffs[k] holds the
    // order-`order` difference f[x_{k-order}, ..., x_k] for k >= order.
    // Walking k downward preserves the lower-order entries still needed.
    let mut coeffs: Vec<f64> = y.to_vec();
    for order in 1..n {
        for k in (order..n).rev() {
            coeffs[k] = (coeffs[k] - coeffs[k - 1]) / (x[k] - x[k - order]);
        }
    }
    Ok(coeffs)
}
/// Evaluates the Newton-form interpolating polynomial at `t` using nested
/// (Horner-style) multiplication.
///
/// `x_data` are the interpolation nodes and `dd_table` the matching divided
/// differences from [`newton_divided_differences`].
///
/// # Errors
/// - `InsufficientData` when `x_data` is empty.
/// - `DimensionMismatch` when the node and coefficient counts differ.
pub fn newton_polynomial(x_data: &[f64], dd_table: &[f64], t: f64) -> InterpolateResult<f64> {
    let n = x_data.len();
    if n == 0 {
        return Err(InterpolateError::InsufficientData(
            "newton_polynomial requires at least one node".to_string(),
        ));
    }
    if n != dd_table.len() {
        return Err(InterpolateError::DimensionMismatch(format!(
            "x_data has {n} nodes but dd_table has {} entries",
            dd_table.len()
        )));
    }
    // Fold from the highest-order coefficient downward:
    // p <- dd[k] + p * (t - x[k]) for k = n-2, ..., 0.
    let value = x_data[..n - 1]
        .iter()
        .zip(&dd_table[..n - 1])
        .rev()
        .fold(dd_table[n - 1], |acc, (&xk, &dk)| dk + acc * (t - xk));
    Ok(value)
}
/// Evaluates the Lagrange interpolating polynomial through the points
/// `(x_data[i], y_data[i])` at `t`.
///
/// Uses the direct O(n^2) basis-polynomial formula; intended for small node
/// counts.
///
/// # Errors
/// - `InsufficientData` when `x_data` is empty.
/// - `DimensionMismatch` when `x_data` and `y_data` differ in length.
/// - `InvalidInput` when two nodes coincide to within `1e-14`.
pub fn lagrange_interpolate(x_data: &[f64], y_data: &[f64], t: f64) -> InterpolateResult<f64> {
    let n = x_data.len();
    if n == 0 {
        return Err(InterpolateError::InsufficientData(
            "lagrange_interpolate requires at least one node".to_string(),
        ));
    }
    if n != y_data.len() {
        return Err(InterpolateError::DimensionMismatch(format!(
            "x_data has {n} elements but y_data has {} elements",
            y_data.len()
        )));
    }
    // Duplicate nodes would make a basis denominator vanish; reject them.
    for (i, &xi) in x_data.iter().enumerate() {
        for (j, &xj) in x_data.iter().enumerate().skip(i + 1) {
            if (xi - xj).abs() < 1e-14 {
                return Err(InterpolateError::InvalidInput {
                    message: format!(
                        "Duplicate nodes x[{i}] = x[{j}] = {xi} in Lagrange interpolation"
                    ),
                });
            }
        }
    }
    // result = sum_i y_i * L_i(t), where L_i(t) = prod_{j != i} (t - x_j)/(x_i - x_j).
    let mut result = 0.0_f64;
    for (i, &yi) in y_data.iter().enumerate() {
        let basis: f64 = x_data
            .iter()
            .enumerate()
            .filter(|&(j, _)| j != i)
            .map(|(_, &xj)| (t - xj) / (x_data[i] - xj))
            .product();
        result += yi * basis;
    }
    Ok(result)
}
/// Returns the `n` Chebyshev (first-kind) nodes mapped onto the interval
/// `(a, b)`.
///
/// The nodes are the projections of equally spaced angles on the half
/// circle, `cos((2k + 1) * pi / (2n))`, scaled from `[-1, 1]` to `[a, b]`;
/// they lie strictly inside the open interval.
///
/// # Errors
/// `InvalidInput` when `n == 0` or when `a >= b`.
pub fn chebyshev_nodes(a: f64, b: f64, n: usize) -> InterpolateResult<Vec<f64>> {
    if n == 0 {
        return Err(InterpolateError::InvalidInput {
            message: "chebyshev_nodes requires n >= 1".to_string(),
        });
    }
    if a >= b {
        return Err(InterpolateError::InvalidInput {
            message: format!("chebyshev_nodes requires a < b, got a={a}, b={b}"),
        });
    }
    // Affine map from [-1, 1] onto [a, b]: t -> center + radius * t.
    let center = (a + b) * 0.5;
    let radius = (b - a) * 0.5;
    let nf = n as f64;
    let mut nodes = Vec::with_capacity(n);
    for k in 0..n {
        let angle = (2.0 * k as f64 + 1.0) * PI / (2.0 * nf);
        nodes.push(center + radius * angle.cos());
    }
    Ok(nodes)
}
/// Builds the Vandermonde matrix for nodes `x` and a polynomial of the given
/// `degree`: entry `(i, j)` is `x[i]^j`, so the matrix has `x.len()` rows and
/// `degree + 1` columns.
///
/// # Errors
/// `InsufficientData` when `x` is empty.
pub fn vandermonde_matrix(x: &[f64], degree: usize) -> InterpolateResult<Array2<f64>> {
    if x.is_empty() {
        return Err(InterpolateError::InsufficientData(
            "vandermonde_matrix requires at least one node".to_string(),
        ));
    }
    let cols = degree + 1;
    let mut matrix = Array2::zeros((x.len(), cols));
    for (row, &xi) in x.iter().enumerate() {
        // Fill the row with successive powers 1, xi, xi^2, ..., xi^degree.
        let mut power = 1.0_f64;
        for col in 0..cols {
            matrix[[row, col]] = power;
            power *= xi;
        }
    }
    Ok(matrix)
}
/// Fits a bivariate polynomial `z ~ sum_{i,j} c[i][j] * x^i * y^j` with
/// `0 <= i <= degree_x`, `0 <= j <= degree_y` by linear least squares.
///
/// Returns the coefficient matrix (shape `(degree_x + 1, degree_y + 1)`)
/// together with the root-mean-square residual of the fit over the data.
///
/// # Errors
/// - `InsufficientData` when no points are given or when there are fewer
///   points than polynomial terms.
/// - `DimensionMismatch` when the three input slices differ in length.
/// - `LinalgError` (from the QR solve) when the design matrix is
///   rank-deficient.
pub fn bivariate_polynomial(
    x_data: &[f64],
    y_data: &[f64],
    z_data: &[f64],
    degree_x: usize,
    degree_y: usize,
) -> InterpolateResult<(Array2<f64>, f64)> {
    let m = x_data.len();
    if m == 0 {
        return Err(InterpolateError::InsufficientData(
            "bivariate_polynomial requires at least one data point".to_string(),
        ));
    }
    if y_data.len() != m || z_data.len() != m {
        return Err(InterpolateError::DimensionMismatch(format!(
            "x_data, y_data, z_data must all have the same length (x: {}, y: {}, z: {})",
            m,
            y_data.len(),
            z_data.len()
        )));
    }
    let n_terms = (degree_x + 1) * (degree_y + 1);
    // The least-squares system must be square or overdetermined.
    if m < n_terms {
        return Err(InterpolateError::InsufficientData(format!(
            "bivariate_polynomial needs at least {} data points for degree ({degree_x},{degree_y}), got {m}",
            n_terms
        )));
    }
    // Row-major design matrix: one row per point, columns ordered by
    // col = i * (degree_y + 1) + j, matching the coefficient layout below.
    let mut design = Vec::with_capacity(m * n_terms);
    for (&xv, &yv) in x_data.iter().zip(y_data) {
        let mut xpow = 1.0_f64;
        for _ in 0..=degree_x {
            let mut ypow = 1.0_f64;
            for _ in 0..=degree_y {
                design.push(xpow * ypow);
                ypow *= yv;
            }
            xpow *= xv;
        }
    }
    let solution = least_squares_qr(&design, m, n_terms, z_data)?;
    // RMS residual of the fitted surface over the input points.
    let mut sq_sum = 0.0_f64;
    for (row, &z) in z_data.iter().enumerate() {
        let fitted: f64 = design[row * n_terms..(row + 1) * n_terms]
            .iter()
            .zip(&solution)
            .map(|(&a, &c)| a * c)
            .sum();
        let resid = z - fitted;
        sq_sum += resid * resid;
    }
    let rms = (sq_sum / m as f64).sqrt();
    // Reshape the flat coefficient vector into (degree_x + 1, degree_y + 1).
    let mut coeffs = Array2::zeros((degree_x + 1, degree_y + 1));
    for i in 0..=degree_x {
        for j in 0..=degree_y {
            coeffs[[i, j]] = solution[i * (degree_y + 1) + j];
        }
    }
    Ok((coeffs, rms))
}
/// Evaluates a bivariate polynomial at `(x, y)` given its coefficient matrix:
/// `p(x, y) = sum_{i,j} coeffs[[i, j]] * x^i * y^j`.
///
/// The degrees are implied by the matrix shape, matching the layout produced
/// by [`bivariate_polynomial`].
pub fn eval_bivariate_polynomial(coeffs: &Array2<f64>, x: f64, y: f64) -> f64 {
    let mut total = 0.0_f64;
    // Accumulate powers incrementally instead of calling powi per term.
    let mut xpow = 1.0_f64;
    for i in 0..coeffs.nrows() {
        let mut ypow = 1.0_f64;
        for j in 0..coeffs.ncols() {
            total += coeffs[[i, j]] * xpow * ypow;
            ypow *= y;
        }
        xpow *= x;
    }
    total
}
/// Solves the dense least-squares problem `min_x ||A x - b||_2` using
/// Householder QR factorization followed by back substitution.
///
/// `a_flat` holds the `nrows x ncols` matrix A in row-major order and `b` is
/// the right-hand side of length `nrows`. The system must be square or
/// overdetermined (`nrows >= ncols`).
///
/// # Errors
/// Returns `LinalgError` when the system is underdetermined, or when a
/// diagonal entry of R is numerically zero (rank-deficient A).
///
/// # Panics
/// Debug-style `assert_eq!` checks fire if the slice lengths are inconsistent
/// with `nrows`/`ncols` (caller bug).
fn least_squares_qr(a_flat: &[f64], nrows: usize, ncols: usize, b: &[f64]) -> InterpolateResult<Vec<f64>> {
    assert_eq!(a_flat.len(), nrows * ncols);
    assert_eq!(b.len(), nrows);
    // Back substitution below reads rows 0..ncols of R, which only exist when
    // nrows >= ncols. Previously an underdetermined call panicked with an
    // out-of-bounds index; report a proper error instead.
    if nrows < ncols {
        return Err(InterpolateError::LinalgError(format!(
            "least_squares_qr: underdetermined system ({nrows} rows < {ncols} columns)"
        )));
    }
    // Working copies: A as row vectors; rhs accumulates Q^T b in place.
    let mut a: Vec<Vec<f64>> = (0..nrows)
        .map(|i| a_flat[i * ncols..(i + 1) * ncols].to_vec())
        .collect();
    let mut rhs: Vec<f64> = b.to_vec();
    let min_dim = nrows.min(ncols);
    for k in 0..min_dim {
        // Build the Householder reflector that zeroes column k below the diagonal.
        let col_norm_sq: f64 = (k..nrows).map(|i| a[i][k].powi(2)).sum();
        if col_norm_sq < 1e-28 {
            // Column already (numerically) zero; rank deficiency is caught
            // later by the pivot check in back substitution.
            continue;
        }
        let col_norm = col_norm_sq.sqrt();
        // sigma takes the sign of a[k][k] so v[0] = a[k][k] + sigma adds
        // magnitudes and avoids cancellation.
        let sigma = if a[k][k] >= 0.0 { col_norm } else { -col_norm };
        let mut v: Vec<f64> = (k..nrows).map(|i| a[i][k]).collect();
        v[0] += sigma;
        let v_norm_sq: f64 = v.iter().map(|x| x * x).sum();
        if v_norm_sq < 1e-28 {
            continue;
        }
        let beta = 2.0 / v_norm_sq;
        // Apply H = I - beta * v v^T to the trailing columns of A ...
        for j in k..ncols {
            let dot: f64 = (k..nrows).map(|i| v[i - k] * a[i][j]).sum();
            let c = beta * dot;
            for i in k..nrows {
                a[i][j] -= c * v[i - k];
            }
        }
        // ... and to the right-hand side.
        let dot_rhs: f64 = (k..nrows).map(|i| v[i - k] * rhs[i]).sum();
        let c = beta * dot_rhs;
        for i in k..nrows {
            rhs[i] -= c * v[i - k];
        }
    }
    // Back substitution: solve the upper-triangular system R x = (Q^T b)[0..ncols].
    let mut x = vec![0.0_f64; ncols];
    for k in (0..ncols).rev() {
        let mut s = rhs[k];
        for j in (k + 1)..ncols {
            s -= a[k][j] * x[j];
        }
        let pivot = a[k][k];
        if pivot.abs() < 1e-14 {
            return Err(InterpolateError::LinalgError(format!(
                "bivariate_polynomial: Vandermonde system is singular at pivot {k} (|pivot|={:.2e}). \
                 Reduce polynomial degree or add more data points.",
                pivot.abs()
            )));
        }
        x[k] = s / pivot;
    }
    Ok(x)
}
/// Builds a reusable closure that evaluates the Newton interpolating
/// polynomial through `(x[i], y[i])` at any point.
///
/// The divided differences are computed once up front; the closure returns
/// `f64::NAN` if evaluation ever fails (it cannot for the captured,
/// already-validated data).
///
/// # Errors
/// Propagates any error from [`newton_divided_differences`] (empty input,
/// length mismatch, duplicate nodes).
pub fn make_newton_polynomial(
    x: Vec<f64>,
    y: &[f64],
) -> InterpolateResult<impl Fn(f64) -> f64> {
    let table = newton_divided_differences(&x, y)?;
    Ok(move |t: f64| match newton_polynomial(&x, &table, t) {
        Ok(value) => value,
        Err(_) => f64::NAN,
    })
}
/// Samples `f` at `n` Chebyshev nodes on `[a, b]` and returns the nodes
/// together with the Newton divided-difference coefficients, ready for
/// evaluation with [`newton_polynomial`].
///
/// # Errors
/// Propagates errors from [`chebyshev_nodes`] (`n == 0`, `a >= b`) or from
/// [`newton_divided_differences`].
pub fn chebyshev_newton_polynomial(
    a: f64,
    b: f64,
    n: usize,
    f: &dyn Fn(f64) -> f64,
) -> InterpolateResult<(Vec<f64>, Vec<f64>)> {
    let nodes = chebyshev_nodes(a, b, n)?;
    // Sample the target function at each node.
    let mut samples = Vec::with_capacity(nodes.len());
    for &node in &nodes {
        samples.push(f(node));
    }
    let table = newton_divided_differences(&nodes, &samples)?;
    Ok((nodes, table))
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_newton_dd_quadratic() {
        // f(x) = (x - 1)^2 sampled at 0, 1, 2 has differences 1, -1, 1.
        let x = vec![0.0_f64, 1.0, 2.0];
        let y = vec![1.0_f64, 0.0, 1.0];
        let dd = newton_divided_differences(&x, &y).expect("test: should succeed");
        assert!((dd[0] - 1.0).abs() < 1e-12, "dd[0]={}", dd[0]);
        assert!((dd[1] - (-1.0)).abs() < 1e-12, "dd[1]={}", dd[1]);
        assert!((dd[2] - 1.0).abs() < 1e-12, "dd[2]={}", dd[2]);
    }

    #[test]
    fn test_newton_dd_constant() {
        // All higher-order differences of a constant function vanish.
        let x = vec![0.0, 1.0, 2.0, 3.0];
        let y = vec![5.0, 5.0, 5.0, 5.0];
        let dd = newton_divided_differences(&x, &y).expect("test: should succeed");
        assert!((dd[0] - 5.0).abs() < 1e-12);
        for k in 1..dd.len() {
            assert!(dd[k].abs() < 1e-10, "Higher dd[{k}]={} should be 0", dd[k]);
        }
    }

    #[test]
    fn test_newton_dd_duplicate_nodes_error() {
        let x = vec![0.0, 1.0, 1.0];
        let y = vec![0.0, 1.0, 2.0];
        assert!(newton_divided_differences(&x, &y).is_err());
    }

    #[test]
    fn test_newton_dd_length_mismatch() {
        let x = vec![0.0, 1.0];
        let y = vec![0.0, 1.0, 2.0];
        assert!(newton_divided_differences(&x, &y).is_err());
    }

    #[test]
    fn test_newton_polynomial_quadratic() {
        // Interpolant of (x - 1)^2 reproduces it exactly at and between nodes.
        let x = vec![0.0, 1.0, 2.0];
        let y = vec![1.0, 0.0, 1.0];
        let dd = newton_divided_differences(&x, &y).expect("test: should succeed");
        let cases = [(0.0, 1.0), (0.5, 0.25), (1.0, 0.0), (1.5, 0.25), (2.0, 1.0)];
        for &(t, expected) in &cases {
            let val = newton_polynomial(&x, &dd, t).expect("test: should succeed");
            assert!(
                (val - expected).abs() < 1e-10,
                "newton_polynomial({t}) = {val}, expected {expected}"
            );
        }
    }

    #[test]
    fn test_lagrange_linear() {
        // Two points define the line y = 2t.
        let x = vec![0.0, 1.0];
        let y = vec![0.0, 2.0];
        for &t in &[0.0, 0.25, 0.5, 1.0] {
            let val = lagrange_interpolate(&x, &y, t).expect("test: should succeed");
            assert!((val - 2.0 * t).abs() < 1e-12, "t={t}: val={val}");
        }
    }

    #[test]
    fn test_lagrange_quadratic() {
        // Samples of y = x^2; the interpolant gives 1.5^2 = 2.25.
        let x = vec![0.0, 1.0, 2.0];
        let y = vec![0.0, 1.0, 4.0];
        let val = lagrange_interpolate(&x, &y, 1.5).expect("test: should succeed");
        assert!((val - 2.25).abs() < 1e-10, "val={val}");
    }

    #[test]
    fn test_lagrange_exact_at_nodes() {
        let x = vec![-1.0, 0.0, 1.0, 2.0];
        let y = vec![1.0, 0.0, 1.0, 4.0];
        for i in 0..x.len() {
            let val = lagrange_interpolate(&x, &y, x[i]).expect("test: should succeed");
            assert!((val - y[i]).abs() < 1e-10, "Node {i}: val={val}, expected={}", y[i]);
        }
    }

    #[test]
    fn test_chebyshev_nodes_count() {
        for n in 1..=10 {
            let nodes = chebyshev_nodes(-1.0, 1.0, n).expect("test: should succeed");
            assert_eq!(nodes.len(), n);
        }
    }

    #[test]
    fn test_chebyshev_nodes_bounds() {
        // Chebyshev nodes lie strictly inside the open interval.
        let nodes = chebyshev_nodes(-2.0, 3.0, 10).expect("test: should succeed");
        for &x in &nodes {
            assert!(x > -2.0 && x < 3.0, "Node {x} out of (-2, 3)");
        }
    }

    #[test]
    fn test_chebyshev_nodes_symmetry() {
        // On a symmetric interval the node set is symmetric about 0.
        let nodes = chebyshev_nodes(-1.0, 1.0, 5).expect("test: should succeed");
        let mut sorted = nodes.clone();
        sorted.sort_by(|a, b| a.partial_cmp(b).expect("test: should succeed"));
        for k in 0..sorted.len() / 2 {
            assert!(
                (sorted[k] + sorted[sorted.len() - 1 - k]).abs() < 1e-12,
                "Asymmetry at k={k}"
            );
        }
    }

    #[test]
    fn test_chebyshev_bad_interval() {
        assert!(chebyshev_nodes(1.0, 0.0, 5).is_err());
        assert!(chebyshev_nodes(0.0, 0.0, 5).is_err());
        assert!(chebyshev_nodes(0.0, 1.0, 0).is_err());
    }

    #[test]
    fn test_vandermonde_basic() {
        let x = vec![0.0_f64, 1.0, 2.0];
        let v = vandermonde_matrix(&x, 2).expect("test: should succeed");
        assert_eq!(v.shape(), &[3, 3]);
        assert!((v[[0, 0]] - 1.0).abs() < 1e-12);
        assert!((v[[2, 2]] - 4.0).abs() < 1e-12);
    }

    #[test]
    fn test_vandermonde_degree_0() {
        // Degree 0 gives a single all-ones column.
        let x = vec![3.0_f64, 7.0];
        let v = vandermonde_matrix(&x, 0).expect("test: should succeed");
        assert_eq!(v.shape(), &[2, 1]);
        assert!((v[[0, 0]] - 1.0).abs() < 1e-12);
        assert!((v[[1, 0]] - 1.0).abs() < 1e-12);
    }

    #[test]
    fn test_bivariate_linear() {
        // z = x + y is exactly representable at degree (1, 1).
        let x = vec![0.0, 1.0, 0.0, 1.0, 0.5];
        let y = vec![0.0, 0.0, 1.0, 1.0, 0.5];
        let z: Vec<f64> = x.iter().zip(y.iter()).map(|(xi, yi)| xi + yi).collect();
        let (coeffs, rms) = bivariate_polynomial(&x, &y, &z, 1, 1).expect("test: should succeed");
        assert!(rms < 1e-8, "RMS={rms}");
        assert!((coeffs[[1, 0]] - 1.0).abs() < 1e-6, "c[1,0]={}", coeffs[[1, 0]]);
        assert!((coeffs[[0, 1]] - 1.0).abs() < 1e-6, "c[0,1]={}", coeffs[[0, 1]]);
    }

    #[test]
    fn test_bivariate_eval() {
        // p(x, y) = 1 + 2x + 3y, so p(2, 3) = 14.
        let mut c = Array2::zeros((2, 2));
        c[[0, 0]] = 1.0;
        c[[1, 0]] = 2.0;
        c[[0, 1]] = 3.0;
        let val = eval_bivariate_polynomial(&c, 2.0, 3.0);
        assert!((val - 14.0).abs() < 1e-12, "val={val}");
    }

    #[test]
    fn test_bivariate_too_few_points() {
        // 2 points cannot determine the 4 terms of a (1, 1) fit.
        let x = vec![0.0, 1.0];
        let y = vec![0.0, 1.0];
        let z = vec![0.0, 1.0];
        assert!(bivariate_polynomial(&x, &y, &z, 1, 1).is_err());
    }

    #[test]
    fn test_chebyshev_newton_polynomial_approx() {
        // Degree-7 Chebyshev interpolant of sin on [0, pi] is accurate to ~1e-5.
        let (nodes, dd) = chebyshev_newton_polynomial(0.0, std::f64::consts::PI, 8, &f64::sin)
            .expect("test: should succeed");
        for &t in &[0.3, 0.8, 1.5, 2.0, 2.7] {
            let val = newton_polynomial(&nodes, &dd, t).expect("test: should succeed");
            let expected = t.sin();
            assert!(
                (val - expected).abs() < 1e-5,
                "sin({t}) ≈ {val}, expected {expected}"
            );
        }
    }

    #[test]
    fn test_make_newton_polynomial() {
        // Data from y = x^2; interpolant gives 2.5^2 = 6.25.
        let x = vec![0.0, 1.0, 2.0, 3.0];
        let y = vec![0.0, 1.0, 4.0, 9.0];
        let poly = make_newton_polynomial(x, &y).expect("test: should succeed");
        assert!((poly(2.5) - 6.25).abs() < 1e-8, "poly(2.5)={}", poly(2.5));
    }
}