use crate::traits::FloatScalar;
use super::{InterpError, validate_sorted};
/// Barycentric Lagrange interpolant over a fixed, compile-time number `N`
/// of sample points, stored inline (no heap allocation).
#[derive(Debug, Clone)]
pub struct LagrangeInterp<T, const N: usize> {
    /// Sample abscissae; checked by `validate_sorted` at construction.
    xs: [T; N],
    /// Sample ordinates paired element-wise with `xs`.
    ys: [T; N],
    /// Precomputed barycentric weights `w_j = 1 / prod_{k != j} (x_j - x_k)`.
    ws: [T; N],
}
impl<T: FloatScalar, const N: usize> LagrangeInterp<T, N> {
    /// Builds an interpolant from paired samples, precomputing the
    /// barycentric weights `w_j = 1 / prod_{k != j} (x_j - x_k)` in O(N^2).
    ///
    /// # Errors
    /// * `InterpError::TooFewPoints` when `N < 2`.
    /// * Whatever `validate_sorted` reports for ill-ordered abscissae.
    pub fn new(xs: [T; N], ys: [T; N]) -> Result<Self, InterpError> {
        if N < 2 {
            return Err(InterpError::TooFewPoints);
        }
        validate_sorted(&xs)?;
        let mut ws = [T::one(); N];
        for (j, w) in ws.iter_mut().enumerate() {
            let xj = xs[j];
            // Sequential divisions (same order as a plain index loop) keep
            // the floating-point result bit-identical to the naive form.
            for (k, &xk) in xs.iter().enumerate() {
                if k != j {
                    *w = *w / (xj - xk);
                }
            }
        }
        Ok(Self { xs, ys, ws })
    }

    /// Interpolated value at `x`.
    pub fn eval(&self, x: T) -> T {
        barycentric_eval(&self.xs, &self.ys, &self.ws, x)
    }

    /// Interpolated value and first derivative at `x`, as `(p(x), p'(x))`.
    pub fn eval_derivative(&self, x: T) -> (T, T) {
        barycentric_eval_deriv(&self.xs, &self.ys, &self.ws, x)
    }

    /// Borrows the sample abscissae.
    pub fn xs(&self) -> &[T; N] {
        &self.xs
    }

    /// Borrows the sample ordinates.
    pub fn ys(&self) -> &[T; N] {
        &self.ys
    }
}
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "alloc")]
#[derive(Debug, Clone)]
pub struct DynLagrangeInterp<T> {
xs: Vec<T>,
ys: Vec<T>,
ws: Vec<T>,
}
#[cfg(feature = "alloc")]
impl<T: FloatScalar> DynLagrangeInterp<T> {
pub fn new(xs: Vec<T>, ys: Vec<T>) -> Result<Self, InterpError> {
if xs.len() != ys.len() {
return Err(InterpError::LengthMismatch);
}
if xs.len() < 2 {
return Err(InterpError::TooFewPoints);
}
validate_sorted(&xs)?;
let n = xs.len();
let mut ws = alloc::vec![T::one(); n];
for j in 0..n {
for k in 0..n {
if k != j {
ws[j] = ws[j] / (xs[j] - xs[k]);
}
}
}
Ok(Self { xs, ys, ws })
}
pub fn eval(&self, x: T) -> T {
barycentric_eval(&self.xs, &self.ys, &self.ws, x)
}
pub fn eval_derivative(&self, x: T) -> (T, T) {
barycentric_eval_deriv(&self.xs, &self.ys, &self.ws, x)
}
pub fn xs(&self) -> &[T] {
&self.xs
}
pub fn ys(&self) -> &[T] {
&self.ys
}
}
/// Evaluates the second ("true") barycentric interpolation formula
///
/// `p(x) = (sum_j w_j y_j / (x - x_j)) / (sum_j w_j / (x - x_j))`
///
/// over nodes `xs`, values `ys`, and precomputed weights `ws`.
/// Callers must pass equal-length slices; indexing panics otherwise.
///
/// If `x` lies within ~1e3 machine epsilons of a node, the sample value is
/// returned directly: the formula is 0/0-singular at a node, but the
/// interpolant there is exactly `ys[j]`.
fn barycentric_eval<T: FloatScalar>(xs: &[T], ys: &[T], ws: &[T], x: T) -> T {
    // Node-coincidence tolerance of 1e3 * eps. Fix: fall back to plain eps
    // (factor one) when `T` cannot represent 1e3 — the previous fallback of
    // `T::epsilon()` yielded eps^2, effectively disabling the shortcut and
    // allowing a near-singular division by a denormal `diff`.
    let eps = T::epsilon() * T::from(1e3).unwrap_or_else(|| T::one());
    let mut numer = T::zero();
    let mut denom = T::zero();
    for j in 0..xs.len() {
        let diff = x - xs[j];
        if diff.abs() < eps {
            return ys[j];
        }
        let term = ws[j] / diff;
        numer = numer + term * ys[j];
        denom = denom + term;
    }
    numer / denom
}
/// Evaluates the barycentric interpolant and its first derivative at `x`,
/// returning `(p(x), p'(x))`. Callers must pass equal-length slices;
/// indexing panics otherwise.
fn barycentric_eval_deriv<T: FloatScalar>(xs: &[T], ys: &[T], ws: &[T], x: T) -> (T, T) {
    // Same node-coincidence tolerance as `barycentric_eval`. Fix: fall back
    // to plain eps (factor one) when `T` cannot represent 1e3 — the old
    // `unwrap_or(T::epsilon())` collapsed the tolerance to eps^2.
    let eps = T::epsilon() * T::from(1e3).unwrap_or_else(|| T::one());
    // On a node the quotient-rule expression below is 0/0, so use the
    // differentiation-matrix identity instead:
    //   p'(x_j) = -sum_{k != j} (w_k / w_j) * (y_j - y_k) / (x_j - x_k).
    for j in 0..xs.len() {
        let diff = x - xs[j];
        if diff.abs() < eps {
            let mut deriv = T::zero();
            for k in 0..xs.len() {
                if k != j {
                    deriv = deriv - ws[k] / ws[j] * (ys[j] - ys[k]) / (xs[j] - xs[k]);
                }
            }
            return (ys[j], deriv);
        }
    }
    // Off-node: p = n/d with n = sum_j w_j y_j / (x - x_j) and
    // d = sum_j w_j / (x - x_j); differentiate termwise
    // (d/dx [w/(x - x_j)] = -w/(x - x_j)^2) and apply the quotient rule.
    let mut n = T::zero();
    let mut d = T::zero();
    let mut np = T::zero(); // n'
    let mut dp = T::zero(); // d'
    for j in 0..xs.len() {
        let diff = x - xs[j];
        let inv = T::one() / diff;
        let inv2 = inv * inv;
        let wj = ws[j];
        let term = wj * inv;
        n = n + term * ys[j];
        d = d + term;
        np = np - wj * inv2 * ys[j];
        dp = dp - wj * inv2;
    }
    let val = n / d;
    let dval = (np * d - n * dp) / (d * d);
    (val, dval)
}