use crate::matrix::vector::Vector;
use crate::traits::FloatScalar;
use crate::Matrix;
use super::{apply_var_floor, cholesky_with_jitter, fd_jacobian, EstimateError};
/// Extended Kalman filter over an `N`-dimensional state with
/// `M`-dimensional measurements, generic over the scalar type `T`.
pub struct Ekf<T: FloatScalar, const N: usize, const M: usize> {
/// Current state estimate.
pub x: Vector<T, N>,
/// Current state covariance estimate.
pub p: Matrix<T, N, N>,
/// Lower bound applied to the diagonal of `p` after every
/// predict/update (via `apply_var_floor`); defaults to zero.
min_variance: T,
/// Fading-memory factor that scales `F P F^T` during prediction;
/// the default of one recovers the standard EKF.
gamma: T,
}
impl<T: FloatScalar, const N: usize, const M: usize> Ekf<T, N, M> {
pub fn new(x0: Vector<T, N>, p0: Matrix<T, N, N>) -> Self {
Self {
x: x0,
p: p0,
min_variance: T::zero(),
gamma: T::one(),
}
}
/// Builder-style setter for the variance floor applied to the
/// diagonal of `p` after every predict/update step.
///
/// Consumes and returns `self` so it can be chained after [`Ekf::new`];
/// `#[must_use]` catches the bug of calling this and dropping the
/// returned filter, which would silently discard the setting.
#[must_use]
pub fn with_min_variance(mut self, min_variance: T) -> Self {
    self.min_variance = min_variance;
    self
}
/// Builder-style setter for the fading-memory factor `gamma`.
///
/// During prediction the propagated covariance is multiplied by
/// `gamma`, so values greater than one inflate the covariance and
/// discount old data; the default of one recovers the standard EKF.
///
/// Consumes and returns `self` for chaining; `#[must_use]` catches
/// the bug of dropping the returned filter and losing the setting.
#[must_use]
pub fn with_fading_memory(mut self, gamma: T) -> Self {
    self.gamma = gamma;
    self
}
/// Returns a reference to the current state estimate.
#[inline]
pub fn state(&self) -> &Vector<T, N> {
&self.x
}
/// Returns a reference to the current state covariance.
#[inline]
pub fn covariance(&self) -> &Matrix<T, N, N> {
&self.p
}
/// EKF time update with an analytic Jacobian.
///
/// Propagates the mean through the transition function `f` and the
/// covariance through its Jacobian `fj`, scaled by the fading-memory
/// factor, then optionally adds process noise `q`. The covariance is
/// re-symmetrized and floored before being stored.
pub fn predict(
    &mut self,
    f: impl Fn(&Vector<T, N>) -> Vector<T, N>,
    fj: impl Fn(&Vector<T, N>) -> Matrix<T, N, N>,
    q: Option<&Matrix<T, N, N>>,
) {
    // Linearize about the current state before advancing it.
    let jac = fj(&self.x);
    self.x = f(&self.x);
    // Fading-memory propagation: P <- gamma * F P F^T (+ Q).
    let mut propagated = jac * self.p * jac.transpose() * self.gamma;
    if let Some(noise) = q {
        propagated = propagated + *noise;
    }
    // Enforce symmetry (guards against numerical drift) and floor
    // the diagonal variances.
    let half = T::from(0.5).unwrap();
    self.p = (propagated + propagated.transpose()) * half;
    apply_var_floor(&mut self.p, self.min_variance);
}
/// EKF time update using a finite-difference Jacobian of `f`.
///
/// Identical to [`Ekf::predict`] except that the transition Jacobian
/// is approximated numerically via `fd_jacobian` instead of being
/// supplied analytically.
pub fn predict_fd(
    &mut self,
    f: impl Fn(&Vector<T, N>) -> Vector<T, N>,
    q: Option<&Matrix<T, N, N>>,
) {
    // Numerically linearize about the current state, then advance it.
    let jac = fd_jacobian(&f, &self.x);
    self.x = f(&self.x);
    // Fading-memory propagation: P <- gamma * F P F^T (+ Q).
    let mut propagated = jac * self.p * jac.transpose() * self.gamma;
    if let Some(noise) = q {
        propagated = propagated + *noise;
    }
    // Enforce symmetry and floor the diagonal variances.
    let half = T::from(0.5).unwrap();
    self.p = (propagated + propagated.transpose()) * half;
    apply_var_floor(&mut self.p, self.min_variance);
}
/// EKF measurement update with an analytic measurement Jacobian.
///
/// Uses the Joseph-form covariance update and returns the normalized
/// innovation squared (NIS) for consistency monitoring. Fails with
/// `SingularInnovation` when the innovation covariance cannot be
/// factorized even after jitter.
pub fn update(
    &mut self,
    z: &Vector<T, M>,
    h: impl Fn(&Vector<T, N>) -> Vector<T, M>,
    hj: impl Fn(&Vector<T, N>) -> Matrix<T, M, N>,
    r: &Matrix<T, M, M>,
) -> Result<T, EstimateError> {
    // Innovation y = z - h(x) and its covariance S = H P H^T + R.
    let jac = hj(&self.x);
    let innovation = *z - h(&self.x);
    let s = jac * self.p * jac.transpose() + *r;
    let s_inv = cholesky_with_jitter(&s)
        .map_err(|_| EstimateError::SingularInnovation)?
        .inverse();
    // Kalman gain and the NIS statistic (computed pre-update).
    let gain = self.p * jac.transpose() * s_inv;
    let nis = (innovation.transpose() * s_inv * innovation)[(0, 0)];
    self.x = self.x + gain * innovation;
    // Joseph form keeps the covariance symmetric positive semi-definite.
    let identity: Matrix<T, N, N> = Matrix::eye();
    let residual_form = identity - gain * jac;
    self.p = residual_form * self.p * residual_form.transpose()
        + gain * *r * gain.transpose();
    let half = T::from(0.5).unwrap();
    self.p = (self.p + self.p.transpose()) * half;
    apply_var_floor(&mut self.p, self.min_variance);
    Ok(nis)
}
/// EKF measurement update using a finite-difference Jacobian of `h`.
///
/// Identical to [`Ekf::update`] except the measurement Jacobian is
/// approximated numerically via `fd_jacobian`. Returns the NIS, or
/// `SingularInnovation` if the innovation covariance is not
/// factorizable even with jitter.
pub fn update_fd(
    &mut self,
    z: &Vector<T, M>,
    h: impl Fn(&Vector<T, N>) -> Vector<T, M>,
    r: &Matrix<T, M, M>,
) -> Result<T, EstimateError> {
    // Innovation y = z - h(x) and its covariance S = H P H^T + R.
    let jac = fd_jacobian(&h, &self.x);
    let innovation = *z - h(&self.x);
    let s = jac * self.p * jac.transpose() + *r;
    let s_inv = cholesky_with_jitter(&s)
        .map_err(|_| EstimateError::SingularInnovation)?
        .inverse();
    // Kalman gain and the NIS statistic (computed pre-update).
    let gain = self.p * jac.transpose() * s_inv;
    let nis = (innovation.transpose() * s_inv * innovation)[(0, 0)];
    self.x = self.x + gain * innovation;
    // Joseph form keeps the covariance symmetric positive semi-definite.
    let identity: Matrix<T, N, N> = Matrix::eye();
    let residual_form = identity - gain * jac;
    self.p = residual_form * self.p * residual_form.transpose()
        + gain * *r * gain.transpose();
    let half = T::from(0.5).unwrap();
    self.p = (self.p + self.p.transpose()) * half;
    apply_var_floor(&mut self.p, self.min_variance);
    Ok(nis)
}
pub fn update_gated(
&mut self,
z: &Vector<T, M>,
h: impl Fn(&Vector<T, N>) -> Vector<T, M>,
hj: impl Fn(&Vector<T, N>) -> Matrix<T, M, N>,
r: &Matrix<T, M, M>,
gate: T,
) -> Result<Option<T>, EstimateError> {
let big_h = hj(&self.x);
let y = *z - h(&self.x);
let s = big_h * self.p * big_h.transpose() + *r;
let s_inv = cholesky_with_jitter(&s)
.map_err(|_| EstimateError::SingularInnovation)?
.inverse();
let nis = (y.transpose() * s_inv * y)[(0, 0)];
if nis > gate {
return Ok(None);
}
let nis = self.update(z, h, hj, r)?;
Ok(Some(nis))
}
pub fn update_fd_gated(
&mut self,
z: &Vector<T, M>,
h: impl Fn(&Vector<T, N>) -> Vector<T, M>,
r: &Matrix<T, M, M>,
gate: T,
) -> Result<Option<T>, EstimateError> {
let big_h = fd_jacobian(&h, &self.x);
let y = *z - h(&self.x);
let s = big_h * self.p * big_h.transpose() + *r;
let s_inv = cholesky_with_jitter(&s)
.map_err(|_| EstimateError::SingularInnovation)?
.inverse();
let nis = (y.transpose() * s_inv * y)[(0, 0)];
if nis > gate {
return Ok(None);
}
let nis = self.update_fd(z, h, r)?;
Ok(Some(nis))
}
/// Iterated EKF (IEKF) measurement update with an analytic Jacobian.
///
/// Re-linearizes `h` about the refined estimate on each iteration,
/// stopping after `max_iter` iterations or once the squared step norm
/// drops below `tol * tol`. With `max_iter == 0` the loop body never
/// runs and this reduces to a single update linearized at the
/// predicted state. Returns the NIS evaluated at the final
/// linearization point, or `SingularInnovation` if the innovation
/// covariance cannot be factorized even with jitter.
pub fn update_iterated(
&mut self,
z: &Vector<T, M>,
h: impl Fn(&Vector<T, N>) -> Vector<T, M>,
hj: impl Fn(&Vector<T, N>) -> Matrix<T, M, N>,
r: &Matrix<T, M, M>,
max_iter: usize,
tol: T,
) -> Result<T, EstimateError> {
// Keep the prior (predicted) mean and covariance fixed; every
// iteration re-linearizes but always corrects from the prior.
let x_pred = self.x;
let p_pred = self.p;
let tol_sq = tol * tol;
let mut x_iter = x_pred;
for _ in 0..max_iter {
let big_h = hj(&x_iter);
// IEKF residual: the innovation at x_iter re-anchored to the
// prediction via the H * (x_iter - x_pred) correction term.
let y = *z - h(&x_iter) + big_h * (x_iter - x_pred);
let s = big_h * p_pred * big_h.transpose() + *r;
let s_inv = cholesky_with_jitter(&s)
.map_err(|_| EstimateError::SingularInnovation)?
.inverse();
let k = p_pred * big_h.transpose() * s_inv;
// Each iterate restarts from the prior mean, not from x_iter.
let x_new = x_pred + k * y;
let delta = x_new - x_iter;
let sq_norm = delta.frobenius_norm_squared();
x_iter = x_new;
// Converged: the step was smaller than the tolerance.
if sq_norm < tol_sq {
break;
}
}
// Final pass: recompute the gain at the converged linearization
// point and apply the covariance update there.
let big_h = hj(&x_iter);
let s = big_h * p_pred * big_h.transpose() + *r;
let s_inv = cholesky_with_jitter(&s)
.map_err(|_| EstimateError::SingularInnovation)?
.inverse();
let k = p_pred * big_h.transpose() * s_inv;
let y_final = *z - h(&x_iter);
let nis = (y_final.transpose() * s_inv * y_final)[(0, 0)];
self.x = x_iter;
// Joseph-form covariance update from the prior covariance.
let eye: Matrix<T, N, N> = Matrix::eye();
let i_kh = eye - k * big_h;
self.p = i_kh * p_pred * i_kh.transpose() + k * *r * k.transpose();
// Re-symmetrize and floor the diagonal variances.
let half = T::from(0.5).unwrap();
self.p = (self.p + self.p.transpose()) * half;
apply_var_floor(&mut self.p, self.min_variance);
Ok(nis)
}
/// Iterated EKF (IEKF) measurement update with a finite-difference
/// Jacobian of `h` (via `fd_jacobian`).
///
/// Re-linearizes about the refined estimate each iteration, stopping
/// after `max_iter` iterations or once the squared step norm drops
/// below `tol * tol`. With `max_iter == 0` this reduces to a single
/// update linearized at the predicted state. Returns the NIS at the
/// final linearization point, or `SingularInnovation` if the
/// innovation covariance cannot be factorized even with jitter.
pub fn update_fd_iterated(
&mut self,
z: &Vector<T, M>,
h: impl Fn(&Vector<T, N>) -> Vector<T, M>,
r: &Matrix<T, M, M>,
max_iter: usize,
tol: T,
) -> Result<T, EstimateError> {
// Keep the prior (predicted) mean and covariance fixed; every
// iteration re-linearizes but always corrects from the prior.
let x_pred = self.x;
let p_pred = self.p;
let tol_sq = tol * tol;
let mut x_iter = x_pred;
for _ in 0..max_iter {
let big_h = fd_jacobian(&h, &x_iter);
// IEKF residual: the innovation at x_iter re-anchored to the
// prediction via the H * (x_iter - x_pred) correction term.
let y = *z - h(&x_iter) + big_h * (x_iter - x_pred);
let s = big_h * p_pred * big_h.transpose() + *r;
let s_inv = cholesky_with_jitter(&s)
.map_err(|_| EstimateError::SingularInnovation)?
.inverse();
let k = p_pred * big_h.transpose() * s_inv;
// Each iterate restarts from the prior mean, not from x_iter.
let x_new = x_pred + k * y;
let delta = x_new - x_iter;
let sq_norm = delta.frobenius_norm_squared();
x_iter = x_new;
// Converged: the step was smaller than the tolerance.
if sq_norm < tol_sq {
break;
}
}
// Final pass: recompute the gain at the converged linearization
// point and apply the covariance update there.
let big_h = fd_jacobian(&h, &x_iter);
let s = big_h * p_pred * big_h.transpose() + *r;
let s_inv = cholesky_with_jitter(&s)
.map_err(|_| EstimateError::SingularInnovation)?
.inverse();
let k = p_pred * big_h.transpose() * s_inv;
let y_final = *z - h(&x_iter);
let nis = (y_final.transpose() * s_inv * y_final)[(0, 0)];
self.x = x_iter;
// Joseph-form covariance update from the prior covariance.
let eye: Matrix<T, N, N> = Matrix::eye();
let i_kh = eye - k * big_h;
self.p = i_kh * p_pred * i_kh.transpose() + k * *r * k.transpose();
// Re-symmetrize and floor the diagonal variances.
let half = T::from(0.5).unwrap();
self.p = (self.p + self.p.transpose()) * half;
apply_var_floor(&mut self.p, self.min_variance);
Ok(nis)
}
}