extern crate alloc;
use alloc::vec::Vec;
use crate::matrix::vector::Vector;
use crate::traits::FloatScalar;
use crate::Matrix;
use super::{apply_var_floor, cholesky_with_jitter, EstimateError};
/// Cubature Kalman filter (CKF) over an `N`-dimensional state with
/// `M`-dimensional measurements.
///
/// Uses the 2N symmetric cubature points `x ± sqrt(N)·L·e_i` (see
/// `cubature_points`), so no tuning parameters are required beyond the
/// optional variance floor and fading-memory factor below.
pub struct Ckf<T: FloatScalar, const N: usize, const M: usize> {
/// Current state estimate.
pub x: Vector<T, N>,
/// Current state covariance estimate.
pub p: Matrix<T, N, N>,
// Lower bound enforced on the diagonal of `p` after each predict/update
// (0 = disabled; set via `with_min_variance`).
min_variance: T,
// Fading-memory factor multiplied into the propagated covariance during
// `predict` (1 = standard filter; set via `with_fading_memory`).
gamma: T,
}
impl<T: FloatScalar, const N: usize, const M: usize> Ckf<T, N, M> {
    /// Creates a filter from an initial state `x0` and covariance `p0`.
    ///
    /// Defaults: no variance floor (`min_variance = 0`) and no fading
    /// memory (`gamma = 1`).
    pub fn new(x0: Vector<T, N>, p0: Matrix<T, N, N>) -> Self {
        Self {
            x: x0,
            p: p0,
            min_variance: T::zero(),
            gamma: T::one(),
        }
    }

    /// Builder: lower bound applied to the diagonal of `p` after every
    /// predict/update step.
    pub fn with_min_variance(mut self, min_variance: T) -> Self {
        self.min_variance = min_variance;
        self
    }

    /// Builder: fading-memory factor applied to the propagated covariance
    /// in `predict` (values > 1 discount old information).
    pub fn with_fading_memory(mut self, gamma: T) -> Self {
        self.gamma = gamma;
        self
    }

    /// Current state estimate.
    #[inline]
    pub fn state(&self) -> &Vector<T, N> {
        &self.x
    }

    /// Current state covariance.
    #[inline]
    pub fn covariance(&self) -> &Matrix<T, N, N> {
        &self.p
    }

    /// Uniform cubature weight `1 / (2N)`.
    #[inline]
    fn weight() -> T {
        T::one() / T::from(2 * N).unwrap()
    }

    /// Generates the 2N cubature points `x ± sqrt(N)·L·e_i`, where `l` is a
    /// Cholesky factor of the covariance (columns of `l` are scaled and
    /// added/subtracted from the mean).
    fn cubature_points(x: &Vector<T, N>, l: &Matrix<T, N, N>) -> Vec<Vector<T, N>> {
        let sqrt_n = T::from(N).unwrap().sqrt();
        let mut points = Vec::with_capacity(2 * N);
        for i in 0..N {
            let mut pos = *x;
            let mut neg = *x;
            for ri in 0..N {
                let offset = sqrt_n * l[(ri, i)];
                pos[ri] = pos[ri] + offset;
                neg[ri] = neg[ri] - offset;
            }
            points.push(pos);
            points.push(neg);
        }
        points
    }

    /// Propagates the cubature points through the measurement model `h` and
    /// returns `(measurement points, predicted measurement mean,
    /// innovation covariance S = Σ w·dz·dzᵀ + R)`.
    fn measurement_moments(
        points: &[Vector<T, N>],
        h: &impl Fn(&Vector<T, N>) -> Vector<T, M>,
        r: &Matrix<T, M, M>,
    ) -> (Vec<Vector<T, M>>, Vector<T, M>, Matrix<T, M, M>) {
        let w = Self::weight();
        let mut z_points: Vec<Vector<T, M>> = Vec::with_capacity(points.len());
        let mut z_mean = Vector::<T, M>::zeros();
        for pt in points {
            let zp = h(pt);
            // NOTE: loop index deliberately not named `r` to avoid shadowing
            // the measurement-noise parameter.
            for ri in 0..M {
                z_mean[ri] = z_mean[ri] + w * zp[ri];
            }
            z_points.push(zp);
        }
        let mut s_mat = Matrix::<T, M, M>::zeros();
        for zp in &z_points {
            let dz = *zp - z_mean;
            for ri in 0..M {
                for ci in 0..M {
                    s_mat[(ri, ci)] = s_mat[(ri, ci)] + w * dz[ri] * dz[ci];
                }
            }
        }
        (z_points, z_mean, s_mat + *r)
    }

    /// Cross covariance `Pxz = Σ w·(x_i − x̄)(z_i − z̄)ᵀ`.
    fn cross_covariance(
        points: &[Vector<T, N>],
        x_mean: &Vector<T, N>,
        z_points: &[Vector<T, M>],
        z_mean: &Vector<T, M>,
    ) -> Matrix<T, N, M> {
        let w = Self::weight();
        let mut pxz = Matrix::<T, N, M>::zeros();
        for (pt, zp) in points.iter().zip(z_points.iter()) {
            let dx = *pt - *x_mean;
            let dz = *zp - *z_mean;
            for ri in 0..N {
                for ci in 0..M {
                    pxz[(ri, ci)] = pxz[(ri, ci)] + w * dx[ri] * dz[ci];
                }
            }
        }
        pxz
    }

    /// Re-symmetrizes `p` (guards against floating-point drift) and enforces
    /// the configured variance floor on its diagonal.
    fn condition_covariance(&mut self) {
        let half = T::from(0.5).unwrap();
        self.p = (self.p + self.p.transpose()) * half;
        apply_var_floor(&mut self.p, self.min_variance);
    }

    /// Applies the Kalman correction `x += K·ν`, `P −= K·S·Kᵀ`, then
    /// conditions the covariance.
    fn apply_correction(
        &mut self,
        k: Matrix<T, N, M>,
        innovation: Vector<T, M>,
        s_mat: Matrix<T, M, M>,
    ) {
        self.x = self.x + k * innovation;
        self.p = self.p - k * s_mat * k.transpose();
        self.condition_covariance();
    }

    /// CKF time update: propagates the state through the (possibly
    /// nonlinear) transition `f`, applies the fading-memory factor, and adds
    /// the optional process noise `q`.
    ///
    /// # Errors
    /// Propagates the error from `cholesky_with_jitter` when `p` cannot be
    /// factorized.
    pub fn predict(
        &mut self,
        f: impl Fn(&Vector<T, N>) -> Vector<T, N>,
        q: Option<&Matrix<T, N, N>>,
    ) -> Result<(), EstimateError> {
        let chol = cholesky_with_jitter(&self.p)?;
        let points = Self::cubature_points(&self.x, &chol.l_full());
        let w = Self::weight();
        let mut propagated: Vec<Vector<T, N>> = Vec::with_capacity(points.len());
        let mut x_mean = Vector::<T, N>::zeros();
        for pt in &points {
            let fp = f(pt);
            for ri in 0..N {
                x_mean[ri] = x_mean[ri] + w * fp[ri];
            }
            propagated.push(fp);
        }
        let mut p_new = Matrix::<T, N, N>::zeros();
        for pt in &propagated {
            let d = *pt - x_mean;
            for ri in 0..N {
                for ci in 0..N {
                    p_new[(ri, ci)] = p_new[(ri, ci)] + w * d[ri] * d[ci];
                }
            }
        }
        // Fading memory inflates the propagated covariance before process
        // noise is added.
        p_new = p_new * self.gamma;
        self.x = x_mean;
        self.p = if let Some(q) = q { p_new + *q } else { p_new };
        self.condition_covariance();
        Ok(())
    }

    /// CKF measurement update. Returns the Normalized Innovation Squared
    /// (NIS) `νᵀ·S⁻¹·ν` for consistency monitoring.
    ///
    /// # Errors
    /// - Cholesky failure on `p` propagates as-is.
    /// - Cholesky failure on the innovation covariance `S` maps to
    ///   `EstimateError::SingularInnovation`.
    pub fn update(
        &mut self,
        z: &Vector<T, M>,
        h: impl Fn(&Vector<T, N>) -> Vector<T, M>,
        r: &Matrix<T, M, M>,
    ) -> Result<T, EstimateError> {
        let chol = cholesky_with_jitter(&self.p)?;
        let points = Self::cubature_points(&self.x, &chol.l_full());
        let (z_points, z_mean, s_mat) = Self::measurement_moments(&points, &h, r);
        let s_inv = cholesky_with_jitter(&s_mat)
            .map_err(|_| EstimateError::SingularInnovation)?
            .inverse();
        let innovation = *z - z_mean;
        let nis = (innovation.transpose() * s_inv * innovation)[(0, 0)];
        // The cubature points are symmetric around `self.x`, so `self.x` is
        // exactly their mean and is used directly for the cross covariance.
        let pxz = Self::cross_covariance(&points, &self.x, &z_points, &z_mean);
        let k = pxz * s_inv;
        self.apply_correction(k, innovation, s_mat);
        Ok(nis)
    }

    /// Gated measurement update: rejects the measurement (returning
    /// `Ok(None)`) when its NIS exceeds `gate`, otherwise performs the full
    /// update and returns `Ok(Some(nis))`.
    ///
    /// # Errors
    /// Same as [`Self::update`].
    pub fn update_gated(
        &mut self,
        z: &Vector<T, M>,
        h: impl Fn(&Vector<T, N>) -> Vector<T, M>,
        r: &Matrix<T, M, M>,
        gate: T,
    ) -> Result<Option<T>, EstimateError> {
        let chol = cholesky_with_jitter(&self.p)?;
        let points = Self::cubature_points(&self.x, &chol.l_full());
        let (z_points, z_mean, s_mat) = Self::measurement_moments(&points, &h, r);
        let s_inv = cholesky_with_jitter(&s_mat)
            .map_err(|_| EstimateError::SingularInnovation)?
            .inverse();
        let innovation = *z - z_mean;
        let nis = (innovation.transpose() * s_inv * innovation)[(0, 0)];
        if nis > gate {
            return Ok(None);
        }
        // Accepted: finish the correction with the moments already in hand.
        // (Previously this delegated to `update`, which redundantly redid
        // the Cholesky factorization, point propagation, and moment sums.)
        let pxz = Self::cross_covariance(&points, &self.x, &z_points, &z_mean);
        let k = pxz * s_inv;
        self.apply_correction(k, innovation, s_mat);
        Ok(Some(nis))
    }
}