//! numeris/estimate/ckf.rs — Cubature Kalman Filter (CKF).

extern crate alloc;
use alloc::vec::Vec;

use crate::matrix::vector::ColumnVector;
use crate::traits::FloatScalar;
use crate::Matrix;

use super::{apply_var_floor, cholesky_with_jitter, EstimateError};

10/// Cubature Kalman Filter with third-degree spherical-radial cubature rule.
11///
12/// Uses `2N` cubature points with equal weight `1/(2N)`. Unlike the UKF,
13/// the CKF has **no tuning parameters** — the cubature rule is uniquely
14/// determined by the state dimension.
15///
16/// `N` is the state dimension, `M` is the measurement dimension.
17/// Requires the `alloc` feature for temporary cubature point storage.
18///
19/// # Example
20///
21/// ```
22/// use numeris::estimate::Ckf;
23/// use numeris::{ColumnVector, Matrix};
24///
25/// let x0 = ColumnVector::from_column([0.0_f64, 1.0]);
26/// let p0 = Matrix::new([[1.0, 0.0], [0.0, 1.0]]);
27/// let mut ckf = Ckf::<f64, 2, 1>::new(x0, p0);
28///
29/// let dt = 0.1;
30/// let q = Matrix::new([[0.01, 0.0], [0.0, 0.01]]);
31/// let r = Matrix::new([[0.5]]);
32///
33/// ckf.predict(
34///     |x| ColumnVector::from_column([x[(0, 0)] + dt * x[(1, 0)], x[(1, 0)]]),
35///     Some(&q),
36/// ).unwrap();
37///
38/// ckf.update(
39///     &ColumnVector::from_column([0.12]),
40///     |x| ColumnVector::from_column([x[(0, 0)]]),
41///     &r,
42/// ).unwrap();
43/// ```
44pub struct Ckf<T: FloatScalar, const N: usize, const M: usize> {
45    /// State estimate.
46    pub x: ColumnVector<T, N>,
47    /// State covariance.
48    pub p: Matrix<T, N, N>,
49    /// Minimum allowed diagonal variance (0 = disabled).
50    min_variance: T,
51    /// Fading-memory factor γ≥1 applied to the sigma-point covariance (1 = standard).
52    gamma: T,
53}
54
55impl<T: FloatScalar, const N: usize, const M: usize> Ckf<T, N, M> {
56    /// Create a new CKF with initial state `x0` and covariance `p0`.
57    pub fn new(x0: ColumnVector<T, N>, p0: Matrix<T, N, N>) -> Self {
58        Self {
59            x: x0,
60            p: p0,
61            min_variance: T::zero(),
62            gamma: T::one(),
63        }
64    }
65
66    /// Set a minimum diagonal variance floor applied after every predict/update.
67    pub fn with_min_variance(mut self, min_variance: T) -> Self {
68        self.min_variance = min_variance;
69        self
70    }
71
72    /// Set a fading-memory factor `γ ≥ 1` applied to the propagated covariance.
73    ///
74    /// The predicted covariance becomes `γ · P_cubature + Q`. Default is `1.0`.
75    pub fn with_fading_memory(mut self, gamma: T) -> Self {
76        self.gamma = gamma;
77        self
78    }
79
80    /// Reference to the current state estimate.
81    #[inline]
82    pub fn state(&self) -> &ColumnVector<T, N> {
83        &self.x
84    }
85
86    /// Reference to the current state covariance.
87    #[inline]
88    pub fn covariance(&self) -> &Matrix<T, N, N> {
89        &self.p
90    }
91
92    /// Generate 2N cubature points from state and Cholesky factor.
93    fn cubature_points(
94        x: &ColumnVector<T, N>,
95        l: &Matrix<T, N, N>,
96    ) -> Vec<ColumnVector<T, N>> {
97        let sqrt_n = T::from(N).unwrap().sqrt();
98        let mut points = Vec::with_capacity(2 * N);
99
100        for i in 0..N {
101            // x ± sqrt(N) · L·e_i = x ± sqrt(N) · col_i(L)
102            let mut pos = *x;
103            let mut neg = *x;
104            for r in 0..N {
105                let offset = sqrt_n * l[(r, i)];
106                pos[(r, 0)] = pos[(r, 0)] + offset;
107                neg[(r, 0)] = neg[(r, 0)] - offset;
108            }
109            points.push(pos);
110            points.push(neg);
111        }
112
113        points
114    }
115
116    /// Predict step.
117    ///
118    /// - `f` — state transition: `x_{k+1} = f(x_k)`
119    /// - `q` — process noise covariance (pass `None` for zero process noise)
120    ///
121    /// Generates `2N` cubature points, propagates through `f`, and
122    /// reconstructs the predicted mean and covariance as `γ · P_cubature + Q`.
123    pub fn predict(
124        &mut self,
125        f: impl Fn(&ColumnVector<T, N>) -> ColumnVector<T, N>,
126        q: Option<&Matrix<T, N, N>>,
127    ) -> Result<(), EstimateError> {
128        let chol = cholesky_with_jitter(&self.p)?;
129        let l = chol.l_full();
130
131        let points = Self::cubature_points(&self.x, &l);
132        let w = T::one() / T::from(2 * N).unwrap();
133
134        // Propagate and compute mean
135        let mut propagated: Vec<ColumnVector<T, N>> = Vec::with_capacity(2 * N);
136        let mut x_mean = ColumnVector::<T, N>::zeros();
137
138        for pt in &points {
139            let fp = f(pt);
140            for r in 0..N {
141                x_mean[(r, 0)] = x_mean[(r, 0)] + w * fp[(r, 0)];
142            }
143            propagated.push(fp);
144        }
145
146        // Cubature covariance (without Q)
147        let mut p_new = Matrix::<T, N, N>::zeros();
148        for pt in &propagated {
149            let d = *pt - x_mean;
150            for r in 0..N {
151                for c in 0..N {
152                    p_new[(r, c)] = p_new[(r, c)] + w * d[(r, 0)] * d[(c, 0)];
153                }
154            }
155        }
156
157        // Fading memory: scale cubature covariance by γ before adding Q.
158        p_new = p_new * self.gamma;
159        self.x = x_mean;
160        self.p = if let Some(q) = q { p_new + *q } else { p_new };
161        let half = T::from(0.5).unwrap();
162        self.p = (self.p + self.p.transpose()) * half;
163        apply_var_floor(&mut self.p, self.min_variance);
164
165        Ok(())
166    }
167
168    /// Update step.
169    ///
170    /// - `z` — measurement vector
171    /// - `h` — measurement model: `z = h(x)`
172    /// - `r` — measurement noise covariance
173    ///
174    /// Covariance update uses the symmetric `P - K S Kᵀ` form.
175    ///
176    /// Returns the Normalized Innovation Squared (NIS): `yᵀ S⁻¹ y`.
177    pub fn update(
178        &mut self,
179        z: &ColumnVector<T, M>,
180        h: impl Fn(&ColumnVector<T, N>) -> ColumnVector<T, M>,
181        r: &Matrix<T, M, M>,
182    ) -> Result<T, EstimateError> {
183        let chol = cholesky_with_jitter(&self.p)?;
184        let l = chol.l_full();
185
186        let points = Self::cubature_points(&self.x, &l);
187        let w = T::one() / T::from(2 * N).unwrap();
188
189        // Transform through measurement model
190        let mut z_points: Vec<ColumnVector<T, M>> = Vec::with_capacity(2 * N);
191        let mut z_mean = ColumnVector::<T, M>::zeros();
192
193        for pt in &points {
194            let zp = h(pt);
195            for r in 0..M {
196                z_mean[(r, 0)] = z_mean[(r, 0)] + w * zp[(r, 0)];
197            }
198            z_points.push(zp);
199        }
200
201        // Innovation covariance S = (1/2N) Σ (z_i - z̄)(z_i - z̄)ᵀ + R
202        let mut s_mat = Matrix::<T, M, M>::zeros();
203        for zp in &z_points {
204            let dz = *zp - z_mean;
205            for ri in 0..M {
206                for ci in 0..M {
207                    s_mat[(ri, ci)] = s_mat[(ri, ci)] + w * dz[(ri, 0)] * dz[(ci, 0)];
208                }
209            }
210        }
211        s_mat = s_mat + *r;
212
213        // Cross-covariance Pxz = (1/2N) Σ (x_i - x̄)(z_i - z̄)ᵀ
214        let mut pxz = Matrix::<T, N, M>::zeros();
215        for (pt, zp) in points.iter().zip(z_points.iter()) {
216            let dx = *pt - self.x;
217            let dz = *zp - z_mean;
218            for ri in 0..N {
219                for ci in 0..M {
220                    pxz[(ri, ci)] = pxz[(ri, ci)] + w * dx[(ri, 0)] * dz[(ci, 0)];
221                }
222            }
223        }
224
225        // Kalman gain: K = Pxz · S⁻¹
226        let s_inv = cholesky_with_jitter(&s_mat)
227            .map_err(|_| EstimateError::SingularInnovation)?
228            .inverse();
229        let k = pxz * s_inv;
230
231        // Innovation and NIS = yᵀ S⁻¹ y
232        let innovation = *z - z_mean;
233        let nis = (innovation.transpose() * s_inv * innovation)[(0, 0)];
234
235        // Update state and covariance: P = P - K·S·Kᵀ (symmetric, manifestly PSD-subtracted)
236        self.x = self.x + k * innovation;
237        self.p = self.p - k * s_mat * k.transpose();
238        let half = T::from(0.5).unwrap();
239        self.p = (self.p + self.p.transpose()) * half;
240        apply_var_floor(&mut self.p, self.min_variance);
241
242        Ok(nis)
243    }
244
245    /// Update with innovation gating — skips state update if NIS exceeds `gate`.
246    ///
247    /// Returns `Ok(None)` when rejected, `Ok(Some(nis))` when accepted.
248    ///
249    /// Chi-squared thresholds: M=1 → 99%: 6.63 | M=2 → 9.21 | M=3 → 11.34
250    pub fn update_gated(
251        &mut self,
252        z: &ColumnVector<T, M>,
253        h: impl Fn(&ColumnVector<T, N>) -> ColumnVector<T, M>,
254        r: &Matrix<T, M, M>,
255        gate: T,
256    ) -> Result<Option<T>, EstimateError> {
257        // Compute NIS without modifying state.
258        let chol = cholesky_with_jitter(&self.p)?;
259        let l = chol.l_full();
260
261        let points = Self::cubature_points(&self.x, &l);
262        let w = T::one() / T::from(2 * N).unwrap();
263
264        let mut z_points: Vec<ColumnVector<T, M>> = Vec::with_capacity(2 * N);
265        let mut z_mean = ColumnVector::<T, M>::zeros();
266
267        for pt in &points {
268            let zp = h(pt);
269            for r_i in 0..M {
270                z_mean[(r_i, 0)] = z_mean[(r_i, 0)] + w * zp[(r_i, 0)];
271            }
272            z_points.push(zp);
273        }
274
275        let mut s_mat = Matrix::<T, M, M>::zeros();
276        for zp in &z_points {
277            let dz = *zp - z_mean;
278            for ri in 0..M {
279                for ci in 0..M {
280                    s_mat[(ri, ci)] = s_mat[(ri, ci)] + w * dz[(ri, 0)] * dz[(ci, 0)];
281                }
282            }
283        }
284        s_mat = s_mat + *r;
285
286        let s_inv = cholesky_with_jitter(&s_mat)
287            .map_err(|_| EstimateError::SingularInnovation)?
288            .inverse();
289        let innovation = *z - z_mean;
290        let nis = (innovation.transpose() * s_inv * innovation)[(0, 0)];
291
292        if nis > gate {
293            return Ok(None);
294        }
295        let nis = self.update(z, h, r)?;
296        Ok(Some(nis))
297    }
298}