1extern crate alloc;
2use alloc::vec::Vec;
3
4use crate::matrix::vector::ColumnVector;
5use crate::traits::FloatScalar;
6use crate::Matrix;
7
8use super::{apply_var_floor, cholesky_with_jitter, EstimateError};
9
/// Cubature Kalman filter (CKF) over an `N`-dimensional state with
/// `M`-dimensional measurements, scalar type `T`.
///
/// Propagates the mean/covariance through nonlinear models using 2·N
/// cubature points (see `cubature_points` in the impl below).
pub struct Ckf<T: FloatScalar, const N: usize, const M: usize> {
    /// Current state estimate (mean).
    pub x: ColumnVector<T, N>,
    /// Current state covariance estimate.
    pub p: Matrix<T, N, N>,
    /// Floor applied to `p`'s diagonal via `apply_var_floor` after each
    /// predict/update; defaults to zero (no flooring).
    min_variance: T,
    /// Fading-memory factor multiplied into the predicted covariance in
    /// `predict`; defaults to one (no fading).
    gamma: T,
}
54
55impl<T: FloatScalar, const N: usize, const M: usize> Ckf<T, N, M> {
56 pub fn new(x0: ColumnVector<T, N>, p0: Matrix<T, N, N>) -> Self {
58 Self {
59 x: x0,
60 p: p0,
61 min_variance: T::zero(),
62 gamma: T::one(),
63 }
64 }
65
66 pub fn with_min_variance(mut self, min_variance: T) -> Self {
68 self.min_variance = min_variance;
69 self
70 }
71
72 pub fn with_fading_memory(mut self, gamma: T) -> Self {
76 self.gamma = gamma;
77 self
78 }
79
80 #[inline]
82 pub fn state(&self) -> &ColumnVector<T, N> {
83 &self.x
84 }
85
86 #[inline]
88 pub fn covariance(&self) -> &Matrix<T, N, N> {
89 &self.p
90 }
91
92 fn cubature_points(
94 x: &ColumnVector<T, N>,
95 l: &Matrix<T, N, N>,
96 ) -> Vec<ColumnVector<T, N>> {
97 let sqrt_n = T::from(N).unwrap().sqrt();
98 let mut points = Vec::with_capacity(2 * N);
99
100 for i in 0..N {
101 let mut pos = *x;
103 let mut neg = *x;
104 for r in 0..N {
105 let offset = sqrt_n * l[(r, i)];
106 pos[(r, 0)] = pos[(r, 0)] + offset;
107 neg[(r, 0)] = neg[(r, 0)] - offset;
108 }
109 points.push(pos);
110 points.push(neg);
111 }
112
113 points
114 }
115
116 pub fn predict(
124 &mut self,
125 f: impl Fn(&ColumnVector<T, N>) -> ColumnVector<T, N>,
126 q: Option<&Matrix<T, N, N>>,
127 ) -> Result<(), EstimateError> {
128 let chol = cholesky_with_jitter(&self.p)?;
129 let l = chol.l_full();
130
131 let points = Self::cubature_points(&self.x, &l);
132 let w = T::one() / T::from(2 * N).unwrap();
133
134 let mut propagated: Vec<ColumnVector<T, N>> = Vec::with_capacity(2 * N);
136 let mut x_mean = ColumnVector::<T, N>::zeros();
137
138 for pt in &points {
139 let fp = f(pt);
140 for r in 0..N {
141 x_mean[(r, 0)] = x_mean[(r, 0)] + w * fp[(r, 0)];
142 }
143 propagated.push(fp);
144 }
145
146 let mut p_new = Matrix::<T, N, N>::zeros();
148 for pt in &propagated {
149 let d = *pt - x_mean;
150 for r in 0..N {
151 for c in 0..N {
152 p_new[(r, c)] = p_new[(r, c)] + w * d[(r, 0)] * d[(c, 0)];
153 }
154 }
155 }
156
157 p_new = p_new * self.gamma;
159 self.x = x_mean;
160 self.p = if let Some(q) = q { p_new + *q } else { p_new };
161 let half = T::from(0.5).unwrap();
162 self.p = (self.p + self.p.transpose()) * half;
163 apply_var_floor(&mut self.p, self.min_variance);
164
165 Ok(())
166 }
167
168 pub fn update(
178 &mut self,
179 z: &ColumnVector<T, M>,
180 h: impl Fn(&ColumnVector<T, N>) -> ColumnVector<T, M>,
181 r: &Matrix<T, M, M>,
182 ) -> Result<T, EstimateError> {
183 let chol = cholesky_with_jitter(&self.p)?;
184 let l = chol.l_full();
185
186 let points = Self::cubature_points(&self.x, &l);
187 let w = T::one() / T::from(2 * N).unwrap();
188
189 let mut z_points: Vec<ColumnVector<T, M>> = Vec::with_capacity(2 * N);
191 let mut z_mean = ColumnVector::<T, M>::zeros();
192
193 for pt in &points {
194 let zp = h(pt);
195 for r in 0..M {
196 z_mean[(r, 0)] = z_mean[(r, 0)] + w * zp[(r, 0)];
197 }
198 z_points.push(zp);
199 }
200
201 let mut s_mat = Matrix::<T, M, M>::zeros();
203 for zp in &z_points {
204 let dz = *zp - z_mean;
205 for ri in 0..M {
206 for ci in 0..M {
207 s_mat[(ri, ci)] = s_mat[(ri, ci)] + w * dz[(ri, 0)] * dz[(ci, 0)];
208 }
209 }
210 }
211 s_mat = s_mat + *r;
212
213 let mut pxz = Matrix::<T, N, M>::zeros();
215 for (pt, zp) in points.iter().zip(z_points.iter()) {
216 let dx = *pt - self.x;
217 let dz = *zp - z_mean;
218 for ri in 0..N {
219 for ci in 0..M {
220 pxz[(ri, ci)] = pxz[(ri, ci)] + w * dx[(ri, 0)] * dz[(ci, 0)];
221 }
222 }
223 }
224
225 let s_inv = cholesky_with_jitter(&s_mat)
227 .map_err(|_| EstimateError::SingularInnovation)?
228 .inverse();
229 let k = pxz * s_inv;
230
231 let innovation = *z - z_mean;
233 let nis = (innovation.transpose() * s_inv * innovation)[(0, 0)];
234
235 self.x = self.x + k * innovation;
237 self.p = self.p - k * s_mat * k.transpose();
238 let half = T::from(0.5).unwrap();
239 self.p = (self.p + self.p.transpose()) * half;
240 apply_var_floor(&mut self.p, self.min_variance);
241
242 Ok(nis)
243 }
244
245 pub fn update_gated(
251 &mut self,
252 z: &ColumnVector<T, M>,
253 h: impl Fn(&ColumnVector<T, N>) -> ColumnVector<T, M>,
254 r: &Matrix<T, M, M>,
255 gate: T,
256 ) -> Result<Option<T>, EstimateError> {
257 let chol = cholesky_with_jitter(&self.p)?;
259 let l = chol.l_full();
260
261 let points = Self::cubature_points(&self.x, &l);
262 let w = T::one() / T::from(2 * N).unwrap();
263
264 let mut z_points: Vec<ColumnVector<T, M>> = Vec::with_capacity(2 * N);
265 let mut z_mean = ColumnVector::<T, M>::zeros();
266
267 for pt in &points {
268 let zp = h(pt);
269 for r_i in 0..M {
270 z_mean[(r_i, 0)] = z_mean[(r_i, 0)] + w * zp[(r_i, 0)];
271 }
272 z_points.push(zp);
273 }
274
275 let mut s_mat = Matrix::<T, M, M>::zeros();
276 for zp in &z_points {
277 let dz = *zp - z_mean;
278 for ri in 0..M {
279 for ci in 0..M {
280 s_mat[(ri, ci)] = s_mat[(ri, ci)] + w * dz[(ri, 0)] * dz[(ci, 0)];
281 }
282 }
283 }
284 s_mat = s_mat + *r;
285
286 let s_inv = cholesky_with_jitter(&s_mat)
287 .map_err(|_| EstimateError::SingularInnovation)?
288 .inverse();
289 let innovation = *z - z_mean;
290 let nis = (innovation.transpose() * s_inv * innovation)[(0, 0)];
291
292 if nis > gate {
293 return Ok(None);
294 }
295 let nis = self.update(z, h, r)?;
296 Ok(Some(nis))
297 }
298}