use crate::matrix::vector::Vector;
use crate::traits::FloatScalar;
use crate::Matrix;
use super::EstimateError;
/// Batch linear least-squares estimator kept in information
/// (inverse-covariance) form.
///
/// Observations are accumulated into an information matrix and an
/// information vector; [`BatchLsq::solve`] recovers the state estimate and
/// its covariance by inverting the accumulated information.
pub struct BatchLsq<T: FloatScalar, const N: usize> {
    // Accumulated information matrix: prior (if any) plus Σ Hᵀ R⁻¹ H.
    info: Matrix<T, N, N>,
    // Accumulated information vector: prior (if any) plus Σ Hᵀ R⁻¹ z.
    eta: Vector<T, N>,
}
impl<T: FloatScalar, const N: usize> BatchLsq<T, N> {
    /// Creates an estimator with no prior: zero information matrix and
    /// zero information vector.
    ///
    /// Note that [`Self::solve`] will fail until enough observations have
    /// been accumulated to make the information matrix invertible.
    pub fn new() -> Self {
        Self {
            info: Matrix::zeros(),
            eta: Vector::zeros(),
        }
    }

    /// Creates an estimator seeded with a prior estimate `x0` whose
    /// covariance is `p0`.
    ///
    /// # Errors
    ///
    /// Returns [`EstimateError::SingularInnovation`] when `p0` is not
    /// positive definite (its Cholesky factorization fails), so the prior
    /// covariance cannot be inverted.
    pub fn with_prior(
        x0: &Vector<T, N>,
        p0: &Matrix<T, N, N>,
    ) -> Result<Self, EstimateError> {
        // The information matrix is the inverse of the prior covariance.
        let p0_inv = p0
            .cholesky()
            .map_err(|_| EstimateError::SingularInnovation)?
            .inverse();
        // Information vector: eta = P0^-1 * x0.
        let eta = p0_inv * *x0;
        Ok(Self { info: p0_inv, eta })
    }

    /// Fuses one linear observation `z = H x + v` with noise covariance `R`.
    ///
    /// Accumulates `Hᵀ R⁻¹ H` into the information matrix and `Hᵀ R⁻¹ z`
    /// into the information vector.
    ///
    /// # Errors
    ///
    /// Returns [`EstimateError::SingularInnovation`] when `r` is not
    /// positive definite (its Cholesky factorization fails).
    pub fn add_observation<const M: usize>(
        &mut self,
        z: &Vector<T, M>,
        h: &Matrix<T, M, N>,
        r: &Matrix<T, M, M>,
    ) -> Result<(), EstimateError> {
        let r_inv = r
            .cholesky()
            .map_err(|_| EstimateError::SingularInnovation)?
            .inverse();
        // Hoist Hᵀ R⁻¹: it feeds both the matrix and the vector update.
        let ht_rinv = h.transpose() * r_inv;
        self.info = self.info + ht_rinv * *h;
        self.eta = self.eta + ht_rinv * *z;
        Ok(())
    }

    /// Solves the normal equations, returning the estimate
    /// `x = info⁻¹ · eta` together with its covariance `P = info⁻¹`.
    ///
    /// # Errors
    ///
    /// Returns [`EstimateError::SingularInnovation`] when the accumulated
    /// information matrix is not positive definite — i.e. the system is
    /// still underdetermined (too few independent observations).
    pub fn solve(&self) -> Result<(Vector<T, N>, Matrix<T, N, N>), EstimateError> {
        let p = self
            .info
            .cholesky()
            .map_err(|_| EstimateError::SingularInnovation)?
            .inverse();
        let x = p * self.eta;
        Ok((x, p))
    }

    /// Borrows the accumulated information matrix.
    #[inline]
    pub fn information_matrix(&self) -> &Matrix<T, N, N> {
        &self.info
    }

    /// Borrows the accumulated information vector.
    #[inline]
    pub fn information_vector(&self) -> &Vector<T, N> {
        &self.eta
    }

    /// Discards all accumulated information, returning to the no-prior
    /// state produced by [`Self::new`].
    pub fn reset(&mut self) {
        self.info = Matrix::zeros();
        self.eta = Vector::zeros();
    }
}

// `new()` without `Default` trips Clippy's `new_without_default`; the
// no-prior estimator is the natural default value.
impl<T: FloatScalar, const N: usize> Default for BatchLsq<T, N> {
    fn default() -> Self {
        Self::new()
    }
}