#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(feature="std")]
use log::trace;
#[cfg(debug_assertions)]
use approx::assert_relative_eq;
use nalgebra as na;
use na::{VectorN, MatrixN, MatrixMN};
use nalgebra::base::dimension::DimMin;
use na::{DefaultAllocator, DimName, RealField};
use na::allocator::Allocator;
use num_traits::identities::One;
#[cfg(not(feature="std"))]
// No-op stand-in for `log::trace!` when building without `std`: matches the
// same one-or-more-argument call shapes as the real macro and expands to an
// empty block, so trace call sites compile away on no_std targets. The
// arguments are matched but never expanded, so they are not evaluated.
macro_rules! trace {
($e:expr) => {{}};
($e:expr, $($es:expr),+) => {{}};
}
// Debug-build sanity check that a matrix is (numerically) symmetric:
// compares `$mat` against its transpose with a relative tolerance of 1e-5
// via `assert_relative_eq!`. Compiles to nothing when `debug_assertions`
// is disabled (release builds).
macro_rules! debug_assert_symmetric {
($mat:expr) => {
#[cfg(debug_assertions)]
{
assert_relative_eq!( $mat, &$mat.transpose(),
max_relative = na::convert(1e-5));
}
}
}
#[cfg(feature="std")]
// Render a matrix-like value (anything with `nrows()`, `ncols()` and
// `(row, col)` indexing) as a multi-line `String`: output starts with a
// newline, then one line per row, each indented four spaces, with every
// element formatted as "{:12.3}" and separated by a single space.
macro_rules! pretty_print {
    ($arr:expr) => {{
        let indent = 4;
        // `" ".repeat(n)` replaces the roundabout
        // `String::from_utf8(vec![b' '; n]).unwrap()` construction.
        let prefix = " ".repeat(indent);
        // Seed with one empty element so the joined result begins with "\n".
        let mut result_els = vec!["".to_string()];
        for i in 0..$arr.nrows() {
            let mut row_els = vec![];
            for j in 0..$arr.ncols() {
                row_els.push(format!("{:12.3}", $arr[(i,j)]));
            }
            // `join` directly on the Vec; the previous
            // `into_iter().collect::<Vec<_>>()` round-trips were no-ops.
            result_els.push(format!("{}{}", prefix, row_els.join(" ")));
        }
        result_els.join("\n")
    }}
}
mod error;
pub use error::{Error, ErrorKind};
mod state_and_covariance;
pub use state_and_covariance::StateAndCovariance;
/// A linear state-transition (motion) model with no control input.
///
/// Implementors supply the state-transition matrix `F`, its transpose, and
/// the process-noise covariance `Q`; the provided `predict` method then
/// advances a state estimate by one timestep.
pub trait TransitionModelLinearNoControl<R, SS>
where
    R: RealField,
    SS: DimName,
    DefaultAllocator: Allocator<R, SS, SS>,
    DefaultAllocator: Allocator<R, SS>,
{
    /// The state-transition matrix `F`.
    fn transition_model(&self) -> &MatrixN<R, SS>;
    /// The transpose of the state-transition matrix, `F^T`.
    fn transition_model_transpose(&self) -> &MatrixN<R, SS>;
    /// The process-noise covariance matrix `Q`.
    fn transition_noise_covariance(&self) -> &MatrixN<R, SS>;
    /// Predict the next state and covariance from `previous_estimate`:
    /// `x' = F x` and `P' = F P F^T + Q`.
    fn predict(&self, previous_estimate: &StateAndCovariance<R, SS>) -> StateAndCovariance<R, SS> {
        let transition = self.transition_model();
        let transition_t = self.transition_model_transpose();
        let predicted_state = transition * previous_estimate.state();
        let predicted_covariance = transition * previous_estimate.covariance() * transition_t
            + self.transition_noise_covariance();
        StateAndCovariance::new(predicted_state, predicted_covariance)
    }
}
/// A linear observation (measurement) model.
///
/// Implementors supply the observation matrix `H`, its transpose, and the
/// observation-noise covariance `R`; the provided `update` method folds one
/// measurement into a prior state estimate.
pub trait ObservationModelLinear<R, SS, OS>
where
R: RealField,
SS: DimName,
OS: DimName + DimMin<OS, Output = OS>,
DefaultAllocator: Allocator<R, SS, SS>,
DefaultAllocator: Allocator<R, SS>,
DefaultAllocator: Allocator<R, OS, SS>,
DefaultAllocator: Allocator<R, SS, OS>,
DefaultAllocator: Allocator<R, OS, OS>,
DefaultAllocator: Allocator<R, OS>,
DefaultAllocator: Allocator<(usize, usize), OS>,
{
/// Map a state vector into observation space.
fn evaluate(&self, state: &VectorN<R,SS>) -> VectorN<R,OS>;
/// The observation matrix `H`.
fn observation_matrix(&self) -> &MatrixMN<R,OS,SS>;
/// The transpose of the observation matrix, `H^T`.
fn observation_matrix_transpose(&self) -> &MatrixMN<R,SS,OS>;
/// The observation-noise covariance matrix `R`.
fn observation_noise_covariance(&self) -> &MatrixN<R,OS>;
/// Fold one `observation` into `prior`, returning the posterior estimate.
///
/// `covariance_method` selects how the posterior covariance is formed.
/// Returns `ErrorKind::CovarianceNotPositiveSemiDefinite` if the
/// innovation covariance cannot be Cholesky-factorized.
fn update(&self,
prior: &StateAndCovariance<R,SS>,
observation: &VectorN<R,OS>,
covariance_method: CoverianceUpdateMethod,
)
-> Result<StateAndCovariance<R,SS>, Error>
{
let h = self.observation_matrix();
trace!("h {}", pretty_print!(h));
let p = prior.covariance();
trace!("p {}", pretty_print!(p));
debug_assert_symmetric!(p);
let ht = self.observation_matrix_transpose();
trace!("ht {}", pretty_print!(ht));
let r = self.observation_noise_covariance();
trace!("r {}", pretty_print!(r));
// Innovation (residual) covariance: S = H P H^T + R.
let s = (h*p*ht)+r;
trace!("s {}", pretty_print!(s));
// Invert S via Cholesky; failure means S is not positive definite.
// Note: `s` is moved here, so it must not be used afterwards.
let s_chol = match na::linalg::Cholesky::new(s) {
Some(v) => v,
None => {
return Err(ErrorKind::CovarianceNotPositiveSemiDefinite.into());
}
};
let s_inv: MatrixMN<R,OS,OS> = s_chol.inverse();
trace!("s_inv {}", pretty_print!(s_inv));
// Kalman gain: K = P H^T S^-1.
let k_gain: MatrixMN<R,SS,OS> = p * ht * s_inv;
trace!("k_gain {}", pretty_print!(k_gain));
let predicted: VectorN<R,OS> = self.evaluate(prior.state());
trace!("predicted {}", pretty_print!(predicted));
trace!("observation {}", pretty_print!(observation));
// Innovation: y = z - H x.
let innovation: VectorN<R,OS> = observation - predicted;
trace!("innovation {}", pretty_print!(innovation));
// Posterior state: x' = x + K y.
let state: VectorN<R,SS> = prior.state() + &k_gain * innovation;
trace!("state {}", pretty_print!(state));
trace!("self.observation_matrix() {}", pretty_print!(self.observation_matrix()));
let kh: MatrixN<R,SS> = &k_gain * self.observation_matrix();
trace!("kh {}", pretty_print!(kh));
// (I - K H), shared by all covariance-update variants below.
let one_minus_kh = MatrixN::<R,SS>::one() - kh;
trace!("one_minus_kh {}", pretty_print!(one_minus_kh));
let covariance: MatrixN<R,SS> = match covariance_method {
CoverianceUpdateMethod::JosephForm => {
// Joseph form: P' = (I-KH) P (I-KH)^T + K R K^T.
let left = &one_minus_kh * prior.covariance() * &one_minus_kh.transpose();
let right = &k_gain * r * &k_gain.transpose();
left + right
}
CoverianceUpdateMethod::OptimalKalman => {
// Standard form: P' = (I-KH) P.
one_minus_kh * prior.covariance()
}
CoverianceUpdateMethod::OptimalKalmanForcedSymmetric => {
// Standard form, then symmetrized: P' = (C + C^T) / 2.
let covariance1 = one_minus_kh * prior.covariance();
trace!("covariance1 {}", pretty_print!(covariance1));
let half: R = na::convert(0.5);
(&covariance1 + &covariance1.transpose()) * half
}
};
trace!("covariance {}", pretty_print!(covariance));
debug_assert_symmetric!(covariance);
Ok(StateAndCovariance::new(state, covariance))
}
}
/// Selects how the posterior covariance is computed during an update.
///
/// NOTE(review): "Coveriance" looks like a typo for "Covariance", but the
/// name is public API, so renaming it here would break downstream callers.
#[derive(Debug,PartialEq,Clone,Copy)]
pub enum CoverianceUpdateMethod {
/// `P' = (I - K H) P` — cheapest, but may lose symmetry numerically.
OptimalKalman,
/// `P' = (I - K H) P`, then averaged with its transpose to force symmetry.
OptimalKalmanForcedSymmetric,
/// Joseph form: `P' = (I-KH) P (I-KH)^T + K R K^T` — more numerically
/// stable, at extra computational cost.
JosephForm,
}
/// A Kalman filter combining a transition model and an observation model.
///
/// Holds borrowed trait objects; the filter itself owns no mutable state,
/// so estimates are passed in and out of its methods explicitly.
pub struct KalmanFilterNoControl<'a, R, SS, OS>
where
R: RealField,
SS: DimName,
OS: DimName,
{
// The state-transition (motion) model used for the predict step.
transition_model: &'a dyn TransitionModelLinearNoControl<R, SS>,
// The observation model used for the update step. NOTE(review): despite
// the name, this field holds the full observation model, not just its
// matrix; it is kept as-is because the impl block constructs it by name.
observation_matrix: &'a dyn ObservationModelLinear<R, SS, OS>,
}
impl<'a, R, SS, OS> KalmanFilterNoControl<'a, R, SS, OS>
where
    R: RealField,
    SS: DimName,
    OS: DimName + DimMin<OS, Output = OS>,
    DefaultAllocator: Allocator<R, SS, SS>,
    DefaultAllocator: Allocator<R, SS>,
    DefaultAllocator: Allocator<R, OS, SS>,
    DefaultAllocator: Allocator<R, SS, OS>,
    DefaultAllocator: Allocator<R, OS, OS>,
    DefaultAllocator: Allocator<R, OS>,
    DefaultAllocator: Allocator<(usize, usize), OS>,
{
    /// Create a filter from a transition (motion) model and an observation
    /// model. Both are borrowed for the filter's lifetime.
    pub fn new(
        transition_model: &'a dyn TransitionModelLinearNoControl<R, SS>,
        observation_matrix: &'a dyn ObservationModelLinear<R, SS, OS>,
    ) -> Self {
        Self {
            transition_model,
            observation_matrix,
        }
    }
    /// Perform one predict + update step using the
    /// `OptimalKalmanForcedSymmetric` covariance update method.
    pub fn step(&self, previous_estimate: &StateAndCovariance<R,SS>, observation: &VectorN<R,OS>)
        -> Result<StateAndCovariance<R,SS>,Error>
    {
        self.step_with_options(previous_estimate, observation, CoverianceUpdateMethod::OptimalKalmanForcedSymmetric)
    }
    /// Perform one predict + update step with an explicit covariance update
    /// method.
    ///
    /// If any component of `observation` is NaN, the measurement is treated
    /// as missing and only the prediction (the prior) is returned.
    pub fn step_with_options(&self,
        previous_estimate: &StateAndCovariance<R,SS>,
        observation: &VectorN<R,OS>,
        covariance_update_method: CoverianceUpdateMethod,
    )
        -> Result<StateAndCovariance<R,SS>,Error>
    {
        let prior = self.transition_model.predict(previous_estimate);
        if observation.iter().any(|x| is_nan(*x)) {
            // Missing (NaN) observation: skip the update stage.
            Ok(prior)
        } else {
            self.observation_matrix.update(
                &prior,
                observation,
                covariance_update_method,
            )
        }
    }
    /// Run the filter over `observations`, writing each posterior estimate
    /// into the corresponding slot of `state_estimates`.
    ///
    /// # Panics
    ///
    /// Panics if `state_estimates` is shorter than `observations`.
    pub fn filter_inplace(&self, initial_estimate: &StateAndCovariance<R,SS>, observations: &[VectorN<R,OS>], state_estimates: &mut [StateAndCovariance<R,SS>]) -> Result<(), Error> {
        // Validate lengths before doing any work.
        assert!(state_estimates.len() >= observations.len());
        let mut previous_estimate = initial_estimate.clone();
        for (this_observation, state_estimate) in observations.iter().zip(state_estimates.iter_mut()) {
            let this_estimate = self.step(&previous_estimate, this_observation)?;
            *state_estimate = this_estimate.clone();
            previous_estimate = this_estimate;
        }
        Ok(())
    }
    /// Run the filter over `observations`, returning one posterior estimate
    /// per observation.
    #[cfg(feature="std")]
    pub fn filter(&self, initial_estimate: &StateAndCovariance<R,SS>, observations: &[VectorN<R,OS>]) -> Result<Vec<StateAndCovariance<R,SS>>, Error> {
        // Placeholder estimates; every slot is overwritten by filter_inplace.
        let empty = StateAndCovariance::new(na::zero(), na::MatrixN::<R,SS>::identity());
        let mut state_estimates = vec![empty; observations.len()];
        self.filter_inplace(initial_estimate, observations, &mut state_estimates)?;
        Ok(state_estimates)
    }
    /// Smooth over all observations: a forward filtering pass followed by a
    /// backward (Rauch-Tung-Striebel) smoothing pass.
    #[cfg(feature="std")]
    pub fn smooth(&self, initial_estimate: &StateAndCovariance<R,SS>, observations: &[VectorN<R,OS>]) -> Result<Vec<StateAndCovariance<R,SS>>, Error> {
        let forward_results = self.filter(initial_estimate, observations)?;
        self.smooth_from_filtered(forward_results)
    }
    /// Backward (Rauch-Tung-Striebel) smoothing pass over already-filtered
    /// estimates. Returns one smoothed estimate per input estimate.
    #[cfg(feature="std")]
    pub fn smooth_from_filtered(&self, mut forward_results: Vec<StateAndCovariance<R,SS>>) -> Result<Vec<StateAndCovariance<R,SS>>,Error> {
        // Bug fix: indexing forward_results[0] below panicked on empty
        // input (reachable via `smooth` with zero observations); an empty
        // filtered sequence smooths to an empty sequence.
        if forward_results.is_empty() {
            return Ok(forward_results);
        }
        forward_results.reverse();
        let mut smoothed_backwards = Vec::with_capacity(forward_results.len());
        // The final filtered estimate is already the smoothed estimate.
        let mut smooth_future = forward_results[0].clone();
        smoothed_backwards.push(smooth_future.clone());
        for filt in forward_results.iter().skip(1) {
            smooth_future = self.smooth_step(&smooth_future,filt)?;
            smoothed_backwards.push(smooth_future.clone());
        }
        smoothed_backwards.reverse();
        Ok(smoothed_backwards)
    }
    /// One backward smoothing step: combine the filtered estimate `filt` at
    /// time t with the smoothed estimate `smooth_future` at time t+1.
    #[cfg(feature="std")]
    fn smooth_step(&self, smooth_future: &StateAndCovariance<R,SS>, filt: &StateAndCovariance<R,SS>) -> Result<StateAndCovariance<R,SS>, Error> {
        let prior = self.transition_model.predict(filt);
        // Invert the predicted covariance via Cholesky; failure means it is
        // not positive definite.
        let v_chol = match na::linalg::Cholesky::new(prior.covariance().clone()) {
            Some(v) => v,
            None => {
                return Err(ErrorKind::CovarianceNotPositiveSemiDefinite.into());
            },
        };
        let inv_prior_covariance: MatrixMN<R,SS,SS> = v_chol.inverse();
        trace!("inv_prior_covariance {}", pretty_print!(inv_prior_covariance));
        // Smoother gain: J = P_filt F^T (P_prior)^-1.
        let j = filt.covariance() * (self.transition_model.transition_model_transpose() * inv_prior_covariance);
        // x_smooth = x_filt + J (x_future_smooth - x_prior).
        let residuals = smooth_future.state() - prior.state();
        let state = filt.state() + &j*residuals;
        // P_smooth = P_filt + J (P_future_smooth - P_prior) J^T.
        let covar_residuals = smooth_future.covariance() - prior.covariance();
        let covariance = filt.covariance() + &j * (covar_residuals * j.transpose());
        Ok(StateAndCovariance::new(state, covariance ))
    }
}
/// Return `true` iff `x` is NaN.
///
/// Works for any `RealField` without requiring a `Float` bound: NaN is the
/// only value that is neither `< 0` nor `>= 0`, so both comparisons failing
/// identifies it.
#[inline]
fn is_nan<R: RealField>(x: R) -> bool {
    let zero: R = na::convert(0.0);
    !(x < zero) && !(x >= zero)
}
/// `is_nan` must be false for all finite and infinite values and true only
/// for NaN, for both `f64` and `f32`.
#[test]
fn test_is_nan() {
    let not_nan_f64 = [-1.0f64, 0.0, 1.0, 1.0 / 0.0, -1.0 / 0.0];
    for &x in not_nan_f64.iter() {
        assert_eq!(is_nan(x), false);
    }
    assert_eq!(is_nan(std::f64::NAN), true);
    let not_nan_f32 = [-1.0f32, 0.0, 1.0, 1.0 / 0.0, -1.0 / 0.0];
    for &x in not_nan_f32.iter() {
        assert_eq!(is_nan(x), false);
    }
    assert_eq!(is_nan(std::f32::NAN), true);
}