//! SLAM : Simultaneous Localization and Mapping.
//!
//! FastSLAM augmented particle algorithm; see Ref [1].
//!
//! [1] "FastSLAM: A Factored Solution to the Simultaneous Localization and Mapping Problem",
//! Montemerlo, M., Thrun, S., Koller, D. and Wegbreit, B., Proceedings of the AAAI National Conference on Artificial Intelligence, 2002.
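//!
//! A typical update cycle, as a sketch (construction of the location sample state, the
//! observation models, the resampler and the roughener are elided; names are illustrative):
//! ```ignore
//! slam.observe_new(f, obs_model);                         // first sighting of feature f
//! slam.observe(f, innovation_model, &hx);                 // subsequent sightings of feature f
//! slam.update_resample(&mut resampler, &mut roughener)?;  // resample and propagate to features
//! slam.statistics_compressed(&mut kstat);                 // extract joint mean and covariance
//! ```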
use std::collections::HashMap;
use nalgebra::{DefaultAllocator, Dim, DimName, Dynamic, MatrixMN, MatrixN, RealField, U1, VectorN};
use nalgebra::allocator::Allocator;
use nalgebra::storage::Storage;
use num_traits::ToPrimitive;
use bayes_estimate::estimators::sir;
use bayes_estimate::estimators::sir::SampleState;
use bayes_estimate::models::{KalmanEstimator, KalmanState};
use bayes_estimate::noise::CorrelatedNoise;
/// Fast SLAM state.
///
/// A sample state of the location with augmented Kalman states of map features.
pub struct FastSLAM<N: RealField, DL: Dim, DF: Dim>
where DefaultAllocator: Allocator<N, DL, DL> + Allocator<N, DL> + Allocator<N, DF, DF> + Allocator<N, DF>
{
/// Location as a sample state.
pub loc: SampleState<N, DL>,
/// Hashmap of augmented map feature states.
pub map: FeatureMap<N, DF>,
}
pub type Feature<N, DF> = KalmanState<N, DF>;
pub type FeatureCondMap<N, DF> = Vec<Feature<N, DF>>;
type FeatureMap<N, DF> = HashMap<u32, FeatureCondMap<N, DF>>;
impl<N: RealField + ToPrimitive, DL: Dim, DF: DimName> FastSLAM<N, DL, DF>
where DefaultAllocator: Allocator<N, DL, DL> + Allocator<N, DL> + Allocator<N, DF, DF> + Allocator<N, U1, DF> + Allocator<N, DF>
{
/// Creates a [FastSLAM] with empty feature map.
pub fn new_equal_likelihood(loc: SampleState<N, DL>) -> Self {
FastSLAM {
loc,
map: FeatureMap::new(),
}
}
/// SLAM new feature observation (overwrite).
///
/// Assumes there is no prior information about the feature (strictly, a uniform uninformative prior).
/// This implies:
/// a) The observation provides no information about the location, so no resampling is required.
/// b) The feature state estimates come directly from the location and observation.
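///
/// A minimal sketch of an observation model, assuming a 2-D location and a 2-D feature
/// observed as an offset from the location (names, dimensions and noise values are illustrative):
/// ```ignore
/// use nalgebra::{Vector2, Matrix2};
/// let z = Vector2::new(1.0, 2.0);              // observation of the new feature
/// slam.observe_new(7, |loc: &Vector2<f64>| Feature {
///     x: loc + &z,                             // feature position implied by this location sample
///     X: Matrix2::from_diagonal_element(0.01), // observation noise becomes the feature covariance
/// });
/// ```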
pub fn observe_new(&mut self, feature: u32, obs_model: impl Fn(&VectorN<N, DL>) -> Feature<N,DF>) {
let fmap: FeatureCondMap<N,DF> = self.loc.s.iter().map(|s| obs_model(s)).collect();
self.map.insert(feature, fmap);
}
/// SLAM feature observation.
///
/// Uses the Extended FastSLAM observation equations.
/// Note: Mathematically only the weight ratios are important; numerically, however, the range should be restricted.
/// The weights are computed here using the simplest form, with the common factor Ht removed.
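///
/// A minimal sketch of an innovation model for a feature observed directly as its offset from
/// the location (names, dimensions and noise values are illustrative):
/// ```ignore
/// use nalgebra::{Vector2, Matrix2};
/// let z = Vector2::new(1.1, 1.9);                        // new observation of feature 7
/// slam.observe(7, |loc: &Vector2<f64>, fx: &Vector2<f64>| {
///     let innovation = &z - (fx - loc);                  // z - h(loc, fx)
///     let noise = CorrelatedNoise { Q: Matrix2::from_diagonal_element(0.01) };
///     (innovation, noise)
/// }, &Matrix2::identity());                              // hx = dh/dfx evaluated at the estimate
/// ```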
pub fn observe<DZ: Dim>(&mut self, feature: u32,
innovation_model: impl Fn(&VectorN<N, DL>, &VectorN<N, DF>) -> (VectorN<N, DZ>, CorrelatedNoise<N, DZ>),
hx: &MatrixMN<N, DZ, DF>)
where DefaultAllocator: Allocator<N, U1, DF> +
Allocator<N, DZ, DZ> + Allocator<N, DZ, DF> + Allocator<N, DF, DZ> + Allocator<N, DZ>
{
// Existing feature - reference the associated feature map
let afm = self.map.get_mut(&feature).unwrap();
let nsamples = self.loc.s.len();
// Iterate over samples
for si in 0..nsamples {
let m1 = &mut afm[si]; // Associated feature's map particle
let loc = &self.loc.s[si];
// Observation innovation and noise
let (innov, noise) = innovation_model(loc, &m1.x);
// Innovation variance hx.X.hx' + Q
let mut cov = noise.Q.clone();
cov.quadform_tr(N::one(), hx, &m1.X, N::one());
// Inverse innovation covariance
let s_cholesky = cov.clone().cholesky().unwrap();
let sinv = s_cholesky.inverse();
// Multiplicative fusion of observation weights, integral of Gaussian product g(p,P)*g(q,Q)
// Drop Ht which is a common factor for all weights
let sinv_diagonal = s_cholesky.l_dirty().iter().step_by(innov.nrows()+1);
let determinant_sinv = sinv_diagonal.fold(N::one(), |prod: N, n: &N| prod * *n).to_f32().unwrap();
let logl = innov.dot(&(&sinv * &innov)).to_f32().unwrap();
self.loc.w[si] *= (logl.exp() * determinant_sinv).powf(-0.5);
// Estimate conditional state of feature
let w = &m1.X * hx.transpose() * &sinv; // EKF update for the conditional feature observation: Kalman gain w = X*hx'*S^-1
m1.x += &w * &innov;
m1.X.quadform_tr(-N::one(), &w, &cov, N::one());
}
}
/// Forget all feature information; the feature number can then be reused for a new feature.
pub fn forget(&mut self, feature: u32) -> Option<FeatureCondMap<N,DF>>
{
self.map.remove(&feature)
}
/// Resampling Update.
///
/// Resample particles using their weights and propagate the resampling to all features.
/// Only resamples if the weights have been updated.
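///
/// A minimal usage sketch with hypothetical closures matching the sir::Resampler and
/// sir::Roughener signatures (my_resampler and my_roughener are placeholders):
/// ```ignore
/// let (unique, lcond) = slam.update_resample(
///     &mut |w, rng| my_resampler(w, rng),   // resampler: weights in, resample counts out
///     &mut |s, rng| my_roughener(s, rng),   // roughener: perturbs the location samples
/// )?;
/// ```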
pub fn update_resample(&mut self, resampler: &mut sir::Resampler,
roughener: &mut sir::Roughener<N, DL>) -> Result<(u32, f32), &'static str>
{
let nsamples = self.loc.s.len();
let (resamples, unique, lcond) = resampler(&mut self.loc.w, &mut self.loc.rng)?;
// Keep the live samples
sir::live_samples(&mut self.loc.s, &resamples);
// Resampling results in uniform likelihoods
self.loc.w.fill(1.);
// Propagate resampling to All features
for (_feature, fm) in self.map.iter_mut() { // All Features
let mut fmr = FeatureCondMap::with_capacity(nsamples); // Resampled feature map
for fmi in fm.iter().enumerate() { // Iterate over all feature particles
// Multiple copies of this resampled feature
for _res in 0..resamples[fmi.0] {
fmr.push(fmi.1.clone());
}
}
*fm = fmr; // Copy in the resampled feature map
}
roughener(&mut self.loc.s, &mut self.loc.rng); // Roughen location
Ok((unique, lcond))
}
/// Compute sample mean and covariance statistics of feature.
///
/// Uses the Maximum Likelihood (biased) estimate definition of covariance (normalised by 1/n).
///
/// fs is the subscript in kstat at which the feature statistics are returned
/// fm is the feature map of interest
/// fi is an iterator over the previously processed features (cross-covariances are computed against this map subset)
///
/// Numerics:
/// No check is made for the conditioning of the samples with regard to mean and covariance.
/// Extreme ranges or very large sample sizes will result in inaccuracy.
/// The covariance should, however, always remain PSD.
///
/// Precond: kstat contains the location mean and the previous (smaller fs) feature statistics.
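///
/// In symbols, as implemented below for a feature with conditional Kalman states (x_i, X_i)
/// over n samples:
///   mean = (1/n) * sum_i x_i
///   cov  = (1/n) * sum_i ( X_i + (x_i - mean)(x_i - mean)' )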
fn feature_statistics(&self, kstat: &mut KalmanState<N, Dynamic>,
fs: usize, fm: &FeatureCondMap<N, DF>,
fi: &mut dyn Iterator<Item = &FeatureCondMap<N, DF>>)
{
let nl = self.loc.s.first().unwrap().nrows(); // No of location states
let nsamples = N::from_usize(self.loc.s.len()).unwrap();
// Feature mean
let mean_f = fm.iter().map(|f| &f.x).sum::<VectorN<N, DF>>() / nsamples;
kstat.x.fixed_rows_mut::<DF>(fs).copy_from(&mean_f);
// Feature variance: the ML estimate given the estimated mean
let nf = mean_f.data.shape().0;
let mut var_f = MatrixMN::zeros_generic(nf, nf);
for fp in fm.iter() {
let xx = &fp.x - &mean_f;
var_f += &fp.X + &xx * xx.transpose();
}
var_f /= nsamples;
kstat.X.fixed_slice_mut::<DF,DF>(fs,fs).copy_from(&var_f);
// Location,feature covariance
for ls in 0..nl {
let mut cov = VectorN::zeros_generic(nf, U1);
let mean_si = kstat.x[ls];
for fp in fm.iter().enumerate() {
cov += (&fp.1.x - &mean_f) * (self.loc.s[fp.0][ls] - mean_si);
}
cov /= nsamples;
kstat.X.fixed_slice_mut::<DF, U1>(ls, fs).copy_from(&cov);
}
// Feature,feature covariance. Iterate over previous features with means already computed
let mut fjs = nl; // Feature subscript
for fj in fi {
let mut cov = MatrixN::zeros_generic(nf, nf);
let mut fpj = fj.iter();
let mean_fi = kstat.x.fixed_rows::<DF>(fjs);
for fp in fm.iter() {
cov += (&fp.x - &mean_f) * (&fpj.next().unwrap().x - &mean_fi).transpose();
}
cov /= nsamples;
kstat.X.fixed_slice_mut::<DF, DF>(fjs, fs).copy_from(&cov);
fjs += DF::dim();
}
}
/// Compute sample mean and covariance statistics of filter.
///
/// kstat elements are filled first with Location statistics and then the Map feature statistics.
/// Feature statistics are computed in feature number order, and only for those for which there is space in kstat.
///
/// Note: Covariance values are indeterminate for nparticles == 1.
/// Precond:
/// kstat must have space for Location statistics
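///
/// A minimal sizing sketch, assuming a 2-D location and 2-D features (sizes are illustrative):
/// ```ignore
/// use nalgebra::{DVector, DMatrix};
/// let n = 2 + 2 * slam.map.len();
/// let mut kstat = KalmanState { x: DVector::zeros(n), X: DMatrix::zeros(n, n) };
/// slam.statistics_compressed(&mut kstat);
/// ```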
pub fn statistics_compressed(&self, kstat: &mut KalmanState<N, Dynamic>)
where DefaultAllocator: Allocator<N, U1, DL>
{
let nl = self.loc.s.first().unwrap().data.shape().0; // No of location states
assert!(nl.value() + self.map.len() <= kstat.x.nrows(), "kstat too small to hold location and features");
kstat.x.fill(N::zero()); // Zero everything (required only for non-existing feature states)
kstat.X.fill(N::zero()); // Zero everything (required only for non-existing feature states)
// Get Location statistics
let loc_state = KalmanEstimator::<N, DL>::kalman_state(&self.loc).unwrap();
kstat.x.rows_generic_mut(0,nl).copy_from(&loc_state.x);
kstat.X.generic_slice_mut((0, 0), (nl,nl)).copy_from(&loc_state.X);
// Iterate over the feature statistics
let mut fs = nl.value(); // Feature subscript
for f in self.map.iter() {
let mut until_fi = self.map.iter().take_while(|it| {it.0 != f.0}).map(|it| {it.1});
self.feature_statistics(kstat, fs, f.1, &mut until_fi); // build statistics of fi with other features up to fi
fs += DF::dim();
}
kstat.X.fill_lower_triangle_with_upper_triangle();
}
/// Compute sample mean and covariance statistics of filter.
///
/// kstat elements are filled first with Location statistics and then the Map feature statistics.
/// Feature statistics are computed using the feature number as an index (after the location), and only for those for which there is space in kstat.
///
/// Note: Covariance values are indeterminate for nparticles == 1.
/// Precond:
/// kstat must have space for Location statistics
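///
/// A minimal sizing sketch, assuming a 2-D location and 2-D features; space is required up to
/// the largest feature number in use (sizes are illustrative):
/// ```ignore
/// use nalgebra::{DVector, DMatrix};
/// let maxf = *slam.map.keys().max().unwrap() as usize;
/// let n = 2 + 2 * (maxf + 1);
/// let mut kstat = KalmanState { x: DVector::zeros(n), X: DMatrix::zeros(n, n) };
/// slam.statistics_sparse(&mut kstat);
/// ```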
pub fn statistics_sparse(&self, kstat: &mut KalmanState<N, Dynamic>)
where DefaultAllocator: Allocator<N, U1, DL>
{
let nl = self.loc.s.first().unwrap().data.shape().0; // No of location states
assert!(nl.value() + self.map.len() <= kstat.x.nrows(), "kstat too small to hold location and features");
kstat.x.fill(N::zero()); // Zero everything (required only for non-existing feature states)
kstat.X.fill(N::zero()); // Zero everything (required only for non-existing feature states)
// Get Location statistics
let loc_state = KalmanEstimator::<N,DL>::kalman_state(&self.loc).unwrap();
kstat.x.rows_generic_mut(0,nl).copy_from(&loc_state.x);
kstat.X.generic_slice_mut((0, 0), (nl,nl)).copy_from(&loc_state.X);
// Iterate over the feature statistics
let mut ordered_features = self.map.iter().map(|m| { *m.0 }).collect::<Vec<_>>();
ordered_features.sort();
for fi in ordered_features.iter().cloned() {
let fs = nl.value() + DF::dim() * fi as usize; // Feature subscript
let mut until_fi = ordered_features.iter().take_while(|it| {**it != fi}).map(|it| {self.map.get(it).unwrap()});
self.feature_statistics(kstat, fs, self.map.get(&fi).unwrap(), &mut until_fi); // build statistics of fi with other features up to fi
}
kstat.X.fill_lower_triangle_with_upper_triangle();
}
}