use num_traits::Pow;
use rand::{Rng, RngCore};
use rand_distr::{Distribution, StandardNormal, Uniform};
use na::{DefaultAllocator, Dim, U1, MatrixN, RealField, VectorN};
use na::allocator::Allocator;
use na::storage::Storage;
use nalgebra as na;
use crate::cholesky::UDU;
use crate::matrix::check_non_negativ;
use crate::models::{Estimator, KalmanEstimator, KalmanState};
use crate::noise::CorrelatedNoise;
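/// Sample state estimation.
///
/// The state distribution is represented as a set of samples with associated
/// likelihood weights, as used by sampling importance resampling (SIR) filters.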
pub struct SampleState<N: RealField, D: Dim>
where
DefaultAllocator: Allocator<N, D>,
{
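/// The state samples.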
pub s: Samples<N, D>,
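/// Likelihood weight of each sample.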
pub w: Likelihoods,
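/// Random number generator used for stochastic operations.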
pub rng: Box<dyn RngCore>
}
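/// A set of state samples.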
pub type Samples<N, D> = Vec<VectorN<N, D>>;
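/// Likelihood weight of each sample.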
pub type Likelihoods = Vec<f32>;
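/// Number of times each sample is to be resampled.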
pub type Resamples = Vec<u32>;
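/// Resampling function: from the sample likelihoods, determines how many times
/// each sample is resampled, the number of unique samples, and the likelihood
/// conditioning (smallest likelihood / cumulative likelihood). The likelihoods
/// are overwritten in the process.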
pub type Resampler = dyn FnMut(&mut Likelihoods, &mut dyn RngCore) -> Result<(Resamples, u32, f32), &'static str>;
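/// Roughening function: perturbs the samples to maintain sample diversity.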
pub type Roughener<N, D> = dyn FnMut(&mut Vec<VectorN<N, D>>, &mut dyn RngCore);
impl<N: RealField, D: Dim> SampleState<N, D>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
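/// Creates a `SampleState` in which every sample has an equal likelihood of 1.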
pub fn new_equal_likelihood(s: Vec<VectorN<N, D>>, rng: Box<dyn RngCore>) -> SampleState<N, D> {
let n_samples = s.len();
SampleState {
s,
w: vec![1f32; n_samples],
rng
}
}
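/// State prediction: applies the deterministic state transition `f` to each sample in place.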
pub fn predict(&mut self, f: fn(&VectorN<N, D>) -> VectorN<N, D>)
{
self.s.iter_mut().for_each(|el|{
el.copy_from(&f(el))
});
}
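/// State prediction with a stochastic transition: `f` draws each sample's
/// predicted state using the state's random number generator.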
pub fn predict_sampled(&mut self, f: impl Fn(&VectorN<N, D>, &mut dyn RngCore) -> VectorN<N, D>)
{
for si in 0..self.s.len() {
let ps = f(&self.s[si], &mut self.rng);
self.s[si].copy_from(&ps)
}
}
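/// Observation update: multiplies each sample's likelihood weight by the
/// likelihood `l` of that sample given the observation.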
pub fn observe<LikelihoodFn>(&mut self, l: LikelihoodFn)
where
LikelihoodFn: Fn(&VectorN<N, D>) -> f32,
{
for (w, z) in self.w.iter_mut().zip(self.s.iter()) {
*w *= l(z);
}
}
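/// Observation update with precomputed likelihoods, one per sample.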
pub fn observe_likelihood(&mut self, l: Likelihoods) {
assert_eq!(self.w.len(), l.len());
for (wi, li) in self.w.iter_mut().zip(l.iter()) {
*wi *= *li;
}
}
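/// Resampling update: resamples the set according to the likelihood weights,
/// roughens the resampled samples, and resets all weights to 1.
/// Returns the number of unique samples and the likelihood conditioning.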
pub fn update_resample(&mut self, resampler: &mut Resampler, roughener: &mut Roughener<N, D>) -> Result<(u32, f32), &'static str>
{
let (resamples, unique_samples, lcond) = resampler(&mut self.w, self.rng.as_mut())?;
SampleState::live_samples(&mut self.s, &resamples);
roughener(&mut self.s, self.rng.as_mut());
self.w.fill(1.);
Ok((unique_samples, lcond))
}
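/// Rebuilds the sample set in place from the resample counts: sample `i` is
/// repeated `resamples[i]` times and samples with a count of 0 are dropped.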
fn live_samples(s: &mut Vec<VectorN<N, D>>, resamples: &Resamples)
{
// First pass: pack the live samples (resample count > 0) into the tail of s.
let mut si = s.len();
let mut livei = si;
for pr in resamples.iter().rev() {
si -= 1;
if *pr > 0 {
livei -= 1;
s[livei] = s[si].clone();
}
}
assert!(si == 0);
// Second pass: copy each live sample forward, repeated by its resample count.
for pr in resamples {
let mut res = *pr;
if res > 0 {
loop {
s[si] = s[livei].clone();
si += 1;
res -= 1;
if res == 0 { break; }
}
livei += 1;
}
}
assert!(si == s.len());
assert!(livei == s.len());
}
}
impl<N: RealField, D: Dim> Estimator<N, D> for SampleState<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
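/// State estimate as the mean of the samples.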
fn state(&self) -> Result<VectorN<N, D>, &'static str> {
let s_shape = self.s[0].data.shape();
let mut x = VectorN::zeros_generic(s_shape.0, s_shape.1);
for s in self.s.iter() {
x += s;
}
x /= N::from_usize(self.s.len()).unwrap();
Ok(x)
}
}
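/// Standard resampler: draws one uniform random number per sample, sorts them
/// and scales them by the cumulative likelihood, then assigns each sample a
/// resample count equal to the number of draws falling within its cumulative
/// likelihood interval.
/// Returns the resample counts, the number of unique samples resampled, and
/// the likelihood conditioning (smallest likelihood / cumulative likelihood).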
pub fn standard_resampler(w: &mut Likelihoods, rng: &mut dyn RngCore) -> Result<(Resamples, u32, f32), &'static str>
{
let (lmin, lcum) = cumulative_likelihood(w)?;
let uniform01: Uniform<f32> = Uniform::new(0f32, 1f32);
let mut ur: Vec<f32> = rng.sample_iter(uniform01).take(w.len()).collect();
ur.sort_by(|a, b| a.total_cmp(b));
assert!(*ur.first().unwrap() >= 0. && *ur.last().unwrap() < 1.);
ur.iter_mut().for_each(|el| *el *= lcum);
let mut uri = ur.iter().cloned();
let mut urn = uri.next();
let mut unique: u32 = 0;
let mut resamples = Resamples::with_capacity(w.len());
for wi in w.iter().cloned() {
let mut res: u32 = 0;
if urn.is_some() && urn.unwrap() < wi {
unique += 1;
loop {
res += 1;
urn = uri.next();
if urn.is_none() { break; }
if !(urn.unwrap() < wi) { break;}
}
}
resamples.push(res);
}
if uri.peekable().peek().is_some() {
return Err("likelihoods are not numeric and cannot be resampled");
}
Ok((resamples, unique, lmin / lcum))
}
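/// Systematic resampler: strides through the cumulative likelihoods with a
/// fixed step of (cumulative sum / number of samples) from a single uniform
/// random offset. Resample counts have lower variance than with
/// [standard_resampler].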
pub fn systematic_resampler(l: &mut Likelihoods, rng: &mut dyn RngCore) -> Result<(Resamples, u32, f32), &'static str>
{
let (lmin, lcum) = cumulative_likelihood(l)?;
let lstep = lcum / l.len() as f32;
let uniform01: Uniform<f32> = Uniform::new(0f32, 1f32);
let ur = rng.sample(uniform01);
let mut resamples = Resamples::with_capacity(l.len());
let mut unique: u32 = 0;
let mut s = ur * lstep;
for li in l.iter() {
let mut res: u32 = 0;
if s < *li {
unique += 1;
loop {
res += 1;
s += lstep;
if !(s < *li) {break;}
}
}
resamples.push(res);
}
Ok((resamples, unique, lmin / lcum))
}
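/// Converts the likelihoods in place into their cumulative sums, using Kahan
/// summation to reduce floating point error.
/// Returns the smallest likelihood and the cumulative sum, failing if any
/// likelihood is negative or the sum is zero or NaN.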
fn cumulative_likelihood(l: &mut Likelihoods) -> Result<(f32, f32), &'static str> {
let mut lmin = f32::MAX;
let mut lcum = 0.;
{
let mut c = 0.;
for li in l.iter_mut() {
if *li < lmin {
lmin = *li;
}
let y = *li - c;
let t = lcum + y;
c = t - lcum - y;
lcum = t;
*li = t;
}
}
if lmin < 0.{
return Err("negative likelihood");
}
if lcum <= 0. {
return Err("zero cumulative likelihood sum");
}
if lcum.is_nan() {
return Err("NaN cumulative likelihood sum");
}
Ok((lmin, lcum))
}
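/// Roughening: adds zero-mean normal noise to each sample. The standard
/// deviation in each dimension is proportional to that dimension's sample
/// range (max - min), scaled by `k * n^(-1/d)` for `n` samples of dimension `d`.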
pub fn roughen_minmax<N: RealField, D: Dim>(s: &mut Vec<VectorN<N, D>>, k: f32, rng: &mut dyn RngCore)
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
let x_dim = s[0].data.shape().0;
let x_size = x_dim.value();
let sigma_scale = k * f32::pow(s.len() as f32, -1f32/(x_size as f32));
let mut xmin = s[0].clone();
let mut xmax = xmin.clone();
for si in s.iter() {
let mut mini = xmin.iter_mut();
let mut maxi = xmax.iter_mut();
for xd in si.iter() {
let minx = mini.next().unwrap();
let maxx = maxi.next().unwrap();
if *xd < *minx {*minx = *xd;}
if *xd > *maxx {*maxx = *xd;}
}
}
let sigma = (xmax - xmin) * N::from_f32(sigma_scale).unwrap();
for si in s.iter_mut() {
let normal = StandardNormal.sample_iter(&mut *rng).map(|n| {N::from_f32(n).unwrap()});
let mut nr = VectorN::<N, D>::from_iterator_generic(x_dim, U1, normal.take(x_size));
for (ni, n) in nr.iter_mut().enumerate() {
*n *= sigma[ni];
}
*si += nr;
}
}
impl<N: RealField, D: Dim> CorrelatedNoise<N, D>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
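/// Returns a function that draws correlated noise with covariance `Q`.
/// `Q` is factorised once so the returned closure only has to multiply the
/// factor by a standard normal sample vector.
/// Fails if `Q` is not positive semi-definite.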
pub fn sampler<'s>(&'s self) -> Result<impl Fn(&mut dyn RngCore) -> VectorN<N,D> + 's, &'static str>
{
let mut uc = self.Q.clone();
let udu = UDU::new();
let rcond = udu.UCfactor_n(&mut uc, self.Q.nrows());
check_non_negativ(rcond, "Q not PSD")?;
udu.Lzero(&mut uc);
let noise_fn = move |rng: &mut dyn RngCore| -> VectorN<N,D> {
let rnormal = StandardNormal.sample_iter(rng).map(|n| {N::from_f32(n).unwrap()}).take(self.Q.nrows());
let n = VectorN::<N, D>::from_iterator_generic(self.Q.data.shape().0, U1, rnormal);
&uc * n
};
Ok(noise_fn)
}
}
impl<N: RealField, D: Dim> KalmanEstimator<N, D> for SampleState<N, D>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, U1, D> + Allocator<N, D>,
{
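/// Initialises the samples from a Kalman state: every sample is set to the
/// mean `x` and then perturbed with correlated noise of covariance `X`.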
fn init(&mut self, state: &KalmanState<N, D>) -> Result<(), &'static str> {
for s in self.s.iter_mut() {
s.copy_from(&state.x);
}
let noise = CorrelatedNoise { Q: state.X.clone() };
let sampler = noise.sampler()?;
self.predict_sampled(move |x: &VectorN<N,D>, rng: &mut dyn RngCore| -> VectorN<N,D> {
x + sampler(rng)
});
Ok(())
}
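/// Kalman state estimate as the sample mean and sample covariance.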
fn kalman_state(&self) -> Result<KalmanState<N, D>, &'static str> {
let x = self.state()?;
let s_shape = self.s[0].data.shape();
let mut xx = MatrixN::zeros_generic(s_shape.0, s_shape.0);
for s in self.s.iter() {
let sx = s - &x;
let sxt = sx.transpose();
xx += sx * sxt;
}
xx /= N::from_usize(self.s.len()).unwrap();
Ok(KalmanState { x, X: xx })
}
}