#![allow(dead_code)]
use crate::TimeSeries;
use scirs2_core::random::{thread_rng, Distribution, Uniform};
use torsh_tensor::{
creation::{ones, randn, zeros},
Tensor,
};
/// Sequential Monte Carlo (particle) filter state.
///
/// Maintains a cloud of `num_particles` state samples of dimension
/// `state_dim`, each with an importance weight, plus the effective-sample-size
/// threshold that gates resampling.
pub struct ParticleFilter {
    // Number of particles in the cloud.
    num_particles: usize,
    // Dimension of each particle's state vector.
    state_dim: usize,
    // Particle states, shape `[num_particles, state_dim]`.
    particles: Tensor,
    // Importance weights, shape `[num_particles]`; intended to sum to 1
    // (see `normalize_weights`), though callers can set them raw.
    weights: Tensor,
    // Fraction of `num_particles`: resampling triggers when the effective
    // sample size falls below `ess_threshold * num_particles`.
    ess_threshold: f64,
}
impl ParticleFilter {
    /// Creates a filter with `num_particles` particles drawn from a standard
    /// normal in `state_dim` dimensions and uniform weights (each
    /// `1 / num_particles`).
    pub fn new(num_particles: usize, state_dim: usize) -> Self {
        let particles: Tensor<f32> =
            randn(&[num_particles, state_dim]).expect("tensor creation should succeed");
        let weights = ones::<f32>(&[num_particles])
            .expect("tensor creation should succeed")
            .div_scalar(num_particles as f32)
            .expect("weight normalization should succeed");
        Self {
            num_particles,
            state_dim,
            particles,
            weights,
            ess_threshold: 0.5,
        }
    }

    /// Creates a filter from an existing `[num_particles, state_dim]`
    /// particle tensor.
    ///
    /// When `weights` is `None`, uniform weights are used. In either case the
    /// weights are normalized to sum to one, matching the invariant `new`
    /// establishes. (Fix: the original left the default weights summing to
    /// `num_particles` instead of 1.)
    pub fn with_initial_particles(particles: Tensor, weights: Option<Tensor>) -> Self {
        let shape = particles.shape();
        let num_particles = shape.dims()[0];
        let state_dim = shape.dims()[1];
        let weights = weights
            .unwrap_or_else(|| ones(&[num_particles]).expect("tensor creation should succeed"));
        let mut filter = Self {
            num_particles,
            state_dim,
            particles,
            weights,
            ess_threshold: 0.5,
        };
        filter.normalize_weights();
        filter
    }

    /// Builder-style setter for the ESS resampling threshold, expressed as a
    /// fraction of `num_particles` (typically in `(0, 1]`).
    pub fn with_ess_threshold(mut self, threshold: f64) -> Self {
        self.ess_threshold = threshold;
        self
    }

    /// Returns `(num_particles, state_dim, ess_threshold)`.
    pub fn config(&self) -> (usize, usize, f64) {
        (self.num_particles, self.state_dim, self.ess_threshold)
    }

    /// Borrows the particle tensor, shape `[num_particles, state_dim]`.
    pub fn particles(&self) -> &Tensor {
        &self.particles
    }

    /// Borrows the weight tensor, shape `[num_particles]`.
    pub fn weights(&self) -> &Tensor {
        &self.weights
    }

    /// Replaces the particle cloud. The caller is responsible for keeping the
    /// shape consistent with `num_particles` and `state_dim`.
    pub fn set_particles(&mut self, particles: Tensor) {
        self.particles = particles;
    }

    /// Replaces the weights and renormalizes them to sum to one.
    pub fn set_weights(&mut self, weights: Tensor) {
        self.weights = weights;
        self.normalize_weights();
    }

    /// Rescales the weights to sum to one. A no-op when the total weight is
    /// effectively zero, to avoid dividing by a degenerate sum.
    fn normalize_weights(&mut self) {
        let mut weights_vec = self.weights.to_vec().unwrap_or_default();
        let weight_sum: f32 = weights_vec.iter().sum();
        if weight_sum > 1e-10 {
            for w in &mut weights_vec {
                *w /= weight_sum;
            }
            self.weights = Tensor::from_vec(weights_vec, &[self.num_particles])
                .expect("tensor creation should succeed");
        }
    }

    /// Effective sample size `1 / sum(w_i^2)`; falls back to `num_particles`
    /// when the weights are numerically all zero.
    fn effective_sample_size(&self) -> f64 {
        let weights_vec = self.weights.to_vec().unwrap_or_default();
        let sum_squared: f64 = weights_vec.iter().map(|&w| (w as f64) * (w as f64)).sum();
        if sum_squared > 1e-10 {
            1.0 / sum_squared
        } else {
            self.num_particles as f64
        }
    }

    /// Cumulative weight distribution rescaled so the final entry is 1.0
    /// (left untouched when the total weight is effectively zero).
    /// Shared by both resampling schemes.
    fn normalized_cumulative_weights(&self) -> Vec<f64> {
        let weights_vec = self.weights.to_vec().unwrap_or_default();
        let mut cumulative = Vec::with_capacity(self.num_particles);
        let mut cumsum = 0.0f64;
        for &w in &weights_vec {
            cumsum += w as f64;
            cumulative.push(cumsum);
        }
        let total = cumulative.last().copied().unwrap_or(1.0);
        if total > 1e-10 {
            for cw in &mut cumulative {
                *cw /= total;
            }
        }
        cumulative
    }

    /// Rebuilds the particle tensor by copying the rows selected in
    /// `indices` (shared tail of both resampling schemes).
    fn gather_particles(&mut self, indices: &[usize]) {
        let particles_vec = self.particles.to_vec().unwrap_or_default();
        let mut resampled = Vec::with_capacity(self.num_particles * self.state_dim);
        for &idx in indices {
            let start = idx * self.state_dim;
            resampled.extend_from_slice(&particles_vec[start..start + self.state_dim]);
        }
        self.particles = Tensor::from_vec(resampled, &[self.num_particles, self.state_dim])
            .expect("tensor creation should succeed");
    }

    /// Systematic (low-variance) resampling: one uniform offset plus evenly
    /// spaced points swept through the cumulative weight distribution.
    fn systematic_resample(&mut self) {
        let cumulative_weights = self.normalized_cumulative_weights();
        let mut rng = thread_rng();
        let dist = Uniform::new(0.0, 1.0 / self.num_particles as f64)
            .expect("distribution should succeed");
        let u0 = dist.sample(&mut rng);
        let mut indices = Vec::with_capacity(self.num_particles);
        let mut j = 0;
        for i in 0..self.num_particles {
            let u = u0 + (i as f64) / (self.num_particles as f64);
            while j < cumulative_weights.len() && cumulative_weights[j] < u {
                j += 1;
            }
            // Clamp in case floating-point accumulation overshoots the end.
            indices.push(j.min(self.num_particles - 1));
        }
        self.gather_particles(&indices);
    }

    /// Multinomial resampling: each replacement index is drawn independently
    /// from the weight distribution via inverse-CDF lookup.
    fn multinomial_resample(&mut self) {
        let cumulative_weights = self.normalized_cumulative_weights();
        let mut rng = thread_rng();
        let dist = Uniform::new(0.0, 1.0).expect("distribution should succeed");
        let mut indices = Vec::with_capacity(self.num_particles);
        for _ in 0..self.num_particles {
            let u = dist.sample(&mut rng);
            let idx = cumulative_weights
                .iter()
                .position(|&cw| cw >= u)
                .unwrap_or(self.num_particles - 1);
            indices.push(idx);
        }
        self.gather_particles(&indices);
    }

    /// Resamples the particle cloud with `method` when the effective sample
    /// size drops below `ess_threshold * num_particles`, then resets the
    /// weights to uniform.
    pub fn resample(&mut self, method: ResamplingMethod) {
        let ess = self.effective_sample_size();
        let threshold = self.ess_threshold * self.num_particles as f64;
        if ess < threshold {
            match method {
                ResamplingMethod::Systematic => self.systematic_resample(),
                ResamplingMethod::Multinomial => self.multinomial_resample(),
            }
            self.weights = ones(&[self.num_particles]).expect("tensor creation should succeed");
            self.normalize_weights();
        }
    }

    /// Propagates every particle through `transition_fn` (noise-free
    /// prediction step).
    ///
    /// Fix: the original computed the prediction and discarded it, leaving
    /// the particle cloud unchanged; the result is now written back.
    pub fn predict(&mut self, transition_fn: &dyn Fn(&Tensor) -> Tensor) {
        let mut new_particles = Vec::with_capacity(self.num_particles * self.state_dim);
        for i in 0..self.num_particles {
            let particle = self
                .particles
                .slice_tensor(0, i, i + 1)
                .expect("slice should succeed");
            let predicted = transition_fn(&particle);
            let row = predicted
                .to_vec()
                .expect("particle data extraction should succeed");
            new_particles.extend_from_slice(&row);
        }
        self.particles = Tensor::from_vec(new_particles, &[self.num_particles, self.state_dim])
            .expect("particle tensor reconstruction should succeed");
    }

    /// Propagates every particle through `transition_fn` and perturbs the
    /// result with zero-mean Gaussian noise of standard deviation
    /// `noise_std`.
    pub fn predict_with_noise(
        &mut self,
        transition_fn: &dyn Fn(&Tensor) -> Tensor,
        noise_std: f32,
    ) {
        let mut new_particles = Vec::with_capacity(self.num_particles * self.state_dim);
        for i in 0..self.num_particles {
            let particle = self
                .particles
                .slice_tensor(0, i, i + 1)
                .expect("slice should succeed");
            let predicted = transition_fn(&particle);
            let noise: Tensor<f32> =
                randn(&[1, self.state_dim]).expect("tensor creation should succeed");
            let scaled_noise = noise
                .mul_scalar(noise_std)
                .expect("noise scaling should succeed");
            let noisy_predicted = predicted
                .add(&scaled_noise)
                .expect("noise addition should succeed");
            let row = noisy_predicted
                .to_vec()
                .expect("particle data extraction should succeed");
            new_particles.extend_from_slice(&row);
        }
        self.particles = Tensor::from_vec(new_particles, &[self.num_particles, self.state_dim])
            .expect("particle tensor reconstruction should succeed");
    }

    /// Reweights every particle by its likelihood under `observation`, then
    /// renormalizes the weights.
    pub fn update(
        &mut self,
        observation: &Tensor,
        likelihood_fn: &dyn Fn(&Tensor, &Tensor) -> f64,
    ) {
        let mut weights_vec = self
            .weights
            .to_vec()
            .expect("weight data extraction should succeed");
        for i in 0..self.num_particles {
            let particle = self
                .particles
                .slice_tensor(0, i, i + 1)
                .expect("slice should succeed");
            let likelihood = likelihood_fn(&particle, observation);
            weights_vec[i] *= likelihood as f32;
        }
        self.weights = Tensor::from_vec(weights_vec, &[self.num_particles])
            .expect("weight tensor reconstruction should succeed");
        self.normalize_weights();
    }

    /// Weighted mean of the particle cloud, shape `[state_dim]`.
    ///
    /// Fix: the original was a stub that returned zeros; the estimate now
    /// actually uses the particles and weights.
    pub fn estimate(&self) -> Tensor {
        Tensor::from_vec(self.weighted_mean(), &[self.state_dim])
            .expect("tensor creation should succeed")
    }

    /// Weighted mean as a plain vector (shared by `estimate` and
    /// `covariance`). Out-of-range lookups contribute zero, so degenerate
    /// tensors fall back to a zero mean rather than panicking.
    fn weighted_mean(&self) -> Vec<f32> {
        let particles_vec = self.particles.to_vec().unwrap_or_default();
        let weights_vec = self.weights.to_vec().unwrap_or_default();
        let mut mean = vec![0.0f32; self.state_dim];
        for i in 0..self.num_particles {
            let w = weights_vec.get(i).copied().unwrap_or(0.0);
            for d in 0..self.state_dim {
                let x = particles_vec
                    .get(i * self.state_dim + d)
                    .copied()
                    .unwrap_or(0.0);
                mean[d] += w * x;
            }
        }
        mean
    }

    /// Weighted sample covariance of the particle cloud, shape
    /// `[state_dim, state_dim]`.
    ///
    /// Fix: the original was a stub that returned zeros.
    pub fn covariance(&self) -> Tensor {
        let particles_vec = self.particles.to_vec().unwrap_or_default();
        let weights_vec = self.weights.to_vec().unwrap_or_default();
        let mean = self.weighted_mean();
        let mut cov = vec![0.0f32; self.state_dim * self.state_dim];
        for i in 0..self.num_particles {
            let w = weights_vec.get(i).copied().unwrap_or(0.0);
            for a in 0..self.state_dim {
                let da = particles_vec
                    .get(i * self.state_dim + a)
                    .copied()
                    .unwrap_or(0.0)
                    - mean[a];
                for b in 0..self.state_dim {
                    let db = particles_vec
                        .get(i * self.state_dim + b)
                        .copied()
                        .unwrap_or(0.0)
                        - mean[b];
                    cov[a * self.state_dim + b] += w * da * db;
                }
            }
        }
        Tensor::from_vec(cov, &[self.state_dim, self.state_dim])
            .expect("tensor creation should succeed")
    }

    /// Runs the full predict/update/resample cycle over `series` and returns
    /// the per-step state estimates as a `[series.len(), state_dim]` series.
    ///
    /// Fix: the original collected the estimates and then discarded them,
    /// returning an all-zeros tensor.
    pub fn filter(
        &mut self,
        series: &TimeSeries,
        transition_fn: &dyn Fn(&Tensor) -> Tensor,
        likelihood_fn: &dyn Fn(&Tensor, &Tensor) -> f64,
    ) -> TimeSeries {
        let mut estimates = Vec::with_capacity(series.len() * self.state_dim);
        for t in 0..series.len() {
            self.predict(transition_fn);
            let obs = series
                .values
                .slice_tensor(0, t, t + 1)
                .expect("slice should succeed");
            self.update(&obs, likelihood_fn);
            self.resample(ResamplingMethod::Systematic);
            let estimate = self
                .estimate()
                .to_vec()
                .unwrap_or_else(|_| vec![0.0; self.state_dim]);
            estimates.extend_from_slice(&estimate);
        }
        let values = Tensor::from_vec(estimates, &[series.len(), self.state_dim])
            .expect("tensor creation should succeed");
        TimeSeries::new(values)
    }

    /// Placeholder smoother: currently just the forward filtering pass — no
    /// backward smoothing recursion is applied yet.
    pub fn smooth(
        &mut self,
        series: &TimeSeries,
        transition_fn: &dyn Fn(&Tensor) -> Tensor,
        likelihood_fn: &dyn Fn(&Tensor, &Tensor) -> f64,
    ) -> TimeSeries {
        self.filter(series, transition_fn, likelihood_fn)
    }

    /// Reinitializes the particles from a standard normal and resets the
    /// weights to uniform.
    pub fn reset(&mut self) {
        self.particles = randn::<f32>(&[self.num_particles, self.state_dim])
            .expect("tensor creation should succeed");
        self.weights = ones(&[self.num_particles]).expect("tensor creation should succeed");
        self.normalize_weights();
    }

    /// Snapshot of the cloud size, effective sample size, mean estimate, and
    /// covariance.
    pub fn statistics(&self) -> ParticleStats {
        ParticleStats {
            num_particles: self.num_particles,
            ess: self.effective_sample_size(),
            mean: self.estimate(),
            covariance: self.covariance(),
        }
    }
}
/// Strategy used to draw replacement particle indices during resampling.
pub enum ResamplingMethod {
    /// Low-variance systematic resampling: a single uniform draw plus evenly
    /// spaced offsets through the cumulative weight distribution.
    Systematic,
    /// Multinomial resampling: one independent uniform draw per particle.
    Multinomial,
}
/// Summary statistics of the current particle cloud, produced by
/// `ParticleFilter::statistics`.
pub struct ParticleStats {
    /// Number of particles in the cloud.
    pub num_particles: usize,
    /// Effective sample size, `1 / sum(w_i^2)`.
    pub ess: f64,
    /// Point estimate of the state, shape `[state_dim]` (see
    /// `ParticleFilter::estimate`).
    pub mean: Tensor,
    /// State covariance, shape `[state_dim, state_dim]` (see
    /// `ParticleFilter::covariance`).
    pub covariance: Tensor,
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Five-point univariate series shared by the integration-style tests.
    fn create_test_series() -> TimeSeries {
        let tensor = Tensor::from_vec(vec![1.0f32, 2.0, 3.0, 4.0, 5.0], &[5])
            .expect("Tensor should succeed");
        TimeSeries::new(tensor)
    }

    /// Identity transition: particles are propagated unchanged.
    fn identity_transition(x: &Tensor) -> Tensor {
        x.clone()
    }

    /// Flat likelihood: every particle matches any observation equally well.
    fn gaussian_likelihood(_particle: &Tensor, _obs: &Tensor) -> f64 {
        1.0
    }

    #[test]
    fn test_particle_filter_creation() {
        let filter = ParticleFilter::new(100, 2);
        let (n, dim, threshold) = filter.config();
        assert_eq!(n, 100);
        assert_eq!(dim, 2);
        assert_eq!(threshold, 0.5);
    }

    #[test]
    fn test_particle_filter_with_initial() {
        let cloud: Tensor<f32> = randn(&[50, 3]).expect("randn should succeed");
        let w = ones(&[50]).expect("ones should succeed");
        let filter = ParticleFilter::with_initial_particles(cloud, Some(w));
        let (n, dim, _) = filter.config();
        assert_eq!(n, 50);
        assert_eq!(dim, 3);
    }

    #[test]
    fn test_particle_filter_ess_threshold() {
        let filter = ParticleFilter::new(100, 2).with_ess_threshold(0.3);
        assert_eq!(filter.config().2, 0.3);
    }

    #[test]
    fn test_particle_filter_particles() {
        let filter = ParticleFilter::new(100, 2);
        assert_eq!(filter.particles().shape().dims(), [100, 2]);
        assert_eq!(filter.weights().shape().dims(), [100]);
    }

    #[test]
    fn test_particle_filter_set_particles() {
        let mut filter = ParticleFilter::new(50, 2);
        filter.set_particles(zeros(&[50, 2]).expect("zeros should succeed"));
        assert_eq!(filter.particles().shape().dims(), [50, 2]);
    }

    #[test]
    fn test_particle_filter_set_weights() {
        let mut filter = ParticleFilter::new(50, 2);
        filter.set_weights(ones(&[50]).expect("ones should succeed"));
        assert_eq!(filter.weights().shape().dims(), [50]);
    }

    #[test]
    fn test_particle_filter_predict() {
        let mut filter = ParticleFilter::new(10, 2);
        filter.predict(&identity_transition);
        assert_eq!(filter.particles().shape().dims(), [10, 2]);
    }

    #[test]
    fn test_particle_filter_predict_with_noise() {
        let mut filter = ParticleFilter::new(10, 2);
        filter.predict_with_noise(&identity_transition, 0.1);
        assert_eq!(filter.particles().shape().dims(), [10, 2]);
    }

    #[test]
    fn test_particle_filter_update() {
        let mut filter = ParticleFilter::new(10, 2);
        let observation = zeros(&[1]).expect("zeros should succeed");
        filter.update(&observation, &gaussian_likelihood);
        assert_eq!(filter.weights().shape().dims(), [10]);
    }

    #[test]
    fn test_particle_filter_resample() {
        let mut filter = ParticleFilter::new(10, 2);
        filter.resample(ResamplingMethod::Systematic);
        filter.resample(ResamplingMethod::Multinomial);
        assert_eq!(filter.particles().shape().dims(), [10, 2]);
    }

    #[test]
    fn test_particle_filter_estimate() {
        assert_eq!(ParticleFilter::new(10, 2).estimate().shape().dims(), [2]);
    }

    #[test]
    fn test_particle_filter_covariance() {
        assert_eq!(ParticleFilter::new(10, 2).covariance().shape().dims(), [2, 2]);
    }

    #[test]
    fn test_particle_filter_filter() {
        let series = create_test_series();
        let mut filter = ParticleFilter::new(20, 1);
        let result = filter.filter(&series, &identity_transition, &gaussian_likelihood);
        assert_eq!(result.len(), series.len());
    }

    #[test]
    fn test_particle_filter_smooth() {
        let series = create_test_series();
        let mut filter = ParticleFilter::new(20, 1);
        let result = filter.smooth(&series, &identity_transition, &gaussian_likelihood);
        assert_eq!(result.len(), series.len());
    }

    #[test]
    fn test_particle_filter_reset() {
        let mut filter = ParticleFilter::new(10, 2);
        filter.reset();
        assert_eq!(filter.particles().shape().dims(), [10, 2]);
        assert_eq!(filter.weights().shape().dims(), [10]);
    }

    #[test]
    fn test_particle_filter_statistics() {
        let stats = ParticleFilter::new(10, 2).statistics();
        assert_eq!(stats.num_particles, 10);
        assert_eq!(stats.mean.shape().dims(), [2]);
        assert_eq!(stats.covariance.shape().dims(), [2, 2]);
    }
}