use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::thread_rng;
/// Dynamic Linear Model (DLM): a linear Gaussian state-space model updated
/// sequentially by Kalman filtering.
///
/// Observation: y_t = F θ_t + v_t,   v_t ~ N(0, observation_variance).
/// Evolution:   θ_t = G θ_{t-1} + w_t, w_t ~ N(0, evolution_cov).
#[derive(Debug, Clone)]
pub struct DynamicLinearModel {
// Dimension of the latent state vector θ.
pub state_dim: usize,
// Dimension of the observation vector (the filtering code uses row 0 only,
// i.e. it effectively assumes univariate observations).
pub obs_dim: usize,
// Posterior state mean m_t after the most recent update.
pub state_mean: Array1<f64>,
// Posterior state covariance C_t after the most recent update.
pub state_cov: Array2<f64>,
// State transition matrix G.
pub evolution_matrix: Array2<f64>,
// Observation matrix F.
pub observation_matrix: Array2<f64>,
// Scalar observation noise variance V.
pub observation_variance: f64,
// Evolution noise covariance W (not added when a discount factor is set).
pub evolution_cov: Array2<f64>,
// Optional discount factor δ in (0, 1); when set, the prior covariance is
// G C G' / δ instead of G C G' + W.
pub discount_factor: Option<f64>,
// Number of observations processed so far.
pub time_step: usize,
}
impl DynamicLinearModel {
/// Create a DLM with `state_dim` latent states and `obs_dim` observations.
///
/// Defaults: identity evolution matrix, truncated-identity observation
/// matrix, unit variances, zero initial mean, identity initial covariance.
pub fn new(state_dim: usize, obs_dim: usize) -> Self {
    // Observation matrix defaults to a truncated identity: each output
    // directly observes the state component with the same index.
    let mut obs_matrix = Array2::zeros((obs_dim, state_dim));
    let diag_len = obs_dim.min(state_dim);
    for d in 0..diag_len {
        obs_matrix[[d, d]] = 1.0;
    }
    Self {
        state_dim,
        obs_dim,
        state_mean: Array1::zeros(state_dim),
        state_cov: Array2::eye(state_dim),
        evolution_matrix: Array2::eye(state_dim),
        observation_matrix: obs_matrix,
        observation_variance: 1.0,
        evolution_cov: Array2::eye(state_dim),
        discount_factor: None,
        time_step: 0,
    }
}
/// Build a polynomial trend DLM of the given `order`
/// (order 0 = local level, order 1 = local linear trend, ...).
pub fn polynomial_trend(order: usize, obs_variance: f64, evolution_variance: f64) -> Self {
    let state_dim = order + 1;
    let obs_dim = 1;
    // Evolution matrix: upper bidiagonal Jordan block — ones on the main
    // diagonal and on the first superdiagonal.
    let mut g = Array2::eye(state_dim);
    for row in 0..state_dim.saturating_sub(1) {
        g[[row, row + 1]] = 1.0;
    }
    // Only the level (first state component) is observed directly.
    let mut f = Array2::zeros((obs_dim, state_dim));
    f[[0, 0]] = 1.0;
    // Evolution noise enters only through the highest-order component.
    let mut w = Array2::zeros((state_dim, state_dim));
    w[[state_dim - 1, state_dim - 1]] = evolution_variance;
    Self {
        state_dim,
        obs_dim,
        state_mean: Array1::zeros(state_dim),
        // Diffuse-ish initial prior.
        state_cov: Array2::eye(state_dim) * 10.0,
        evolution_matrix: g,
        observation_matrix: f,
        observation_variance: obs_variance,
        evolution_cov: w,
        discount_factor: None,
        time_step: 0,
    }
}
/// Build a seasonal-component DLM with the given `period`, using the
/// sum-to-zero (seasonal factor) state representation.
///
/// # Panics
/// Panics if `period < 2`. The seasonal state has `period - 1` components,
/// and the construction below indexes `state_dim - 1` and iterates
/// `0..state_dim - 1`, both of which would underflow in `usize` for
/// smaller periods.
pub fn seasonal(period: usize, obs_variance: f64, evolution_variance: f64) -> Self {
    assert!(period >= 2, "Seasonal period must be at least 2");
    let state_dim = period - 1;
    let obs_dim = 1;
    // Evolution: shift the seasonal effects forward by one step; the last
    // row forces the effects over one full period to sum to zero.
    let mut g = Array2::zeros((state_dim, state_dim));
    for i in 0..state_dim - 1 {
        g[[i, i + 1]] = 1.0;
    }
    for j in 0..state_dim {
        g[[state_dim - 1, j]] = -1.0;
    }
    // Only the current seasonal effect is observed.
    let mut f = Array2::zeros((obs_dim, state_dim));
    f[[0, 0]] = 1.0;
    let w = Array2::eye(state_dim) * evolution_variance;
    Self {
        state_dim,
        obs_dim,
        state_mean: Array1::zeros(state_dim),
        // Diffuse-ish initial prior.
        state_cov: Array2::eye(state_dim) * 10.0,
        evolution_matrix: g,
        observation_matrix: f,
        observation_variance: obs_variance,
        evolution_cov: w,
        discount_factor: None,
        time_step: 0,
    }
}
/// Enable discount-factor evolution: the one-step prior covariance becomes
/// `G C G' / discount` instead of `G C G' + W`.
///
/// # Panics
/// Panics unless `discount` lies strictly inside `(0, 1)`.
pub fn with_discount_factor(mut self, discount: f64) -> Self {
    let in_open_unit_interval = 0.0 < discount && discount < 1.0;
    assert!(in_open_unit_interval, "Discount factor must be in (0, 1)");
    self.discount_factor = Some(discount);
    self
}
/// Set the initial state prior `(mean, covariance)`.
///
/// # Panics
/// Panics if `mean` is not length `state_dim` or `cov` is not
/// `state_dim × state_dim`.
pub fn with_initial_state(mut self, mean: Array1<f64>, cov: Array2<f64>) -> Self {
    let n = self.state_dim;
    assert_eq!(mean.len(), n);
    assert_eq!(cov.shape(), &[n, n]);
    self.state_mean = mean;
    self.state_cov = cov;
    self
}
/// Replace the state transition matrix G.
///
/// # Panics
/// Panics if `g` is not `state_dim × state_dim`.
pub fn with_evolution_matrix(mut self, g: Array2<f64>) -> Self {
    let n = self.state_dim;
    assert_eq!(g.shape(), &[n, n]);
    self.evolution_matrix = g;
    self
}
/// Replace the observation matrix F.
///
/// # Panics
/// Panics if `f` is not `obs_dim × state_dim`.
pub fn with_observation_matrix(mut self, f: Array2<f64>) -> Self {
    let expected = [self.obs_dim, self.state_dim];
    assert_eq!(f.shape(), &expected);
    self.observation_matrix = f;
    self
}
/// One-step-ahead state prior: returns `(a_t, R_t)` where `a_t = G m` and
/// `R_t = G C G' + W`, or `R_t = G C G' / δ` under a discount factor.
///
/// This method only reads model state, so it now takes `&self`
/// (backward-compatible relaxation: the original `&mut self` receiver
/// never mutated anything).
pub fn predict(&self) -> (Array1<f64>, Array2<f64>) {
    // Prior mean: a_t = G m_{t-1}.
    let prior_mean = self.evolution_matrix.dot(&self.state_mean);
    // G C G' is needed in both branches — compute it once.
    let gcg = self
        .evolution_matrix
        .dot(&self.state_cov)
        .dot(&self.evolution_matrix.t());
    // Discounting inflates G C G' (W is implicit); otherwise add W.
    let prior_cov = match self.discount_factor {
        Some(delta) => gcg / delta,
        None => &gcg + &self.evolution_cov,
    };
    (prior_mean, prior_cov)
}
/// Kalman-filter update with a single scalar `observation`.
///
/// Returns the one-step-ahead forecast mean computed *before* the
/// observation is incorporated, or an error if the forecast variance is
/// not positive. Only row 0 of the observation matrix is used, so this
/// assumes a univariate observation model (obs_dim == 1).
pub fn update(&mut self, observation: f64) -> Result<f64, String> {
// State prior at time t: a_t = G m, R_t = G C G' (+ W, or / δ).
let (a_t, r_t) = self.predict();
// Forecast mean f_t = F a_t, using the first (only) observation row.
let f_t_vec = self.observation_matrix.row(0).to_owned();
let f_t = f_t_vec.dot(&a_t);
// Forecast variance q_t = F R_t F' + V.
let rf = r_t.dot(&f_t_vec);
let q_t = f_t_vec.dot(&rf) + self.observation_variance;
if q_t <= 0.0 {
return Err("Forecast variance must be positive".to_string());
}
// Forecast error and Kalman gain A_t = R_t F' / q_t.
let e_t = observation - f_t;
let a_kalman = &rf / q_t;
// Posterior mean m_t = a_t + A_t e_t.
let m_t = &a_t + &(&a_kalman * e_t);
// Posterior covariance C_t = R_t - A_t A_t' q_t; the gain vector is
// reshaped to a column so the outer product can be formed via `dot`.
let aa_q = a_kalman
.clone()
.into_shape_with_order((self.state_dim, 1))
.expect("reshape should succeed for compatible dimensions");
let aa_q_t = aa_q.dot(&aa_q.t()) * q_t;
let c_t = &r_t - &aa_q_t;
self.state_mean = m_t;
self.state_cov = c_t;
self.time_step += 1;
Ok(f_t)
}
/// k-step-ahead forecasts: the `(mean, variance)` of the predictive
/// distribution for each of the next `steps` observations.
///
/// Uses only row 0 of the observation matrix (univariate forecasting).
pub fn forecast(&self, steps: usize) -> Vec<(f64, f64)> {
    let mut forecasts = Vec::with_capacity(steps);
    let mut state_mean = self.state_mean.clone();
    let mut state_cov = self.state_cov.clone();
    // Hoisted loop invariant: the observation row never changes over the
    // horizon, so allocate it once instead of once per step.
    let f_t_vec = self.observation_matrix.row(0).to_owned();
    for _ in 0..steps {
        // Propagate the prior one step: a = G a, R = G R G' (+ W or / δ).
        state_mean = self.evolution_matrix.dot(&state_mean);
        state_cov = if let Some(delta) = self.discount_factor {
            self.evolution_matrix
                .dot(&state_cov)
                .dot(&self.evolution_matrix.t())
                / delta
        } else {
            let temp = self
                .evolution_matrix
                .dot(&state_cov)
                .dot(&self.evolution_matrix.t());
            &temp + &self.evolution_cov
        };
        // Forecast mean f = F a and variance q = F R F' + V.
        let f_t = f_t_vec.dot(&state_mean);
        let rf = state_cov.dot(&f_t_vec);
        let q_t = f_t_vec.dot(&rf) + self.observation_variance;
        forecasts.push((f_t, q_t));
    }
    forecasts
}
/// Draw `num_samples` forecast trajectories, each of length `steps`.
///
/// NOTE(review): noise is drawn uniformly on (-3, 3) standard deviations
/// rather than from a Gaussian — presumably a cheap approximation, so the
/// trajectories are not exact draws from the predictive distribution.
/// Evolution noise only uses the diagonal of W (cross-covariances are
/// ignored), and is suppressed entirely under a discount factor.
pub fn forecast_samples(&self, steps: usize, num_samples: usize) -> Vec<Vec<f64>> {
    let mut rng = thread_rng();
    let mut samples = vec![Vec::with_capacity(steps); num_samples];
    // Loop invariants hoisted out of the sampling loops: the observation
    // row (an allocation) and the observation noise scale.
    let f_t_vec = self.observation_matrix.row(0).to_owned();
    let obs_std = self.observation_variance.sqrt();
    let use_discount = self.discount_factor.is_some();
    for sample in samples.iter_mut() {
        let mut state = self.state_mean.clone();
        for _ in 0..steps {
            let evolution_noise = if use_discount {
                // Discount models carry no explicit evolution noise term.
                Array1::zeros(self.state_dim)
            } else {
                Array1::from_vec(
                    (0..self.state_dim)
                        .map(|i| {
                            let std = self.evolution_cov[[i, i]].sqrt();
                            let z: f64 = rng.gen_range(-3.0..3.0);
                            z * std
                        })
                        .collect(),
                )
            };
            // Evolve the state and sample a noisy observation from it.
            state = &self.evolution_matrix.dot(&state) + &evolution_noise;
            let y_mean = f_t_vec.dot(&state);
            let z: f64 = rng.gen_range(-3.0..3.0);
            sample.push(y_mean + z * obs_std);
        }
    }
    samples
}
/// Run the Kalman filter over `observations` in order, returning the
/// one-step-ahead forecast made before each update. Stops at the first
/// update error.
pub fn fit(&mut self, observations: &[f64]) -> Result<Vec<f64>, String> {
    // `collect` into `Result` short-circuits on the first `Err`, exactly
    // like the explicit loop with `?` it replaces.
    observations.iter().map(|&obs| self.update(obs)).collect()
}
/// Restore the default state prior (zero mean, identity covariance) and
/// rewind the time counter. Structural matrices and variances are kept.
pub fn reset(&mut self) {
    self.time_step = 0;
    self.state_mean = Array1::zeros(self.state_dim);
    self.state_cov = Array2::eye(self.state_dim);
}
/// Current posterior state as `(mean, covariance)`, borrowed from the model.
pub fn get_state(&self) -> (&Array1<f64>, &Array2<f64>) {
(&self.state_mean, &self.state_cov)
}
/// Forward-filter the observations on a clone of the model, returning the
/// filtered state mean after each update. `self` is left unchanged.
///
/// NOTE(review): despite the name, no backward (RTS) smoothing pass is
/// performed — the result is the sequence of *filtered* means, not
/// smoothed ones. The original also collected filtered covariances into a
/// vector that was never read; that dead allocation has been removed.
pub fn smooth(&self, observations: &[f64]) -> Result<Vec<Array1<f64>>, String> {
    let mut filter = self.clone();
    let mut filtered_means = Vec::with_capacity(observations.len());
    for &obs in observations {
        filter.update(obs)?;
        filtered_means.push(filter.state_mean.clone());
    }
    Ok(filtered_means)
}
}
#[cfg(test)]
mod tests {
use super::*;
// Constructor records the requested dimensions and starts at t = 0.
#[test]
fn test_dlm_creation() {
let dlm = DynamicLinearModel::new(2, 1);
assert_eq!(dlm.state_dim, 2);
assert_eq!(dlm.obs_dim, 1);
assert_eq!(dlm.time_step, 0);
}
// Order-1 trend: two states, evolution matrix is the 2x2 Jordan block.
#[test]
fn test_polynomial_trend() {
let dlm = DynamicLinearModel::polynomial_trend(1, 1.0, 0.1);
assert_eq!(dlm.state_dim, 2); assert_eq!(dlm.evolution_matrix[[0, 0]], 1.0);
assert_eq!(dlm.evolution_matrix[[0, 1]], 1.0);
}
// Period-4 seasonal model has period - 1 = 3 state components.
#[test]
fn test_seasonal_dlm() {
let dlm = DynamicLinearModel::seasonal(4, 1.0, 0.01);
assert_eq!(dlm.state_dim, 3); assert_eq!(dlm.observation_matrix[[0, 0]], 1.0);
}
// predict() returns a prior mean/covariance with the state's dimensions.
#[test]
fn test_predict() {
let mut dlm = DynamicLinearModel::new(2, 1);
let (a_t, r_t) = dlm.predict();
assert_eq!(a_t.len(), 2);
assert_eq!(r_t.shape(), &[2, 2]);
}
// A single filter update produces a finite one-step forecast.
#[test]
fn test_update() {
let mut dlm = DynamicLinearModel::polynomial_trend(0, 1.0, 0.1);
let forecast = dlm.update(1.5).expect("update operation should succeed");
assert!(forecast.is_finite());
}
// Multi-step forecasts are finite with strictly positive variances.
#[test]
fn test_forecast() {
let dlm = DynamicLinearModel::polynomial_trend(0, 1.0, 0.1);
let forecasts = dlm.forecast(5);
assert_eq!(forecasts.len(), 5);
for (mean, var) in forecasts {
assert!(mean.is_finite());
assert!(var > 0.0);
}
}
// fit() yields one forecast per observation.
#[test]
fn test_fit() {
let mut dlm = DynamicLinearModel::polynomial_trend(1, 1.0, 0.1);
let observations = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let forecasts = dlm
.fit(&observations)
.expect("fit operation should succeed with valid input");
assert_eq!(forecasts.len(), 5);
}
// Builder stores the discount factor.
#[test]
fn test_discount_factor() {
let dlm = DynamicLinearModel::polynomial_trend(0, 1.0, 0.1).with_discount_factor(0.95);
assert_eq!(dlm.discount_factor, Some(0.95));
}
// Builder stores the supplied initial mean and covariance unchanged.
#[test]
fn test_initial_state() {
let mean = Array1::from_vec(vec![1.0, 2.0]);
let cov = Array2::eye(2);
let dlm = DynamicLinearModel::new(2, 1).with_initial_state(mean.clone(), cov.clone());
assert_eq!(dlm.state_mean, mean);
assert_eq!(dlm.state_cov, cov);
}
// reset() rewinds the time counter after updates.
#[test]
fn test_reset() {
let mut dlm = DynamicLinearModel::polynomial_trend(1, 1.0, 0.1);
dlm.update(1.0).expect("update operation should succeed");
dlm.update(2.0).expect("update operation should succeed");
assert_eq!(dlm.time_step, 2);
dlm.reset();
assert_eq!(dlm.time_step, 0);
}
// forecast_samples() returns num_samples trajectories of length steps.
#[test]
fn test_forecast_samples() {
let dlm = DynamicLinearModel::polynomial_trend(0, 1.0, 0.1);
let samples = dlm.forecast_samples(5, 10);
assert_eq!(samples.len(), 10);
assert_eq!(samples[0].len(), 5);
}
// smooth() yields one state vector per observation.
#[test]
fn test_smooth() {
let dlm = DynamicLinearModel::polynomial_trend(1, 1.0, 0.1);
let observations = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let smoothed = dlm.smooth(&observations).expect("smoothing should succeed");
assert_eq!(smoothed.len(), 5);
}
// On a clean linear trend the filter's last forecast tracks the data.
#[test]
fn test_linear_trend_fitting() {
let mut dlm = DynamicLinearModel::polynomial_trend(1, 0.1, 0.01);
let true_observations = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
let forecasts = dlm
.fit(&true_observations)
.expect("fit operation should succeed with valid input");
let last_forecast = forecasts.last().expect("collection should be non-empty");
assert!(
(last_forecast - 8.0).abs() < 2.0,
"Last forecast should be close to 8.0"
);
}
}