1pub mod dlm;
22pub mod linear_gaussian;
23pub mod structural;
24pub mod switching;
25
26pub use dlm::{DLMBuilder, DynamicLinearModelVec};
27pub use linear_gaussian::{KalmanOutput, LinearGaussianSSM};
28pub use structural::{SeasonalComponent, StructuralModel, TrendComponent};
29pub use switching::{MarkovSwitchingModel, RegimeParameters};
30
31use crate::error::{Result, TimeSeriesError};
32use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2};
33use std::f64::consts::PI;
34
35#[inline]
41fn mat_mul(a: &Array2<f64>, b: &Array2<f64>) -> Result<Array2<f64>> {
42 let (m, k) = a.dim();
43 let (k2, n) = b.dim();
44 if k != k2 {
45 return Err(TimeSeriesError::DimensionMismatch {
46 expected: k,
47 actual: k2,
48 });
49 }
50 let mut c = Array2::<f64>::zeros((m, n));
51 for i in 0..m {
52 for j in 0..n {
53 let mut s = 0.0_f64;
54 for l in 0..k {
55 s += a[[i, l]] * b[[l, j]];
56 }
57 c[[i, j]] = s;
58 }
59 }
60 Ok(c)
61}
62
63#[inline]
65fn mat_t(a: &Array2<f64>) -> Array2<f64> {
66 let (m, n) = a.dim();
67 let mut out = Array2::<f64>::zeros((n, m));
68 for i in 0..m {
69 for j in 0..n {
70 out[[j, i]] = a[[i, j]];
71 }
72 }
73 out
74}
75
76#[inline]
78fn mat_add(a: &Array2<f64>, b: &Array2<f64>) -> Result<Array2<f64>> {
79 let (m, n) = a.dim();
80 if b.dim() != (m, n) {
81 return Err(TimeSeriesError::DimensionMismatch {
82 expected: m * n,
83 actual: b.dim().0 * b.dim().1,
84 });
85 }
86 let mut c = a.clone();
87 for i in 0..m {
88 for j in 0..n {
89 c[[i, j]] += b[[i, j]];
90 }
91 }
92 Ok(c)
93}
94
95#[inline]
97fn mat_sub(a: &Array2<f64>, b: &Array2<f64>) -> Result<Array2<f64>> {
98 let (m, n) = a.dim();
99 if b.dim() != (m, n) {
100 return Err(TimeSeriesError::DimensionMismatch {
101 expected: m * n,
102 actual: b.dim().0 * b.dim().1,
103 });
104 }
105 let mut c = a.clone();
106 for i in 0..m {
107 for j in 0..n {
108 c[[i, j]] -= b[[i, j]];
109 }
110 }
111 Ok(c)
112}
113
114#[inline]
116fn mat_vec(a: &Array2<f64>, x: &Array1<f64>) -> Result<Array1<f64>> {
117 let (m, n) = a.dim();
118 if x.len() != n {
119 return Err(TimeSeriesError::DimensionMismatch {
120 expected: n,
121 actual: x.len(),
122 });
123 }
124 let mut y = Array1::<f64>::zeros(m);
125 for i in 0..m {
126 for j in 0..n {
127 y[i] += a[[i, j]] * x[j];
128 }
129 }
130 Ok(y)
131}
132
133fn solve_linear(a: &Array2<f64>, b: &Array2<f64>) -> Result<Array2<f64>> {
136 let n = a.dim().0;
137 if a.dim() != (n, n) {
138 return Err(TimeSeriesError::InvalidInput(
139 "Matrix must be square for linear solve".to_string(),
140 ));
141 }
142 let ncols = b.dim().1;
143
144 let mut lu = a.clone();
146 let mut rhs = b.clone();
147 let mut perm: Vec<usize> = (0..n).collect();
148
149 for col in 0..n {
150 let mut max_val = lu[[col, col]].abs();
152 let mut max_row = col;
153 for row in (col + 1)..n {
154 let v = lu[[row, col]].abs();
155 if v > max_val {
156 max_val = v;
157 max_row = row;
158 }
159 }
160
161 if max_row != col {
162 for j in 0..n {
163 lu.swap([col, j], [max_row, j]);
164 }
165 for j in 0..ncols {
166 rhs.swap([col, j], [max_row, j]);
167 }
168 perm.swap(col, max_row);
169 }
170
171 if lu[[col, col]].abs() < 1e-14 {
172 return Err(TimeSeriesError::NumericalInstability(
173 "Near-singular matrix in linear solve".to_string(),
174 ));
175 }
176
177 for row in (col + 1)..n {
178 let factor = lu[[row, col]] / lu[[col, col]];
179 lu[[row, col]] = factor;
180 for j in (col + 1)..n {
181 let v = lu[[row, j]] - factor * lu[[col, j]];
182 lu[[row, j]] = v;
183 }
184 for j in 0..ncols {
185 let v = rhs[[row, j]] - factor * rhs[[col, j]];
186 rhs[[row, j]] = v;
187 }
188 }
189 }
190
191 let mut x = rhs.clone();
193 for k in (0..n).rev() {
194 for j in 0..ncols {
195 let mut s = x[[k, j]];
196 for l in (k + 1)..n {
197 s -= lu[[k, l]] * x[[l, j]];
198 }
199 x[[k, j]] = s / lu[[k, k]];
200 }
201 }
202
203 Ok(x)
204}
205
206fn mat_inv(a: &Array2<f64>) -> Result<Array2<f64>> {
208 let n = a.dim().0;
209 let eye = Array2::<f64>::eye(n);
210 solve_linear(a, &eye)
211}
212
/// Computes `ln(det(a))` for a square matrix.
///
/// Tries a Cholesky factorization first (exact and fast when `a` is
/// symmetric positive definite); if a non-positive pivot is found it falls
/// back to LU with partial pivoting. Returns `NEG_INFINITY` for a
/// (near-)singular matrix and NaN when the determinant is negative (its
/// log is undefined).
///
/// NOTE(review): only `a.dim().0` is read, so a non-square input is not
/// rejected here — callers pass innovation covariances, which are square;
/// confirm before reusing elsewhere.
fn log_det(a: &Array2<f64>) -> Result<f64> {
    let n = a.dim().0;

    // --- Attempt 1: Cholesky a = L L' (requires SPD input). ---
    let mut l = Array2::<f64>::zeros((n, n));
    let mut ok = true;

    'outer: for i in 0..n {
        for j in 0..=i {
            let mut s = a[[i, j]];
            for k in 0..j {
                s -= l[[i, k]] * l[[j, k]];
            }
            if i == j {
                // A non-positive diagonal pivot means the matrix is not
                // positive definite; bail out to the LU fallback.
                if s <= 0.0 {
                    ok = false;
                    break 'outer;
                }
                l[[i, j]] = s.sqrt();
            } else {
                l[[i, j]] = s / l[[j, j]];
            }
        }
    }

    if ok {
        // det(a) = det(L)^2, so ln det(a) = 2 * sum(ln L_ii).
        let log_diag_sum: f64 = (0..n).map(|i| l[[i, i]].ln()).sum();
        return Ok(2.0 * log_diag_sum);
    }

    // --- Attempt 2: LU with partial pivoting, tracking the sign of the
    // row permutation. ---
    let mut lu = a.clone();
    let mut sign = 1.0_f64;

    for col in 0..n {
        let mut max_val = lu[[col, col]].abs();
        let mut max_row = col;
        for row in (col + 1)..n {
            let v = lu[[row, col]].abs();
            if v > max_val {
                max_val = v;
                max_row = row;
            }
        }
        if max_row != col {
            for j in 0..n {
                lu.swap([col, j], [max_row, j]);
            }
            // Each row swap flips the determinant's sign.
            sign = -sign;
        }
        if lu[[col, col]].abs() < 1e-14 {
            // Effectively singular: det -> 0, so ln det -> -inf.
            return Ok(f64::NEG_INFINITY);
        }
        for row in (col + 1)..n {
            let factor = lu[[row, col]] / lu[[col, col]];
            lu[[row, col]] = factor;
            for j in (col + 1)..n {
                let v = lu[[row, j]] - factor * lu[[col, j]];
                lu[[row, j]] = v;
            }
        }
    }

    // ln|det| from the U diagonal; the added NaN poisons the result when
    // the determinant is negative (log of a negative number is undefined).
    let log_diag_sum: f64 = (0..n).map(|i| lu[[i, i]].abs().ln()).sum();
    Ok(log_diag_sum + if sign < 0.0 { f64::NAN } else { 0.0 })
}
281
/// A linear Gaussian state-space model:
///
/// ```text
/// x_t = F x_{t-1} + w_t,   w_t ~ N(0, Q)
/// y_t = H x_t     + v_t,   v_t ~ N(0, R)
/// ```
///
/// with initial state `x_0 ~ N(initial_state, initial_cov)`.
#[derive(Debug, Clone)]
pub struct StateSpaceModel {
    /// State transition matrix F (n_states x n_states).
    pub transition_matrix: Array2<f64>,
    /// Observation matrix H (n_obs x n_states).
    pub observation_matrix: Array2<f64>,
    /// Process noise covariance Q (n_states x n_states).
    pub process_noise: Array2<f64>,
    /// Observation noise covariance R (n_obs x n_obs).
    pub observation_noise: Array2<f64>,
    /// Initial state mean (length n_states).
    pub initial_state: Array1<f64>,
    /// Initial state covariance (n_states x n_states).
    pub initial_cov: Array2<f64>,
}
307
308impl StateSpaceModel {
309 pub fn new(n_states: usize, n_obs: usize) -> Self {
311 Self {
312 transition_matrix: Array2::eye(n_states),
313 observation_matrix: {
314 let mut h = Array2::<f64>::zeros((n_obs, n_states));
315 for i in 0..n_obs.min(n_states) {
316 h[[i, i]] = 1.0;
317 }
318 h
319 },
320 process_noise: Array2::eye(n_states),
321 observation_noise: Array2::eye(n_obs),
322 initial_state: Array1::zeros(n_states),
323 initial_cov: Array2::eye(n_states) * 1e6,
324 }
325 }
326
327 pub fn local_level(sigma_eta: f64, sigma_eps: f64) -> Self {
333 Self {
334 transition_matrix: Array2::eye(1),
335 observation_matrix: Array2::ones((1, 1)),
336 process_noise: Array2::from_elem((1, 1), sigma_eta * sigma_eta),
337 observation_noise: Array2::from_elem((1, 1), sigma_eps * sigma_eps),
338 initial_state: Array1::zeros(1),
339 initial_cov: Array2::from_elem((1, 1), 1e6),
340 }
341 }
342
343 pub fn local_linear_trend(sigma_eta: f64, sigma_zeta: f64, sigma_eps: f64) -> Self {
350 let f = Array2::from_shape_vec((2, 2), vec![1.0, 1.0, 0.0, 1.0]).expect("shape is valid");
352 let h = Array2::from_shape_vec((1, 2), vec![1.0, 0.0]).expect("shape is valid");
353 let q = Array2::from_shape_vec(
354 (2, 2),
355 vec![sigma_eta * sigma_eta, 0.0, 0.0, sigma_zeta * sigma_zeta],
356 )
357 .expect("shape is valid");
358 let r = Array2::from_elem((1, 1), sigma_eps * sigma_eps);
359 Self {
360 transition_matrix: f,
361 observation_matrix: h,
362 process_noise: q,
363 observation_noise: r,
364 initial_state: Array1::zeros(2),
365 initial_cov: Array2::eye(2) * 1e6,
366 }
367 }
368
369 pub fn seasonal(period: usize, sigma_omega: f64) -> Self {
374 if period < 2 {
375 return Self::local_level(sigma_omega, 1.0);
377 }
378 let s = period - 1;
379 let mut f = Array2::<f64>::zeros((s, s));
381 for j in 0..s {
382 f[[0, j]] = -1.0;
383 }
384 for i in 1..s {
385 f[[i, i - 1]] = 1.0;
386 }
387 let mut h = Array2::<f64>::zeros((1, s));
389 h[[0, 0]] = 1.0;
390 let mut q = Array2::<f64>::zeros((s, s));
392 q[[0, 0]] = sigma_omega * sigma_omega;
393
394 Self {
395 transition_matrix: f,
396 observation_matrix: h,
397 process_noise: q,
398 observation_noise: Array2::from_elem((1, 1), 1.0),
399 initial_state: Array1::zeros(s),
400 initial_cov: Array2::eye(s) * 1e4,
401 }
402 }
403
404 pub fn ar(phi: &[f64], sigma_eps: f64) -> Self {
411 let p = phi.len();
412 if p == 0 {
413 return Self::local_level(sigma_eps, sigma_eps);
414 }
415 let mut f = Array2::<f64>::zeros((p, p));
416 for j in 0..p {
417 f[[0, j]] = phi[j];
418 }
419 for i in 1..p {
420 f[[i, i - 1]] = 1.0;
421 }
422 let mut h = Array2::<f64>::zeros((1, p));
423 h[[0, 0]] = 1.0;
424 let mut q = Array2::<f64>::zeros((p, p));
425 q[[0, 0]] = sigma_eps * sigma_eps;
426
427 Self {
428 transition_matrix: f,
429 observation_matrix: h,
430 process_noise: q,
431 observation_noise: Array2::zeros((1, 1)), initial_state: Array1::zeros(p),
433 initial_cov: Array2::eye(p) * 1e4,
434 }
435 }
436
437 pub fn structural(
441 sigma_level: f64,
442 sigma_slope: f64,
443 sigma_seasonal: Option<f64>,
444 period: Option<usize>,
445 sigma_obs: f64,
446 ) -> Self {
447 let trend_dim = 2usize;
448 let seasonal_dim = match (sigma_seasonal, period) {
449 (Some(_), Some(p)) if p >= 2 => p - 1,
450 _ => 0,
451 };
452 let n = trend_dim + seasonal_dim;
453
454 let mut f = Array2::<f64>::zeros((n, n));
456 f[[0, 0]] = 1.0;
458 f[[0, 1]] = 1.0;
459 f[[1, 1]] = 1.0;
460 if seasonal_dim > 0 {
462 let s = seasonal_dim;
463 let off = trend_dim;
464 for j in 0..s {
465 f[[off, off + j]] = -1.0;
466 }
467 for i in 1..s {
468 f[[off + i, off + i - 1]] = 1.0;
469 }
470 }
471
472 let mut h = Array2::<f64>::zeros((1, n));
474 h[[0, 0]] = 1.0; if seasonal_dim > 0 {
476 h[[0, trend_dim]] = 1.0; }
478
479 let mut q = Array2::<f64>::zeros((n, n));
481 q[[0, 0]] = sigma_level * sigma_level;
482 q[[1, 1]] = sigma_slope * sigma_slope;
483 if let (Some(sw), Some(_)) = (sigma_seasonal, period) {
484 if seasonal_dim > 0 {
485 q[[trend_dim, trend_dim]] = sw * sw;
486 }
487 }
488
489 let r = Array2::from_elem((1, 1), sigma_obs * sigma_obs);
490
491 Self {
492 transition_matrix: f,
493 observation_matrix: h,
494 process_noise: q,
495 observation_noise: r,
496 initial_state: Array1::zeros(n),
497 initial_cov: Array2::eye(n) * 1e6,
498 }
499 }
500
501 pub fn n_states(&self) -> usize {
503 self.transition_matrix.dim().0
504 }
505
506 pub fn n_obs(&self) -> usize {
508 self.observation_matrix.dim().0
509 }
510}
511
/// Per-time-step output of [`kalman_filter`].
#[derive(Debug, Clone)]
pub struct KalmanFilterResult {
    /// Filtered state means, one row per time step (T x n_states).
    pub filtered_states: Array2<f64>,
    /// Filtered state covariances, one per time step.
    pub filtered_covs: Vec<Array2<f64>>,
    /// One-step-ahead predicted state means (T x n_states).
    pub predicted_states: Array2<f64>,
    /// One-step-ahead predicted state covariances.
    pub predicted_covs: Vec<Array2<f64>>,
    /// Innovations y_t - H m_pred (zero rows for missing observations).
    pub innovations: Array2<f64>,
    /// Innovation covariances S_t = H P_pred H' + R.
    pub innovation_covs: Vec<Array2<f64>>,
    /// Total Gaussian log-likelihood over the non-missing observations.
    pub log_likelihood: f64,
}
534
/// Output of [`kalman_smoother`] (fixed-interval RTS smoothing).
#[derive(Debug, Clone)]
pub struct KalmanSmootherResult {
    /// Smoothed state means, one row per time step (T x n_states).
    pub smoothed_states: Array2<f64>,
    /// Smoothed state covariances, one per time step.
    pub smoothed_covs: Vec<Array2<f64>>,
    /// Log-likelihood carried over from the filtering pass.
    pub log_likelihood: f64,
}
549
550pub fn kalman_filter(
559 observations: ArrayView2<f64>,
560 model: &StateSpaceModel,
561 handle_missing: bool,
562) -> Result<KalmanFilterResult> {
563 let (t_len, n_obs) = observations.dim();
564 let n_states = model.n_states();
565
566 if model.n_obs() != n_obs {
567 return Err(TimeSeriesError::DimensionMismatch {
568 expected: model.n_obs(),
569 actual: n_obs,
570 });
571 }
572
573 let mut m = model.initial_state.clone(); let mut p = model.initial_cov.clone(); let mut filtered_states = Array2::<f64>::zeros((t_len, n_states));
577 let mut filtered_covs = Vec::with_capacity(t_len);
578 let mut predicted_states = Array2::<f64>::zeros((t_len, n_states));
579 let mut predicted_covs = Vec::with_capacity(t_len);
580 let mut innovations = Array2::<f64>::zeros((t_len, n_obs));
581 let mut innovation_covs = Vec::with_capacity(t_len);
582 let mut log_likelihood = 0.0_f64;
583
584 let f = &model.transition_matrix;
585 let h = &model.observation_matrix;
586 let q = &model.process_noise;
587 let r = &model.observation_noise;
588 let ft = mat_t(f);
589 let ht = mat_t(h);
590
591 for t in 0..t_len {
592 let m_pred = mat_vec(f, &m)?;
595 let fp = mat_mul(f, &p)?;
597 let p_pred = mat_add(&mat_mul(&fp, &ft)?, q)?;
598
599 for j in 0..n_states {
601 predicted_states[[t, j]] = m_pred[j];
602 }
603 predicted_covs.push(p_pred.clone());
604
605 let y_t: Array1<f64> = observations.row(t).to_owned();
607 let is_missing = handle_missing && y_t.iter().any(|&v| v.is_nan());
608
609 if is_missing {
610 for j in 0..n_states {
612 filtered_states[[t, j]] = m_pred[j];
613 }
614 filtered_covs.push(p_pred.clone());
615 let hp = mat_mul(h, &p_pred)?;
617 let s = mat_add(&mat_mul(&hp, &ht)?, r)?;
618 innovation_covs.push(s);
619 for j in 0..n_obs {
620 innovations[[t, j]] = 0.0;
621 }
622 m = m_pred;
623 p = p_pred;
624 continue;
625 }
626
627 let hm = mat_vec(h, &m_pred)?;
630 let v: Array1<f64> = Array1::from_iter((0..n_obs).map(|i| y_t[i] - hm[i]));
631
632 let hp = mat_mul(h, &p_pred)?;
634 let s = mat_add(&mat_mul(&hp, &ht)?, r)?;
635
636 let ph_t = mat_mul(&p_pred, &ht)?;
639 let k = mat_mul(&ph_t, &mat_inv(&s)?)?;
640
641 let kv = mat_vec(&k, &v)?;
643 let m_upd: Array1<f64> = Array1::from_iter((0..n_states).map(|i| m_pred[i] + kv[i]));
644
645 let kh = mat_mul(&k, h)?;
647 let i_kh = mat_sub(&Array2::eye(n_states), &kh)?;
648 let p_upd = mat_mul(&i_kh, &p_pred)?;
649
650 let ld = log_det(&s)?;
652 let s_inv = mat_inv(&s)?;
653 let s_inv_v = mat_vec(&s_inv, &v)?;
654 let quad: f64 = (0..n_obs).map(|i| v[i] * s_inv_v[i]).sum();
655 log_likelihood += -0.5 * ((n_obs as f64) * (2.0 * PI).ln() + ld + quad);
656
657 for j in 0..n_states {
659 filtered_states[[t, j]] = m_upd[j];
660 }
661 for j in 0..n_obs {
662 innovations[[t, j]] = v[j];
663 }
664 filtered_covs.push(p_upd.clone());
665 innovation_covs.push(s);
666
667 m = m_upd;
668 p = p_upd;
669 }
670
671 Ok(KalmanFilterResult {
672 filtered_states,
673 filtered_covs,
674 predicted_states,
675 predicted_covs,
676 innovations,
677 innovation_covs,
678 log_likelihood,
679 })
680}
681
/// Fixed-interval (Rauch-Tung-Striebel) smoother run backwards over a
/// completed filtering pass.
///
/// Combines the filtered estimates with later observations to produce
/// smoothed state means and covariances; the log-likelihood is carried
/// through unchanged from `filter_result`.
///
/// # Errors
/// Propagates numerical errors from the linear algebra helpers (e.g. a
/// singular one-step-ahead predicted covariance).
pub fn kalman_smoother(
    filter_result: &KalmanFilterResult,
    model: &StateSpaceModel,
) -> Result<KalmanSmootherResult> {
    let t_len = filter_result.filtered_states.dim().0;
    let n_states = model.n_states();

    // Empty series: nothing to smooth.
    if t_len == 0 {
        return Ok(KalmanSmootherResult {
            smoothed_states: Array2::zeros((0, n_states)),
            smoothed_covs: vec![],
            log_likelihood: filter_result.log_likelihood,
        });
    }

    let f = &model.transition_matrix;
    let ft = mat_t(f);

    let mut smoothed_states = Array2::<f64>::zeros((t_len, n_states));
    let mut smoothed_covs = vec![Array2::<f64>::zeros((n_states, n_states)); t_len];

    // At the final time step the smoothed estimate equals the filtered one.
    for j in 0..n_states {
        smoothed_states[[t_len - 1, j]] = filter_result.filtered_states[[t_len - 1, j]];
    }
    smoothed_covs[t_len - 1] = filter_result.filtered_covs[t_len - 1].clone();

    // Backward recursion over t = T-2 .. 0.
    for t in (0..t_len - 1).rev() {
        let p_filt = &filter_result.filtered_covs[t];
        let p_pred_next = &filter_result.predicted_covs[t + 1];

        // Smoother gain G_t = P_t|t F' P_{t+1|t}^{-1}.
        let pf_ft = mat_mul(p_filt, &ft)?;
        let p_pred_inv = mat_inv(p_pred_next)?;
        let g = mat_mul(&pf_ft, &p_pred_inv)?;

        let m_filt_t: Array1<f64> =
            Array1::from_iter((0..n_states).map(|j| filter_result.filtered_states[[t, j]]));
        let m_smooth_next: Array1<f64> =
            Array1::from_iter((0..n_states).map(|j| smoothed_states[[t + 1, j]]));
        let m_pred_next: Array1<f64> =
            Array1::from_iter((0..n_states).map(|j| filter_result.predicted_states[[t + 1, j]]));

        // Mean update: m_t|T = m_t|t + G_t (m_{t+1|T} - m_{t+1|t}).
        let diff: Array1<f64> =
            Array1::from_iter((0..n_states).map(|j| m_smooth_next[j] - m_pred_next[j]));
        let g_diff = mat_vec(&g, &diff)?;
        for j in 0..n_states {
            smoothed_states[[t, j]] = m_filt_t[j] + g_diff[j];
        }

        // Covariance update: P_t|T = P_t|t + G_t (P_{t+1|T} - P_{t+1|t}) G_t'.
        let p_smooth_next = &smoothed_covs[t + 1];
        let dp = mat_sub(p_smooth_next, p_pred_next)?;
        let g_dp = mat_mul(&g, &dp)?;
        let g_dp_gt = mat_mul(&g_dp, &mat_t(&g))?;
        smoothed_covs[t] = mat_add(p_filt, &g_dp_gt)?;
    }

    Ok(KalmanSmootherResult {
        smoothed_states,
        smoothed_covs,
        log_likelihood: filter_result.log_likelihood,
    })
}
753
/// Output of [`em_estimation`].
#[derive(Debug, Clone)]
pub struct EmResult {
    /// Model holding the parameters from the final EM iteration.
    pub model: StateSpaceModel,
    /// Log-likelihood recorded at the start of each iteration.
    pub log_likelihoods: Vec<f64>,
    /// Number of iterations actually performed.
    pub n_iterations: usize,
    /// Whether the log-likelihood change fell below the tolerance.
    pub converged: bool,
}
770
771pub fn em_estimation(
776 observations: ArrayView2<f64>,
777 initial_model: StateSpaceModel,
778 max_iter: usize,
779 tol: f64,
780 estimate_transition: bool,
781 estimate_obs_matrix: bool,
782) -> Result<EmResult> {
783 let (t_len, n_obs_dim) = observations.dim();
784 if t_len < 2 {
785 return Err(TimeSeriesError::InsufficientData {
786 message: "EM requires at least 2 observations".to_string(),
787 required: 2,
788 actual: t_len,
789 });
790 }
791
792 let mut model = initial_model;
793 let mut log_likelihoods = Vec::with_capacity(max_iter);
794 let mut converged = false;
795
796 for iter in 0..max_iter {
797 let filt = kalman_filter(observations, &model, true)?;
799 let smooth = kalman_smoother(&filt, &model)?;
800 let ll = filt.log_likelihood;
801 log_likelihoods.push(ll);
802
803 if iter > 0 {
804 let prev_ll = log_likelihoods[iter - 1];
805 if (ll - prev_ll).abs() < tol {
806 converged = true;
807 break;
808 }
809 }
810
811 let n_s = model.n_states();
813
814 let mut sum_pp = Array2::<f64>::zeros((n_s, n_s)); let mut sum_pp_lag = Array2::<f64>::zeros((n_s, n_s)); let mut sum_pp_prev = Array2::<f64>::zeros((n_s, n_s)); let mut sum_yp = Array2::<f64>::zeros((n_obs_dim, n_s)); let mut sum_yy = Array2::<f64>::zeros((n_obs_dim, n_obs_dim)); for t in 0..t_len {
824 let x_t: Array1<f64> =
825 Array1::from_iter((0..n_s).map(|j| smooth.smoothed_states[[t, j]]));
826 let e_xxt = outer_product_plus_cov(&x_t, &smooth.smoothed_covs[t]);
827 sum_pp = mat_add(&sum_pp, &e_xxt)?;
828
829 let y_t: Array1<f64> = Array1::from_iter((0..n_obs_dim).map(|j| observations[[t, j]]));
830 let yxt = outer_product(&y_t, &x_t);
831 sum_yp = mat_add(&sum_yp, &yxt)?;
832 let yyt = outer_product(&y_t, &y_t);
833 sum_yy = mat_add(&sum_yy, &yyt)?;
834 }
835
836 let f_ref = &model.transition_matrix;
840 let ft_ref = mat_t(f_ref);
841 for t in 1..t_len {
842 let x_t: Array1<f64> =
843 Array1::from_iter((0..n_s).map(|j| smooth.smoothed_states[[t, j]]));
844 let x_prev: Array1<f64> =
845 Array1::from_iter((0..n_s).map(|j| smooth.smoothed_states[[t - 1, j]]));
846
847 let p_filt_prev = &filt.filtered_covs[t - 1];
849 let p_pred_t = &filt.predicted_covs[t];
850 let pf_ft = mat_mul(p_filt_prev, &ft_ref)?;
851 let p_pred_inv = mat_inv(p_pred_t).unwrap_or_else(|_| Array2::eye(n_s));
852 let g_prev = mat_mul(&pf_ft, &p_pred_inv)?;
853 let p_cross = mat_add(
854 &outer_product(&x_t, &x_prev),
855 &mat_mul(&g_prev, &smooth.smoothed_covs[t])?,
856 )?;
857
858 sum_pp_lag = mat_add(&sum_pp_lag, &p_cross)?;
859 let e_xprev = outer_product_plus_cov(&x_prev, &smooth.smoothed_covs[t - 1]);
860 sum_pp_prev = mat_add(&sum_pp_prev, &e_xprev)?;
861 }
862
863 let t_f64 = t_len as f64;
864 let tm1_f64 = (t_len - 1) as f64;
865
866 if estimate_obs_matrix {
868 let sum_pp_inv = mat_inv(&sum_pp).unwrap_or_else(|_| Array2::eye(n_s));
870 model.observation_matrix = mat_mul(&sum_yp, &sum_pp_inv)?;
871 }
872
873 {
875 let h_new = model.observation_matrix.clone();
876 let h_sum_yp_t = mat_mul(&h_new, &mat_t(&sum_yp))?;
877 let r_new_unnorm = mat_sub(&sum_yy, &h_sum_yp_t)?;
878 let mut r_new = Array2::<f64>::zeros((n_obs_dim, n_obs_dim));
879 for i in 0..n_obs_dim {
880 for j in 0..n_obs_dim {
881 r_new[[i, j]] = r_new_unnorm[[i, j]] / t_f64;
882 }
883 }
884 for i in 0..n_obs_dim {
886 if r_new[[i, i]] < 1e-8 {
887 r_new[[i, i]] = 1e-8;
888 }
889 }
890 model.observation_noise = r_new;
891 }
892
893 if estimate_transition {
895 let sum_pp_prev_inv = mat_inv(&sum_pp_prev).unwrap_or_else(|_| Array2::eye(n_s));
897 model.transition_matrix = mat_mul(&sum_pp_lag, &sum_pp_prev_inv)?;
898 }
899
900 {
902 let f_new = model.transition_matrix.clone();
903 let f_sum_lag_t = mat_mul(&f_new, &mat_t(&sum_pp_lag))?;
904 let q_unnorm = mat_sub(&mat_sub(&sum_pp, &f_sum_lag_t)?, &mat_t(&f_sum_lag_t))?;
905 let f_spp = mat_mul(&f_new, &sum_pp_prev)?;
907 let f_spp_ft = mat_mul(&f_spp, &mat_t(&f_new))?;
908 let q_unnorm2 = mat_add(&q_unnorm, &f_spp_ft)?;
909 let mut q_new = Array2::<f64>::zeros((n_s, n_s));
910 for i in 0..n_s {
911 for j in 0..n_s {
912 q_new[[i, j]] = q_unnorm2[[i, j]] / tm1_f64;
913 }
914 }
915 for i in 0..n_s {
917 if q_new[[i, i]] < 1e-10 {
918 q_new[[i, i]] = 1e-10;
919 }
920 }
921 model.process_noise = q_new;
922 }
923 }
924
925 let n_iterations = log_likelihoods.len();
926 Ok(EmResult {
927 model,
928 log_likelihoods,
929 n_iterations,
930 converged,
931 })
932}
933
934fn outer_product(x: &Array1<f64>, y: &Array1<f64>) -> Array2<f64> {
936 let m = x.len();
937 let n = y.len();
938 let mut out = Array2::<f64>::zeros((m, n));
939 for i in 0..m {
940 for j in 0..n {
941 out[[i, j]] = x[i] * y[j];
942 }
943 }
944 out
945}
946
947fn outer_product_plus_cov(x: &Array1<f64>, cov: &Array2<f64>) -> Array2<f64> {
949 let n = x.len();
950 let mut out = outer_product(x, x);
951 for i in 0..n {
952 for j in 0..n {
953 out[[i, j]] += cov[[i, j]];
954 }
955 }
956 out
957}
958
959pub fn fit_local_level(
967 y: ArrayView1<f64>,
968 max_iter: usize,
969) -> Result<(f64, f64, KalmanSmootherResult)> {
970 let n = y.len();
971 if n < 2 {
972 return Err(TimeSeriesError::InsufficientData {
973 message: "fit_local_level requires at least 2 observations".to_string(),
974 required: 2,
975 actual: n,
976 });
977 }
978
979 let var_data: f64 = {
981 let mean = y.iter().copied().filter(|v| !v.is_nan()).sum::<f64>()
982 / y.iter().filter(|v| !v.is_nan()).count() as f64;
983 y.iter()
984 .filter(|v| !v.is_nan())
985 .map(|v| (v - mean).powi(2))
986 .sum::<f64>()
987 / n as f64
988 };
989 let sigma_eta0 = (var_data * 0.1).sqrt().max(1e-4);
990 let sigma_eps0 = (var_data * 0.9).sqrt().max(1e-4);
991
992 let init_model = StateSpaceModel::local_level(sigma_eta0, sigma_eps0);
993
994 let obs_2d: Array2<f64> = y
996 .iter()
997 .copied()
998 .collect::<Array1<f64>>()
999 .into_shape_with_order((n, 1))
1000 .map_err(|e| TimeSeriesError::ComputationError(format!("Shape error: {e}")))?;
1001
1002 let em_result = em_estimation(obs_2d.view(), init_model, max_iter, 1e-6, false, false)?;
1003
1004 let sigma_eta = em_result.model.process_noise[[0, 0]].sqrt();
1005 let sigma_eps = em_result.model.observation_noise[[0, 0]].sqrt();
1006
1007 let filt = kalman_filter(obs_2d.view(), &em_result.model, true)?;
1008 let smooth = kalman_smoother(&filt, &em_result.model)?;
1009
1010 Ok((sigma_eta, sigma_eps, smooth))
1011}
1012
1013pub fn fit_local_linear_trend(
1017 y: ArrayView1<f64>,
1018 max_iter: usize,
1019) -> Result<(f64, f64, f64, KalmanSmootherResult)> {
1020 let n = y.len();
1021 if n < 3 {
1022 return Err(TimeSeriesError::InsufficientData {
1023 message: "fit_local_linear_trend requires at least 3 observations".to_string(),
1024 required: 3,
1025 actual: n,
1026 });
1027 }
1028
1029 let var_data: f64 = {
1030 let mean = y.iter().copied().filter(|v| !v.is_nan()).sum::<f64>()
1031 / y.iter().filter(|v| !v.is_nan()).count() as f64;
1032 y.iter()
1033 .filter(|v| !v.is_nan())
1034 .map(|v| (v - mean).powi(2))
1035 .sum::<f64>()
1036 / n as f64
1037 };
1038
1039 let sigma_eta0 = (var_data * 0.1).sqrt().max(1e-4);
1040 let sigma_zeta0 = (var_data * 0.01).sqrt().max(1e-5);
1041 let sigma_eps0 = (var_data * 0.5).sqrt().max(1e-4);
1042
1043 let init_model = StateSpaceModel::local_linear_trend(sigma_eta0, sigma_zeta0, sigma_eps0);
1044
1045 let obs_2d: Array2<f64> = y
1046 .iter()
1047 .copied()
1048 .collect::<Array1<f64>>()
1049 .into_shape_with_order((n, 1))
1050 .map_err(|e| TimeSeriesError::ComputationError(format!("Shape error: {e}")))?;
1051
1052 let em_result = em_estimation(obs_2d.view(), init_model, max_iter, 1e-6, false, false)?;
1053
1054 let sigma_level = em_result.model.process_noise[[0, 0]].sqrt();
1055 let sigma_slope = em_result.model.process_noise[[1, 1]].sqrt();
1056 let sigma_obs = em_result.model.observation_noise[[0, 0]].sqrt();
1057
1058 let filt = kalman_filter(obs_2d.view(), &em_result.model, true)?;
1059 let smooth = kalman_smoother(&filt, &em_result.model)?;
1060
1061 Ok((sigma_level, sigma_slope, sigma_obs, smooth))
1062}
1063
/// A dynamic linear model: a base state-space model whose observation
/// matrix may optionally vary over time.
#[derive(Debug, Clone)]
pub struct DynamicLinearModel {
    /// Constant-coefficient state-space model.
    pub base_model: StateSpaceModel,
    /// Optional per-time-step observation matrices; when `Some`, entry `t`
    /// replaces `base_model.observation_matrix` at time `t`.
    pub time_varying_obs: Option<Vec<Array2<f64>>>,
}
1076
1077impl DynamicLinearModel {
1078 pub fn new(model: StateSpaceModel) -> Self {
1080 Self {
1081 base_model: model,
1082 time_varying_obs: None,
1083 }
1084 }
1085
1086 pub fn with_time_varying_obs(mut self, h_sequence: Vec<Array2<f64>>) -> Self {
1088 self.time_varying_obs = Some(h_sequence);
1089 self
1090 }
1091
1092 pub fn filter(&self, observations: ArrayView2<f64>) -> Result<KalmanFilterResult> {
1094 match &self.time_varying_obs {
1095 None => kalman_filter(observations, &self.base_model, true),
1096 Some(h_seq) => kalman_filter_tv(observations, &self.base_model, h_seq),
1097 }
1098 }
1099
1100 pub fn smooth(&self, observations: ArrayView2<f64>) -> Result<KalmanSmootherResult> {
1102 let filt = self.filter(observations)?;
1103 kalman_smoother(&filt, &self.base_model)
1104 }
1105}
1106
1107fn kalman_filter_tv(
1109 observations: ArrayView2<f64>,
1110 model: &StateSpaceModel,
1111 h_seq: &[Array2<f64>],
1112) -> Result<KalmanFilterResult> {
1113 let (t_len, n_obs) = observations.dim();
1114 let n_states = model.n_states();
1115
1116 if h_seq.len() != t_len {
1117 return Err(TimeSeriesError::InvalidInput(format!(
1118 "h_seq length {} does not match T = {}",
1119 h_seq.len(),
1120 t_len
1121 )));
1122 }
1123
1124 let mut m = model.initial_state.clone();
1125 let mut p = model.initial_cov.clone();
1126
1127 let mut filtered_states = Array2::<f64>::zeros((t_len, n_states));
1128 let mut filtered_covs = Vec::with_capacity(t_len);
1129 let mut predicted_states = Array2::<f64>::zeros((t_len, n_states));
1130 let mut predicted_covs = Vec::with_capacity(t_len);
1131 let mut innovations = Array2::<f64>::zeros((t_len, n_obs));
1132 let mut innovation_covs = Vec::with_capacity(t_len);
1133 let mut log_likelihood = 0.0_f64;
1134
1135 let f = &model.transition_matrix;
1136 let q = &model.process_noise;
1137 let r = &model.observation_noise;
1138 let ft = mat_t(f);
1139
1140 for t in 0..t_len {
1141 let h = &h_seq[t];
1142 let ht = mat_t(h);
1143
1144 let m_pred = mat_vec(f, &m)?;
1146 let fp = mat_mul(f, &p)?;
1147 let p_pred = mat_add(&mat_mul(&fp, &ft)?, q)?;
1148
1149 for j in 0..n_states {
1150 predicted_states[[t, j]] = m_pred[j];
1151 }
1152 predicted_covs.push(p_pred.clone());
1153
1154 let y_t: Array1<f64> = observations.row(t).to_owned();
1155 let is_missing = y_t.iter().any(|&v| v.is_nan());
1156
1157 if is_missing {
1158 for j in 0..n_states {
1159 filtered_states[[t, j]] = m_pred[j];
1160 }
1161 filtered_covs.push(p_pred.clone());
1162 let hp = mat_mul(h, &p_pred)?;
1163 let s = mat_add(&mat_mul(&hp, &ht)?, r)?;
1164 innovation_covs.push(s);
1165 for j in 0..n_obs {
1166 innovations[[t, j]] = 0.0;
1167 }
1168 m = m_pred;
1169 p = p_pred;
1170 continue;
1171 }
1172
1173 let hm = mat_vec(h, &m_pred)?;
1175 let v: Array1<f64> = Array1::from_iter((0..n_obs).map(|i| y_t[i] - hm[i]));
1176 let hp = mat_mul(h, &p_pred)?;
1177 let s = mat_add(&mat_mul(&hp, &ht)?, r)?;
1178 let ph_t = mat_mul(&p_pred, &ht)?;
1179 let k = mat_mul(&ph_t, &mat_inv(&s)?)?;
1180 let kv = mat_vec(&k, &v)?;
1181 let m_upd: Array1<f64> = Array1::from_iter((0..n_states).map(|i| m_pred[i] + kv[i]));
1182 let kh = mat_mul(&k, h)?;
1183 let i_kh = mat_sub(&Array2::eye(n_states), &kh)?;
1184 let p_upd = mat_mul(&i_kh, &p_pred)?;
1185
1186 let ld = log_det(&s)?;
1187 let s_inv = mat_inv(&s)?;
1188 let s_inv_v = mat_vec(&s_inv, &v)?;
1189 let quad: f64 = (0..n_obs).map(|i| v[i] * s_inv_v[i]).sum();
1190 log_likelihood += -0.5 * ((n_obs as f64) * (2.0 * PI).ln() + ld + quad);
1191
1192 for j in 0..n_states {
1193 filtered_states[[t, j]] = m_upd[j];
1194 }
1195 for j in 0..n_obs {
1196 innovations[[t, j]] = v[j];
1197 }
1198 filtered_covs.push(p_upd.clone());
1199 innovation_covs.push(s);
1200
1201 m = m_upd;
1202 p = p_upd;
1203 }
1204
1205 Ok(KalmanFilterResult {
1206 filtered_states,
1207 filtered_covs,
1208 predicted_states,
1209 predicted_covs,
1210 innovations,
1211 innovation_covs,
1212 log_likelihood,
1213 })
1214}
1215
1216pub fn forecast_ssm(
1224 observations: ArrayView1<f64>,
1225 model: &StateSpaceModel,
1226 h: usize,
1227) -> Result<(Array1<f64>, Array1<f64>, Array1<f64>)> {
1228 let n = observations.len();
1229 if n == 0 {
1230 return Err(TimeSeriesError::InsufficientData {
1231 message: "forecast_ssm requires at least one observation".to_string(),
1232 required: 1,
1233 actual: 0,
1234 });
1235 }
1236 if h == 0 {
1237 return Ok((Array1::zeros(0), Array1::zeros(0), Array1::zeros(0)));
1238 }
1239
1240 let obs_2d: Array2<f64> = observations
1242 .iter()
1243 .copied()
1244 .collect::<Array1<f64>>()
1245 .into_shape_with_order((n, 1))
1246 .map_err(|e| TimeSeriesError::ComputationError(format!("Shape error: {e}")))?;
1247
1248 let filt = kalman_filter(obs_2d.view(), model, true)?;
1249
1250 let n_states = model.n_states();
1252 let mut m: Array1<f64> =
1253 Array1::from_iter((0..n_states).map(|j| filt.filtered_states[[n - 1, j]]));
1254 let mut p = filt.filtered_covs[n - 1].clone();
1255
1256 let f = &model.transition_matrix;
1257 let h_mat = &model.observation_matrix;
1258 let q = &model.process_noise;
1259 let r = &model.observation_noise;
1260 let ft = mat_t(f);
1261
1262 let z_95 = 1.959_963_985; let mut means = Array1::<f64>::zeros(h);
1265 let mut lowers = Array1::<f64>::zeros(h);
1266 let mut uppers = Array1::<f64>::zeros(h);
1267
1268 for step in 0..h {
1269 m = mat_vec(f, &m)?;
1271 let fp = mat_mul(f, &p)?;
1272 p = mat_add(&mat_mul(&fp, &ft)?, q)?;
1273
1274 let y_mean = mat_vec(h_mat, &m)?[0];
1276 let hp = mat_mul(h_mat, &p)?;
1277 let s = mat_add(&mat_mul(&hp, &mat_t(h_mat))?, r)?;
1278 let y_var = s[[0, 0]].max(0.0);
1279 let y_std = y_var.sqrt();
1280
1281 means[step] = y_mean;
1282 lowers[step] = y_mean - z_95 * y_std;
1283 uppers[step] = y_mean + z_95 * y_std;
1284 }
1285
1286 Ok((means, lowers, uppers))
1287}
1288
1289pub fn detect_structural_break_ssm(
1297 observations: ArrayView1<f64>,
1298 model: &StateSpaceModel,
1299) -> Result<Vec<(usize, f64)>> {
1300 let n = observations.len();
1301 if n < 3 {
1302 return Err(TimeSeriesError::InsufficientData {
1303 message: "Structural break detection requires at least 3 observations".to_string(),
1304 required: 3,
1305 actual: n,
1306 });
1307 }
1308
1309 let obs_2d: Array2<f64> = observations
1310 .iter()
1311 .copied()
1312 .collect::<Array1<f64>>()
1313 .into_shape_with_order((n, 1))
1314 .map_err(|e| TimeSeriesError::ComputationError(format!("Shape error: {e}")))?;
1315
1316 let filt = kalman_filter(obs_2d.view(), model, true)?;
1317
1318 let innov: Vec<f64> = (0..n).map(|t| filt.innovations[[t, 0]]).collect();
1320 let sigma: f64 = {
1321 let mean = innov.iter().copied().sum::<f64>() / n as f64;
1322 let var = innov.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / n as f64;
1323 var.sqrt().max(1e-12)
1324 };
1325
1326 let mut cusum = 0.0_f64;
1328 let mut results = Vec::with_capacity(n);
1329 for (t, &v) in innov.iter().enumerate() {
1330 cusum += v / sigma;
1331 results.push((t, cusum));
1332 }
1333
1334 Ok(results)
1335}
1336
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::Array2;

    /// Wraps a slice into the `(n, 1)` observation matrix the filter expects.
    fn to_obs2d(v: &[f64]) -> Array2<f64> {
        let n = v.len();
        Array2::from_shape_vec((n, 1), v.to_vec()).expect("shape ok")
    }

    // --- Model construction -------------------------------------------------

    #[test]
    fn test_local_level_dims() {
        // Noise std 0.1 / 0.5 should be stored squared as variances.
        let m = StateSpaceModel::local_level(0.1, 0.5);
        assert_eq!(m.n_states(), 1);
        assert_eq!(m.n_obs(), 1);
        assert!((m.transition_matrix[[0, 0]] - 1.0).abs() < 1e-15);
        assert!((m.process_noise[[0, 0]] - 0.01).abs() < 1e-15);
        assert!((m.observation_noise[[0, 0]] - 0.25).abs() < 1e-15);
    }

    #[test]
    fn test_local_linear_trend_dims() {
        // Transition is the standard [[1, 1], [0, 1]] level/slope matrix.
        let m = StateSpaceModel::local_linear_trend(0.1, 0.05, 0.2);
        assert_eq!(m.n_states(), 2);
        assert_eq!(m.n_obs(), 1);
        assert_eq!(m.transition_matrix[[0, 0]], 1.0);
        assert_eq!(m.transition_matrix[[0, 1]], 1.0);
        assert_eq!(m.transition_matrix[[1, 0]], 0.0);
        assert_eq!(m.transition_matrix[[1, 1]], 1.0);
    }

    #[test]
    fn test_ar1_state_space() {
        let phi = vec![0.8];
        let m = StateSpaceModel::ar(&phi, 0.3);
        assert_eq!(m.n_states(), 1);
        assert_eq!(m.transition_matrix[[0, 0]], 0.8);
        assert_eq!(m.observation_matrix[[0, 0]], 1.0);
    }

    #[test]
    fn test_ar2_state_space() {
        // Companion form: coefficients in the first row, shift identity below.
        let phi = vec![0.6, 0.2];
        let m = StateSpaceModel::ar(&phi, 0.3);
        assert_eq!(m.n_states(), 2);
        assert!((m.transition_matrix[[0, 0]] - 0.6).abs() < 1e-12);
        assert!((m.transition_matrix[[0, 1]] - 0.2).abs() < 1e-12);
        assert_eq!(m.transition_matrix[[1, 0]], 1.0);
    }

    #[test]
    fn test_seasonal_dims() {
        // Period s uses s-1 states; first row is all -1 (sum-to-zero form).
        let m = StateSpaceModel::seasonal(4, 0.1);
        assert_eq!(m.n_states(), 3);
        assert_eq!(m.n_obs(), 1);
        for j in 0..3 {
            assert_eq!(m.transition_matrix[[0, j]], -1.0);
        }
        assert_eq!(m.transition_matrix[[1, 0]], 1.0);
        assert_eq!(m.transition_matrix[[2, 1]], 1.0);
    }

    #[test]
    fn test_structural_trend_only() {
        let m = StateSpaceModel::structural(0.1, 0.05, None, None, 0.2);
        assert_eq!(m.n_states(), 2);
        assert_eq!(m.n_obs(), 1);
    }

    #[test]
    fn test_structural_trend_plus_seasonal() {
        // 2 trend states + (4 - 1) seasonal states.
        let m = StateSpaceModel::structural(0.1, 0.05, Some(0.02), Some(4), 0.2);
        assert_eq!(m.n_states(), 5);
        assert_eq!(m.n_obs(), 1);
    }

    // --- Kalman filter ------------------------------------------------------

    #[test]
    fn test_filter_constant_signal() {
        // Near-constant signal: the filtered level should settle near 5.
        let mut obs_vec = vec![5.0_f64; 50];
        for (i, v) in obs_vec.iter_mut().enumerate() {
            *v += 0.1 * ((i as f64 * 0.3).sin());
        }
        let obs = to_obs2d(&obs_vec);
        let model = StateSpaceModel::local_level(0.01, 0.5);
        let res = kalman_filter(obs.view(), &model, false).expect("filter ok");
        let last = res.filtered_states[[49, 0]];
        assert!(
            (last - 5.0).abs() < 1.0,
            "last filtered state={last} not near 5"
        );
        assert!(res.log_likelihood.is_finite());
    }

    #[test]
    fn test_filter_log_likelihood_negative() {
        let obs = to_obs2d(&[1.0, 2.0, 1.5, 2.5, 1.8]);
        let model = StateSpaceModel::local_level(0.1, 0.3);
        let res = kalman_filter(obs.view(), &model, false).expect("ok");
        assert!(res.log_likelihood.is_finite());
    }

    #[test]
    fn test_filter_missing_data() {
        // NaN observations must be skipped, leaving finite state estimates.
        let obs = to_obs2d(&[1.0, f64::NAN, 2.0, f64::NAN, 3.0]);
        let model = StateSpaceModel::local_level(0.1, 0.3);
        let res = kalman_filter(obs.view(), &model, true).expect("missing ok");
        for t in 0..5 {
            assert!(res.filtered_states[[t, 0]].is_finite());
        }
    }

    #[test]
    fn test_filter_dimension_mismatch_error() {
        // 1-column observations against a 2-obs model must be rejected.
        let obs = to_obs2d(&[1.0, 2.0, 3.0]);
        let model = StateSpaceModel::new(2, 2);
        let result = kalman_filter(obs.view(), &model, false);
        assert!(result.is_err());
    }

    // --- Kalman smoother ----------------------------------------------------

    #[test]
    fn test_smoother_local_level() {
        let data: Vec<f64> = (0..20).map(|i| (i as f64 * 0.1).sin() + 2.0).collect();
        let obs = to_obs2d(&data);
        let model = StateSpaceModel::local_level(0.1, 0.3);
        let filt = kalman_filter(obs.view(), &model, false).expect("filter ok");
        let smooth = kalman_smoother(&filt, &model).expect("smoother ok");
        assert_eq!(smooth.smoothed_states.dim().0, 20);
        assert_eq!(smooth.smoothed_covs.len(), 20);
        // Smoothed path should stay reasonably close to the filtered path.
        for t in 0..20 {
            let fs = filt.filtered_states[[t, 0]];
            let ss = smooth.smoothed_states[[t, 0]];
            assert!((fs - ss).abs() < 5.0, "t={t}: filter={fs} smoother={ss}");
        }
    }

    #[test]
    fn test_smoother_ll_matches_filter() {
        // Smoothing must not change the likelihood computed by the filter.
        let data = vec![1.0, 1.1, 1.2, 1.1, 1.3, 1.2];
        let obs = to_obs2d(&data);
        let model = StateSpaceModel::local_level(0.05, 0.1);
        let filt = kalman_filter(obs.view(), &model, false).expect("ok");
        let smooth = kalman_smoother(&filt, &model).expect("ok");
        assert!((filt.log_likelihood - smooth.log_likelihood).abs() < 1e-10);
    }

    #[test]
    fn test_smoother_backward_improves_early_estimates() {
        // The smoother conditions on the full sample, so early-time
        // posterior variance can only shrink relative to the filter.
        let data: Vec<f64> = (0..30).map(|i| i as f64 + 0.1 * (i as f64).sin()).collect();
        let obs = to_obs2d(&data);
        let model = StateSpaceModel::local_level(0.5, 0.5);
        let filt = kalman_filter(obs.view(), &model, false).expect("ok");
        let smooth = kalman_smoother(&filt, &model).expect("ok");
        let filt_var = filt.filtered_covs[0][[0, 0]];
        let smooth_var = smooth.smoothed_covs[0][[0, 0]];
        assert!(
            smooth_var <= filt_var + 1e-8,
            "smooth_var={smooth_var} > filt_var={filt_var}"
        );
    }

    #[test]
    fn test_ar1_filter_tracks_series() {
        let n = 40;
        let mut data = vec![0.0_f64; n];
        for i in 1..n {
            data[i] = 0.7 * data[i - 1] + 0.3 * (i as f64 * 0.5).sin();
        }
        let obs = to_obs2d(&data);
        let model = StateSpaceModel::ar(&[0.7], 0.3);
        let filt = kalman_filter(obs.view(), &model, false).expect("ok");
        for t in 5..n {
            assert!(filt.filtered_states[[t, 0]].is_finite());
        }
    }

    #[test]
    fn test_local_linear_trend_filter() {
        // Near-linear ramp: the filtered level should track the trend.
        let data: Vec<f64> = (0..25)
            .map(|i| i as f64 + 0.05 * (i as f64).cos())
            .collect();
        let obs = to_obs2d(&data);
        let model = StateSpaceModel::local_linear_trend(0.01, 0.001, 0.2);
        let filt = kalman_filter(obs.view(), &model, false).expect("ok");
        let last_level = filt.filtered_states[[24, 0]];
        assert!((last_level - 24.0).abs() < 5.0, "last level={last_level}");
    }

    // --- Forecasting --------------------------------------------------------

    #[test]
    fn test_forecast_ssm_local_level() {
        let data: Vec<f64> = vec![3.0; 20];
        let model = StateSpaceModel::local_level(0.01, 0.1);
        let (mean, lower, upper) =
            forecast_ssm(Array1::from(data).view(), &model, 5).expect("forecast ok");
        assert_eq!(mean.len(), 5);
        for i in 0..5 {
            assert!((mean[i] - 3.0).abs() < 1.0, "mean[{i}]={}", mean[i]);
            assert!(lower[i] <= mean[i]);
            assert!(upper[i] >= mean[i]);
        }
    }

    #[test]
    fn test_forecast_intervals_widen() {
        // Forecast uncertainty accumulates with horizon.
        let data: Vec<f64> = (0..20).map(|i| i as f64 * 0.1).collect();
        let model = StateSpaceModel::local_level(0.2, 0.1);
        let (_, lower, upper) =
            forecast_ssm(Array1::from(data).view(), &model, 10).expect("forecast ok");
        let width_0 = upper[0] - lower[0];
        let width_9 = upper[9] - lower[9];
        assert!(width_9 >= width_0, "width_9={width_9} < width_0={width_0}");
    }

    // --- EM estimation ------------------------------------------------------

    #[test]
    fn test_em_local_level_converges() {
        // EM log-likelihood should be (approximately) non-decreasing.
        let data: Vec<f64> = (0..60)
            .map(|i| 2.0 + 0.5 * (i as f64 * 0.2).sin() + 0.1 * (i as f64 * 0.07).cos())
            .collect();
        let obs = to_obs2d(&data);
        let init = StateSpaceModel::local_level(0.3, 0.5);
        let em = em_estimation(obs.view(), init, 100, 1e-5, false, false).expect("em ok");
        for i in 1..em.log_likelihoods.len() {
            let prev = em.log_likelihoods[i - 1];
            let curr = em.log_likelihoods[i];
            assert!(
                curr >= prev - 1.0,
                "LL decreased at iter {i}: {prev} -> {curr}"
            );
        }
    }

    #[test]
    fn test_em_returns_positive_variances() {
        let data: Vec<f64> = (0..30).map(|i| (i as f64 * 0.3).sin()).collect();
        let obs = to_obs2d(&data);
        let init = StateSpaceModel::local_level(0.2, 0.4);
        let em = em_estimation(obs.view(), init, 50, 1e-4, false, false).expect("ok");
        assert!(em.model.process_noise[[0, 0]] > 0.0);
        assert!(em.model.observation_noise[[0, 0]] > 0.0);
    }

    #[test]
    fn test_fit_local_level() {
        let data: Vec<f64> = (0..40)
            .map(|i| 5.0 + 0.3 * (i as f64 * 0.4).sin())
            .collect();
        let arr = Array1::from(data);
        let (sigma_eta, sigma_eps, smooth) = fit_local_level(arr.view(), 50).expect("ok");
        assert!(sigma_eta > 0.0);
        assert!(sigma_eps > 0.0);
        assert_eq!(smooth.smoothed_states.dim().0, 40);
    }

    #[test]
    fn test_fit_local_linear_trend() {
        let data: Vec<f64> = (0..50)
            .map(|i| i as f64 * 0.5 + 0.2 * (i as f64).sin())
            .collect();
        let arr = Array1::from(data);
        let (sigma_level, sigma_slope, sigma_obs, smooth) =
            fit_local_linear_trend(arr.view(), 30).expect("ok");
        assert!(sigma_level > 0.0);
        assert!(sigma_slope > 0.0);
        assert!(sigma_obs > 0.0);
        assert_eq!(smooth.smoothed_states.dim().0, 50);
    }

    // --- Dynamic linear models ----------------------------------------------

    #[test]
    fn test_dlm_static() {
        let data: Vec<f64> = vec![1.0, 2.0, 3.0, 2.0, 1.0, 2.0, 3.0];
        let obs = to_obs2d(&data);
        let model = StateSpaceModel::local_level(0.2, 0.3);
        let dlm = DynamicLinearModel::new(model);
        let filt = dlm.filter(obs.view()).expect("ok");
        assert_eq!(filt.filtered_states.dim().0, 7);
    }

    #[test]
    fn test_dlm_time_varying_obs() {
        // One observation matrix per time step, drifting slightly.
        let n = 5;
        let data: Vec<f64> = vec![1.0, 2.0, 1.5, 2.5, 2.0];
        let obs = to_obs2d(&data);
        let model = StateSpaceModel::local_level(0.1, 0.3);
        let h_seq: Vec<Array2<f64>> = (0..n)
            .map(|i| {
                let v = 1.0 + 0.01 * i as f64;
                Array2::from_elem((1, 1), v)
            })
            .collect();
        let dlm = DynamicLinearModel::new(model).with_time_varying_obs(h_seq);
        let filt = dlm.filter(obs.view()).expect("time-varying ok");
        assert_eq!(filt.filtered_states.dim().0, n);
    }

    // --- Structural break detection -----------------------------------------

    #[test]
    fn test_detect_structural_break() {
        // Level shift 1 -> 5 at t=15; the max |CUSUM| should land at or
        // after the change point region.
        let mut data: Vec<f64> = (0..15).map(|_| 1.0_f64).collect();
        data.extend((0..15).map(|_| 5.0_f64));
        let arr = Array1::from(data.clone());
        let model = StateSpaceModel::local_level(0.1, 0.5);
        let scores = detect_structural_break_ssm(arr.view(), &model).expect("ok");
        assert_eq!(scores.len(), 30);
        let (break_t, _) = scores
            .iter()
            .max_by(|a, b| {
                a.1.abs()
                    .partial_cmp(&b.1.abs())
                    .expect("unexpected None or Err")
            })
            .copied()
            .unwrap_or((0, 0.0));
        assert!(break_t >= 10, "break_t={break_t} too early");
    }

    #[test]
    fn test_cusum_monotone_no_break() {
        // Smooth sinusoid: every CUSUM score must at least be finite.
        let data: Vec<f64> = (0..30).map(|i| (i as f64 * 0.5).sin()).collect();
        let arr = Array1::from(data);
        let model = StateSpaceModel::local_level(0.1, 0.3);
        let scores = detect_structural_break_ssm(arr.view(), &model).expect("ok");
        for (_, s) in &scores {
            assert!(s.is_finite());
        }
    }

    #[test]
    fn test_innovations_near_zero_perfect_prediction() {
        // Constant series with tiny noise variances: after burn-in the
        // one-step-ahead innovations should be essentially zero.
        let data: Vec<f64> = vec![3.0; 30];
        let obs = to_obs2d(&data);
        let model = StateSpaceModel::local_level(0.001, 0.001);
        let filt = kalman_filter(obs.view(), &model, false).expect("ok");
        for t in 10..30 {
            assert!(
                filt.innovations[[t, 0]].abs() < 0.1,
                "innov at {t}={}",
                filt.innovations[[t, 0]]
            );
        }
    }
}