use scirs2_core::ndarray::Array2;
use crate::error::{Result, TimeSeriesError};
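/// Computes `A^T * B` without materializing the transpose.
/// `a` has shape (m, k), `b` has shape (m, n); the result is (k, n).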
fn matmul_at_b(a: &Array2<f64>, b: &Array2<f64>) -> Array2<f64> {
let (m, k) = a.dim();
let (m2, n) = b.dim();
assert_eq!(m, m2, "matmul_at_b: row mismatch");
let mut c = Array2::zeros((k, n));
for i in 0..k {
for j in 0..n {
let mut s = 0.0;
for l in 0..m {
s += a[[l, i]] * b[[l, j]];
}
c[[i, j]] = s;
}
}
c
}
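/// Naive dense matrix product `A * B` for an (m, k) by (k, n) pair.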
fn matmul(a: &Array2<f64>, b: &Array2<f64>) -> Array2<f64> {
let (m, k) = a.dim();
let (k2, n) = b.dim();
assert_eq!(k, k2, "matmul: inner dimension mismatch");
let mut c = Array2::zeros((m, n));
for i in 0..m {
for j in 0..n {
let mut s = 0.0;
for l in 0..k {
s += a[[i, l]] * b[[l, j]];
}
c[[i, j]] = s;
}
}
c
}
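/// Computes the quadratic form `A * B * A^T`, used for covariance
/// propagation; `a` is (m, k) and `b` must be square (k, k).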
fn mat_quad(a: &Array2<f64>, b: &Array2<f64>) -> Array2<f64> {
let ab = matmul(a, b);
let (m, k) = a.dim();
let mut c = Array2::zeros((m, m));
for i in 0..m {
for j in 0..m {
let mut s = 0.0;
for l in 0..k {
s += ab[[i, l]] * a[[j, l]];
}
c[[i, j]] = s;
}
}
c
}
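/// Inverts a symmetric positive-definite matrix via Cholesky factorization
/// A = L L^T, so A^{-1} = L^{-T} L^{-1}. Non-positive pivots are jittered,
/// which lets the routine degrade gracefully on near-singular inputs
/// (the current implementation therefore never returns `Err`).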
fn cholesky_invert(a: &Array2<f64>) -> Result<Array2<f64>> {
let n = a.nrows();
let mut l = Array2::<f64>::zeros((n, n));
for i in 0..n {
for j in 0..=i {
let mut s = a[[i, j]];
for k in 0..j {
s -= l[[i, k]] * l[[j, k]];
}
if i == j {
if s <= 0.0 {
s = s.abs() + 1e-8;
}
l[[i, j]] = s.sqrt();
} else {
l[[i, j]] = s / l[[j, j]];
}
}
}
let mut linv = Array2::<f64>::zeros((n, n));
for i in 0..n {
linv[[i, i]] = 1.0 / l[[i, i]];
for j in 0..i {
let mut s = 0.0;
for k in j..i {
s -= l[[i, k]] * linv[[k, j]];
}
linv[[i, j]] = s / l[[i, i]];
}
}
let mut ainv = Array2::<f64>::zeros((n, n));
for i in 0..n {
for j in 0..=i {
let mut s = 0.0;
for k in i..n {
s += linv[[k, i]] * linv[[k, j]];
}
ainv[[i, j]] = s;
ainv[[j, i]] = s;
}
}
Ok(ainv)
}
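/// Log-determinant of a symmetric positive-definite matrix from its Cholesky
/// factor: ln|A| = sum_i ln(L_ii^2). Pivots are floored at a small positive
/// value so near-singular inputs do not produce -inf.
fn cholesky_logdet(a: &Array2<f64>) -> f64 {
let n = a.nrows();
let mut l = Array2::<f64>::zeros((n, n));
let mut logdet = 0.0;
for i in 0..n {
for j in 0..=i {
let mut s = a[[i, j]];
for k in 0..j {
s -= l[[i, k]] * l[[j, k]];
}
if i == j {
// d is the squared diagonal entry L_ii^2.
let d = s.max(1e-12);
l[[i, j]] = d.sqrt();
logdet += d.ln();
} else {
l[[i, j]] = s / l[[j, j]];
}
}
}
logdet
}
/// Principal components via power iteration with deflation: center `data`
/// (t, n) column-wise, then extract the top `r` eigenvectors of X^T X.
/// Returns `(eigvecs, scores)` with `eigvecs` of shape (n, r) and
/// `scores = X_centered * eigvecs` of shape (t, r).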
fn pca_power_iter(data: &Array2<f64>, r: usize, max_iter: usize) -> (Array2<f64>, Array2<f64>) {
let (t, n) = data.dim();
let r = r.min(n).min(t);
let means: Vec<f64> = (0..n)
.map(|j| data.column(j).iter().sum::<f64>() / t as f64)
.collect();
let mut xc = data.to_owned();
for j in 0..n {
for i in 0..t {
xc[[i, j]] -= means[j];
}
}
// Gram matrix X^T X of the centered data (proportional to the covariance).
let cov = matmul_at_b(&xc, &xc);
let mut eigvecs = Array2::<f64>::zeros((n, r));
let mut residual = cov.clone();
for k in 0..r {
let mut v: Vec<f64> = (0..n).map(|i| if i == k { 1.0 } else { 0.01 }).collect();
let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
for x in &mut v {
*x /= norm;
}
for _ in 0..max_iter {
let mut w = vec![0.0_f64; n];
for i in 0..n {
for j in 0..n {
w[i] += residual[[i, j]] * v[j];
}
}
let norm_w: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
if norm_w < 1e-15 {
break;
}
let mut diff = 0.0_f64;
for i in 0..n {
let v_new = w[i] / norm_w;
diff += (v_new - v[i]).abs();
v[i] = v_new;
}
if diff < 1e-10 {
break;
}
}
// Rayleigh quotient of v against the *residual* matrix: the eigenvalue,
// in un-scaled X^T X units, that must be deflated. (Dividing by the
// sample size here would leave most of the component in the residual,
// so every later iteration would re-converge to the first eigenvector.)
let mut lambda = 0.0_f64;
for i in 0..n {
let mut rv_i = 0.0;
for j in 0..n {
rv_i += residual[[i, j]] * v[j];
}
lambda += v[i] * rv_i;
}
for i in 0..n {
eigvecs[[i, k]] = v[i];
}
// Deflate the recovered component from the residual.
for i in 0..n {
for j in 0..n {
residual[[i, j]] -= lambda * v[i] * v[j];
}
}
}
let scores = matmul(&xc, &eigvecs);
(eigvecs, scores)
}
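/// Filtered and predicted state moments plus innovation quantities for one
/// time step of the forward Kalman pass.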
struct KalmanState {
f_filt: Vec<f64>,
p_filt: Array2<f64>,
f_pred: Vec<f64>,
p_pred: Array2<f64>,
innovation: Vec<f64>,
s_innov: Array2<f64>,
}
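/// Forward Kalman filter for the state-space model
/// x_t = Λ f_t + e_t, e_t ~ N(0, R) with diagonal R from `r_diag`, and
/// f_t = A f_{t-1} + u_t, u_t ~ N(0, Q). Returns one `KalmanState` per row
/// of `data`.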
fn kalman_filter(
data: &Array2<f64>,
lambda: &Array2<f64>,
a: &Array2<f64>,
r_diag: &[f64],
q: &Array2<f64>,
) -> Result<Vec<KalmanState>> {
let (t, n) = data.dim();
let r = lambda.ncols();
let r_mat = {
let mut rm = Array2::<f64>::zeros((n, n));
for i in 0..n {
rm[[i, i]] = r_diag[i].max(1e-8);
}
rm
};
// Initial state prior: zero mean, identity covariance.
let p0 = Array2::<f64>::eye(r);
let f0 = vec![0.0; r];
let mut f_pred = f0.clone();
let mut p_pred = p0.clone();
let mut states = Vec::with_capacity(t);
for t_idx in 0..t {
let x_t: Vec<f64> = (0..n).map(|j| data[[t_idx, j]]).collect();
let lambda_f: Vec<f64> = (0..n)
.map(|j| (0..r).map(|k| lambda[[j, k]] * f_pred[k]).sum::<f64>())
.collect();
let innov: Vec<f64> = (0..n).map(|j| x_t[j] - lambda_f[j]).collect();
// Innovation covariance S = Λ P_pred Λ^T + R.
let lambda_p = matmul(lambda, &p_pred);
let mut s = Array2::<f64>::zeros((n, n));
for i in 0..n {
for j in 0..n {
let mut val = r_mat[[i, j]];
for k in 0..r {
val += lambda_p[[i, k]] * lambda[[j, k]];
}
s[[i, j]] = val;
}
}
let s_inv = cholesky_invert(&s)?;
let p_lambda_t: Array2<f64> = {
let mut pl = Array2::<f64>::zeros((r, n));
for i in 0..r {
for j in 0..n {
let mut val = 0.0;
for k in 0..r {
val += p_pred[[i, k]] * lambda[[j, k]];
}
pl[[i, j]] = val;
}
}
pl
};
let k_gain = matmul(&p_lambda_t, &s_inv);
let mut f_filt = f_pred.clone();
for i in 0..r {
for j in 0..n {
f_filt[i] += k_gain[[i, j]] * innov[j];
}
}
let mut kl = Array2::<f64>::zeros((r, r));
for i in 0..r {
for j in 0..r {
for k in 0..n {
kl[[i, j]] += k_gain[[i, k]] * lambda[[k, j]];
}
}
}
let i_kl = {
let mut ikl = Array2::<f64>::eye(r);
for i in 0..r {
for j in 0..r {
ikl[[i, j]] -= kl[[i, j]];
}
}
ikl
};
let p_filt = matmul(&i_kl, &p_pred);
states.push(KalmanState {
f_filt: f_filt.clone(),
p_filt: p_filt.clone(),
f_pred: f_pred.clone(),
p_pred: p_pred.clone(),
innovation: innov,
s_innov: s,
});
f_pred = (0..r)
.map(|i| (0..r).map(|j| a[[i, j]] * f_filt[j]).sum::<f64>())
.collect();
p_pred = {
// P_pred = A P_filt A^T + Q.
let mut apat = mat_quad(a, &p_filt);
for i in 0..r {
for j in 0..r {
apat[[i, j]] += q[[i, j]];
}
}
apat
};
}
Ok(states)
}
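/// Rauch-Tung-Striebel backward smoother over the filtered states; returns
/// the smoothed factor means and covariances for every time step.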
fn kalman_smoother(
states: &[KalmanState],
a: &Array2<f64>,
) -> Result<(Vec<Vec<f64>>, Vec<Array2<f64>>)> {
let t = states.len();
let r = a.nrows();
let mut f_smooth = vec![vec![0.0; r]; t];
let mut p_smooth = vec![Array2::<f64>::zeros((r, r)); t];
f_smooth[t - 1] = states[t - 1].f_filt.clone();
p_smooth[t - 1] = states[t - 1].p_filt.clone();
for t_idx in (0..t - 1).rev() {
let p_t = &states[t_idx].p_filt;
let p_next_pred = &states[t_idx + 1].p_pred;
let p_inv = cholesky_invert(p_next_pred)?;
// Smoother gain J_t = P_t A^T (P_{t+1|t})^{-1}; note (A P_t)^T = P_t A^T.
let ap_t = matmul(a, p_t);
let mut l = Array2::<f64>::zeros((r, r));
for i in 0..r {
for j in 0..r {
let mut s = 0.0;
for k in 0..r {
s += ap_t[[k, i]] * p_inv[[k, j]];
}
l[[i, j]] = s;
}
}
let f_next_smooth = &f_smooth[t_idx + 1];
let f_next_pred = &states[t_idx + 1].f_pred;
let mut f_s = states[t_idx].f_filt.clone();
for i in 0..r {
for j in 0..r {
f_s[i] += l[[i, j]] * (f_next_smooth[j] - f_next_pred[j]);
}
}
f_smooth[t_idx] = f_s;
let mut p_s = p_t.clone();
let p_diff = {
let mut pd = p_smooth[t_idx + 1].clone();
for i in 0..r {
for j in 0..r {
pd[[i, j]] -= p_next_pred[[i, j]];
}
}
pd
};
let lp = matmul(&l, &p_diff);
for i in 0..r {
for j in 0..r {
let mut s = 0.0;
for k in 0..r {
s += lp[[i, k]] * l[[j, k]];
}
p_s[[i, j]] += s;
}
}
p_smooth[t_idx] = p_s;
}
Ok((f_smooth, p_smooth))
}
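/// Dynamic factor model x_t = Λ f_t + e_t with VAR(1) factor dynamics
/// f_t = A f_{t-1} + u_t, estimated by EM on standardized data: the E-step
/// runs the Kalman filter/smoother, the M-step updates Λ, A, Q, and R in
/// closed form.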
#[derive(Debug, Clone)]
pub struct DynamicFactorModel {
pub n_series: usize,
pub n_factors: usize,
pub n_obs: usize,
pub loadings: Array2<f64>,
pub transition: Array2<f64>,
pub idio_var: Vec<f64>,
pub factor_cov: Array2<f64>,
smoothed_factors: Array2<f64>,
series_means: Vec<f64>,
series_stds: Vec<f64>,
pub log_likelihood: f64,
pub n_iter: usize,
}
impl DynamicFactorModel {
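/// Fits the model: standardizes `data` (t, n), initializes via PCA, then
/// iterates EM until the log-likelihood change drops below 1e-6 or
/// `max_iter` iterations are reached.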
pub fn fit(data: &Array2<f64>, n_factors: usize, max_iter: usize) -> Result<Self> {
let (t, n) = data.dim();
if n_factors == 0 {
return Err(TimeSeriesError::InvalidInput(
"n_factors must be at least 1".to_string(),
));
}
if n_factors >= n {
return Err(TimeSeriesError::InvalidInput(format!(
"n_factors ({n_factors}) must be less than n_series ({n})"
)));
}
if t < 10 {
return Err(TimeSeriesError::InvalidInput(
"Need at least 10 observations".to_string(),
));
}
let means: Vec<f64> = (0..n)
.map(|j| data.column(j).iter().sum::<f64>() / t as f64)
.collect();
let stds: Vec<f64> = (0..n)
.map(|j| {
let m = means[j];
let v: f64 = data.column(j).iter().map(|&x| (x - m).powi(2)).sum::<f64>() / t as f64;
v.sqrt().max(1e-8)
})
.collect();
let mut xstd = data.to_owned();
for j in 0..n {
for i in 0..t {
xstd[[i, j]] = (data[[i, j]] - means[j]) / stds[j];
}
}
let (loadings_pca, factors_pca) =
pca_power_iter(&xstd, n_factors, 500);
let mut lambda = loadings_pca;
// Initialize the transition matrix with per-factor AR(1) coefficients
// estimated from the PCA scores.
let mut a = {
let mut trans = Array2::<f64>::zeros((n_factors, n_factors));
for k in 0..n_factors {
let mut xy = 0.0_f64;
let mut xx = 0.0_f64;
for i in 1..t {
xy += factors_pca[[i, k]] * factors_pca[[i - 1, k]];
xx += factors_pca[[i - 1, k]].powi(2);
}
trans[[k, k]] = if xx > 0.0 { (xy / xx).clamp(-0.99, 0.99) } else { 0.5 };
}
trans
};
let mut r_diag: Vec<f64> = {
// Idiosyncratic variances from the PCA reconstruction residuals.
let predicted = matmul(&factors_pca, &lambda.t().to_owned());
(0..n)
.map(|j| {
let v: f64 = (0..t)
.map(|i| (xstd[[i, j]] - predicted[[i, j]]).powi(2))
.sum::<f64>()
/ t as f64;
v.max(1e-4)
})
.collect()
};
let mut q = Array2::<f64>::eye(n_factors) * 0.1;
let mut log_likelihood = f64::NEG_INFINITY;
let tol = 1e-6;
let mut n_iter = 0;
for iter in 0..max_iter {
n_iter = iter + 1;
let states = kalman_filter(&xstd, &lambda, &a, &r_diag, &q)?;
let (f_smooth, p_smooth) = kalman_smoother(&states, &a)?;
let new_ll = {
use std::f64::consts::PI;
let mut ll = 0.0;
for state in &states {
let s_inv = cholesky_invert(&state.s_innov).unwrap_or_else(|_| Array2::eye(n));
// ln|S| via a Cholesky factor; summing the logs of the raw diagonal
// of S is only exact when S is diagonal.
let s_det_ln = cholesky_logdet(&state.s_innov);
let mut qform = 0.0;
for i in 0..n {
for j in 0..n {
qform += state.innovation[i] * s_inv[[i, j]] * state.innovation[j];
}
}
ll -= 0.5 * (n as f64 * (2.0 * PI).ln() + s_det_ln + qform);
}
ll
};
let ll_change = (new_ll - log_likelihood).abs();
log_likelihood = new_ll;
let mut s_ff = Array2::<f64>::zeros((n_factors, n_factors));
let mut s_ff1 = Array2::<f64>::zeros((n_factors, n_factors));
let mut s_f1f1 = Array2::<f64>::zeros((n_factors, n_factors));
for t_idx in 0..t {
for i in 0..n_factors {
for j in 0..n_factors {
s_ff[[i, j]] += p_smooth[t_idx][[i, j]]
+ f_smooth[t_idx][i] * f_smooth[t_idx][j];
}
}
}
for t_idx in 1..t {
for i in 0..n_factors {
for j in 0..n_factors {
s_ff1[[i, j]] += f_smooth[t_idx][i] * f_smooth[t_idx - 1][j];
s_f1f1[[i, j]] += p_smooth[t_idx - 1][[i, j]]
+ f_smooth[t_idx - 1][i] * f_smooth[t_idx - 1][j];
}
}
}
if let Ok(s_f1f1_inv) = cholesky_invert(&s_f1f1) {
a = matmul(&s_ff1, &s_f1f1_inv);
for i in 0..n_factors {
for j in 0..n_factors {
if a[[i, j]].abs() > 0.99 {
a[[i, j]] = a[[i, j]].signum() * 0.99;
}
}
}
}
let a_sff1t = {
let sff1t = s_ff1.t().to_owned();
matmul(&a, &sff1t)
};
let mut q_new = Array2::<f64>::zeros((n_factors, n_factors));
for i in 0..n_factors {
for j in 0..n_factors {
q_new[[i, j]] = (s_ff[[i, j]] - a_sff1t[[i, j]]) / t as f64;
if i == j && q_new[[i, j]] <= 0.0 {
q_new[[i, j]] = 1e-6;
}
}
}
q = q_new;
let mut xf = Array2::<f64>::zeros((n, n_factors));
for t_idx in 0..t {
for i in 0..n {
for k in 0..n_factors {
xf[[i, k]] += xstd[[t_idx, i]] * f_smooth[t_idx][k];
}
}
}
if let Ok(s_ff_inv) = cholesky_invert(&s_ff) {
lambda = matmul(&xf, &s_ff_inv);
}
// Update idiosyncratic variances:
// R_jj = (x_j'x_j - 2 x_j'F λ_j + λ_j S_ff λ_j') / T.
let lambda_sff = matmul(&lambda, &s_ff);
for j in 0..n {
let x_sq: f64 = (0..t).map(|i| xstd[[i, j]].powi(2)).sum();
let xf_term: f64 = (0..n_factors).map(|k| xf[[j, k]] * lambda[[j, k]]).sum();
let lsfl: f64 = (0..n_factors)
.map(|k| lambda_sff[[j, k]] * lambda[[j, k]])
.sum();
r_diag[j] = ((x_sq - 2.0 * xf_term + lsfl) / t as f64).max(1e-6);
}
if iter > 0 && ll_change < tol {
break;
}
}
let states = kalman_filter(&xstd, &lambda, &a, &r_diag, &q)?;
let (f_smooth, _) = kalman_smoother(&states, &a)?;
let mut smoothed_factors = Array2::<f64>::zeros((t, n_factors));
for i in 0..t {
for k in 0..n_factors {
smoothed_factors[[i, k]] = f_smooth[i][k];
}
}
Ok(DynamicFactorModel {
n_series: n,
n_factors,
n_obs: t,
loadings: lambda,
transition: a,
idio_var: r_diag,
factor_cov: q,
smoothed_factors,
series_means: means,
series_stds: stds,
log_likelihood,
n_iter,
})
}
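/// Smoothed factor estimates with shape (n_obs, n_factors).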
pub fn factors(&self) -> &Array2<f64> {
&self.smoothed_factors
}
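/// In-sample fitted values Λ f_t, mapped back to the original scale of
/// each series.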
pub fn fitted_values(&self) -> Array2<f64> {
let (t, r) = self.smoothed_factors.dim();
let n = self.n_series;
let mut fitted = Array2::<f64>::zeros((t, n));
for i in 0..t {
for j in 0..n {
let mut val = 0.0;
for k in 0..r {
val += self.smoothed_factors[[i, k]] * self.loadings[[j, k]];
}
fitted[[i, j]] = val * self.series_stds[j] + self.series_means[j];
}
}
fitted
}
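/// Propagates the last smoothed factor vector through the VAR(1) transition
/// for `h` steps and maps it through the loadings; returns an
/// (h, n_series) matrix on the original scale.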
pub fn forecast(&self, h: usize) -> Result<Array2<f64>> {
if h == 0 {
return Err(TimeSeriesError::InvalidInput("h must be at least 1".to_string()));
}
let n = self.n_series;
let r = self.n_factors;
let t = self.n_obs;
let mut f_curr: Vec<f64> = (0..r).map(|k| self.smoothed_factors[[t - 1, k]]).collect();
let mut forecasts = Array2::<f64>::zeros((h, n));
for step in 0..h {
let f_next: Vec<f64> = (0..r)
.map(|i| (0..r).map(|j| self.transition[[i, j]] * f_curr[j]).sum::<f64>())
.collect();
f_curr = f_next;
for j in 0..n {
let val: f64 = (0..r).map(|k| self.loadings[[j, k]] * f_curr[k]).sum();
forecasts[[step, j]] = val * self.series_stds[j] + self.series_means[j];
}
}
Ok(forecasts)
}
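/// Akaike information criterion. The parameter count covers the loadings,
/// the transition matrix, and the idiosyncratic variances (Q is excluded).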
pub fn aic(&self) -> f64 {
let k = (self.n_series * self.n_factors + self.n_factors * self.n_factors + self.n_series)
as f64;
-2.0 * self.log_likelihood + 2.0 * k
}
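/// Bayesian information criterion, using the same parameter count as `aic`.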
pub fn bic(&self) -> f64 {
let k = (self.n_series * self.n_factors + self.n_factors * self.n_factors + self.n_series)
as f64;
let n = self.n_obs as f64;
-2.0 * self.log_likelihood + k * n.ln()
}
}
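/// PCA-based multivariate forecaster: standardizes the panel, extracts
/// principal-component scores, fits an independent AR(1) to each score,
/// and forecasts by propagating the scores and re-projecting through the
/// loadings.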
#[derive(Debug, Clone)]
pub struct PcaForecast {
pub n_series: usize,
pub n_components: usize,
pub n_obs: usize,
pub loadings: Array2<f64>,
scores: Array2<f64>,
var_coef: Array2<f64>,
score_means: Vec<f64>,
series_means: Vec<f64>,
series_stds: Vec<f64>,
pub explained_variance_ratio: Vec<f64>,
}
impl PcaForecast {
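/// Fits loadings, scores, per-component AR(1) coefficients, and
/// explained-variance ratios on the standardized (t, n) panel.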
pub fn fit(data: &Array2<f64>, n_components: usize) -> Result<Self> {
let (t, n) = data.dim();
if n_components == 0 {
return Err(TimeSeriesError::InvalidInput(
"n_components must be at least 1".to_string(),
));
}
if n_components > n {
return Err(TimeSeriesError::InvalidInput(format!(
"n_components ({n_components}) cannot exceed n_series ({n})"
)));
}
if t < 3 {
return Err(TimeSeriesError::InvalidInput(
"Need at least 3 observations".to_string(),
));
}
let means: Vec<f64> = (0..n)
.map(|j| data.column(j).iter().sum::<f64>() / t as f64)
.collect();
let stds: Vec<f64> = (0..n)
.map(|j| {
let m = means[j];
let v: f64 = data.column(j).iter().map(|&x| (x - m).powi(2)).sum::<f64>() / t as f64;
v.sqrt().max(1e-8)
})
.collect();
let mut xstd = data.to_owned();
for j in 0..n {
for i in 0..t {
xstd[[i, j]] = (data[[i, j]] - means[j]) / stds[j];
}
}
let (loadings, scores) = pca_power_iter(&xstd, n_components, 1000);
let total_var: f64 = (0..n)
.map(|j| xstd.column(j).iter().map(|&x| x.powi(2)).sum::<f64>() / t as f64)
.sum();
let score_vars: Vec<f64> = (0..n_components)
.map(|k| {
scores.column(k).iter().map(|&x| x.powi(2)).sum::<f64>() / t as f64
})
.collect();
let evr: Vec<f64> = score_vars
.iter()
.map(|&v| if total_var > 0.0 { v / total_var } else { 0.0 })
.collect();
let score_means: Vec<f64> = (0..n_components)
.map(|k| scores.column(k).iter().sum::<f64>() / t as f64)
.collect();
let mut var_coef = Array2::<f64>::zeros((n_components, n_components));
for k in 0..n_components {
let mut xy = 0.0_f64;
let mut xx = 0.0_f64;
for i in 1..t {
xy += scores[[i, k]] * scores[[i - 1, k]];
xx += scores[[i - 1, k]].powi(2);
}
var_coef[[k, k]] = if xx > 1e-10 {
(xy / xx).clamp(-0.99, 0.99)
} else {
0.0
};
}
Ok(PcaForecast {
n_series: n,
n_components,
n_obs: t,
loadings,
scores,
var_coef,
score_means,
series_means: means,
series_stds: stds,
explained_variance_ratio: evr,
})
}
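/// Advances each component score with its AR(1) coefficient for `h` steps
/// and maps back to the original scale; returns an (h, n_series) matrix.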
pub fn forecast(&self, h: usize) -> Result<Array2<f64>> {
if h == 0 {
return Err(TimeSeriesError::InvalidInput("h must be at least 1".to_string()));
}
let n = self.n_series;
let r = self.n_components;
let t = self.n_obs;
let mut f_curr: Vec<f64> = (0..r).map(|k| self.scores[[t - 1, k]]).collect();
let mut forecasts = Array2::<f64>::zeros((h, n));
for step in 0..h {
let f_next: Vec<f64> = (0..r)
.map(|i| (0..r).map(|j| self.var_coef[[i, j]] * f_curr[j]).sum::<f64>())
.collect();
f_curr = f_next;
for j in 0..n {
let val: f64 = (0..r).map(|k| self.loadings[[j, k]] * f_curr[k]).sum();
forecasts[[step, j]] = val * self.series_stds[j] + self.series_means[j];
}
}
Ok(forecasts)
}
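/// In-sample reconstruction from the retained components, on the original
/// scale of each series.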
pub fn fitted_values(&self) -> Array2<f64> {
let t = self.n_obs;
let n = self.n_series;
let r = self.n_components;
let mut fitted = Array2::<f64>::zeros((t, n));
for i in 0..t {
for j in 0..n {
let val: f64 = (0..r).map(|k| self.scores[[i, k]] * self.loadings[[j, k]]).sum();
fitted[[i, j]] = val * self.series_stds[j] + self.series_means[j];
}
}
fitted
}
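/// Sum of the per-component explained-variance ratios.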
pub fn total_explained_variance(&self) -> f64 {
self.explained_variance_ratio.iter().sum()
}
}
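/// Convenience alias for `DynamicFactorModel`.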
pub use self::DynamicFactorModel as DFM;
#[cfg(test)]
mod tests {
use super::*;
use scirs2_core::ndarray::Array2;
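/// Builds a deterministic (t, n) test panel driven by `n_factors`
/// sinusoidal factors plus small deterministic perturbations.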
fn make_panel(t: usize, n: usize, n_factors: usize) -> Array2<f64> {
let mut data = Array2::<f64>::zeros((t, n));
for obs in 0..t {
let factors: Vec<f64> = (0..n_factors)
.map(|k| ((obs as f64 * (k + 1) as f64 * 0.15).sin()))
.collect();
for j in 0..n {
let loading = (j as f64 + 1.0) / n as f64;
data[[obs, j]] = factors.iter().enumerate().map(|(k, &f)| f * loading * (k as f64 + 1.0)).sum::<f64>();
let noise = (obs * n + j) as f64 * 0.001 % 0.01 - 0.005;
data[[obs, j]] += noise;
}
}
data
}
#[test]
fn test_dfm_fit_basic() {
let data = make_panel(50, 6, 2);
let dfm = DynamicFactorModel::fit(&data, 2, 10).expect("DFM should fit");
assert_eq!(dfm.n_series, 6);
assert_eq!(dfm.n_factors, 2);
assert_eq!(dfm.n_obs, 50);
assert_eq!(dfm.factors().dim(), (50, 2));
}
#[test]
fn test_dfm_fitted_values_shape() {
let data = make_panel(40, 5, 2);
let dfm = DynamicFactorModel::fit(&data, 2, 5).expect("DFM should fit");
let fv = dfm.fitted_values();
assert_eq!(fv.dim(), (40, 5));
}
#[test]
fn test_dfm_forecast_shape() {
let data = make_panel(40, 5, 2);
let dfm = DynamicFactorModel::fit(&data, 2, 5).expect("DFM should fit");
let fc = dfm.forecast(4).expect("Should forecast");
assert_eq!(fc.dim(), (4, 5));
for v in fc.iter() {
assert!(v.is_finite(), "Forecast value must be finite: {v}");
}
}
#[test]
fn test_dfm_invalid_n_factors() {
let data = make_panel(30, 4, 2);
assert!(DynamicFactorModel::fit(&data, 4, 5).is_err());
assert!(DynamicFactorModel::fit(&data, 0, 5).is_err());
}
#[test]
fn test_pca_forecast_fit() {
let data = make_panel(50, 8, 3);
let pca = PcaForecast::fit(&data, 3).expect("PCA should fit");
assert_eq!(pca.n_series, 8);
assert_eq!(pca.n_components, 3);
assert_eq!(pca.n_obs, 50);
assert_eq!(pca.loadings.dim(), (8, 3));
}
#[test]
fn test_pca_forecast_forecast_shape() {
let data = make_panel(50, 6, 2);
let pca = PcaForecast::fit(&data, 2).expect("PCA should fit");
let fc = pca.forecast(5).expect("Should forecast");
assert_eq!(fc.dim(), (5, 6));
for v in fc.iter() {
assert!(v.is_finite(), "Forecast value must be finite: {v}");
}
}
#[test]
fn test_pca_explained_variance() {
let data = make_panel(60, 8, 3);
let pca = PcaForecast::fit(&data, 3).expect("PCA should fit");
let total = pca.total_explained_variance();
assert!(total > 0.0, "Explained variance should be positive");
assert!(total <= 1.01, "Explained variance should be <= 1 (got {total})");
assert_eq!(pca.explained_variance_ratio.len(), 3);
}
#[test]
fn test_pca_fitted_values_shape() {
let data = make_panel(40, 5, 2);
let pca = PcaForecast::fit(&data, 2).expect("PCA should fit");
let fv = pca.fitted_values();
assert_eq!(fv.dim(), (40, 5));
}
#[test]
fn test_pca_forecast_invalid_n_components() {
let data = make_panel(30, 4, 2);
assert!(PcaForecast::fit(&data, 0).is_err());
assert!(PcaForecast::fit(&data, 5).is_err());
}
#[test]
fn test_dfm_alias() {
let data = make_panel(40, 5, 2);
let _ = DFM::fit(&data, 2, 5).expect("DFM alias should work");
}
}