use crate::error::TimeSeriesError;
use super::CausalityResult;
/// Identification scheme used to recover structural shocks from the
/// reduced-form VAR residual covariance.
#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum SvarIdent {
    /// Recursive (lower-triangular) identification via the Cholesky factor
    /// of the residual covariance; shock ordering follows variable ordering.
    #[default]
    Cholesky,
    /// Long-run (Blanchard-Quah style) restrictions on cumulative impacts.
    LongRun,
    /// Rotation search accepting draws that satisfy user-supplied sign
    /// restrictions on the impact matrix.
    SignRestriction,
}
/// Configuration for an SVAR estimation run.
#[derive(Debug, Clone)]
pub struct SvarConfig {
/// VAR lag order `p`.
pub max_lag: usize,
/// Identification scheme used to recover the structural impact matrix.
pub identification: SvarIdent,
/// Number of horizons computed for impulse responses (and used by the FEVD).
pub irf_horizon: usize,
/// Required when `identification` is `SignRestriction`; ignored otherwise.
/// Entries > 0.5 demand a positive impact response, entries < -0.5 a
/// negative one; values in between leave the entry unrestricted.
pub sign_restrictions: Option<Vec<Vec<f64>>>,
}
impl Default for SvarConfig {
fn default() -> Self {
Self {
max_lag: 1,
identification: SvarIdent::Cholesky,
irf_horizon: 12,
sign_restrictions: None,
}
}
}
/// Output of a fitted structural VAR.
#[derive(Debug, Clone)]
pub struct SvarResult {
/// Number of endogenous variables.
pub n_vars: usize,
/// VAR lag order `p` used in estimation.
pub lag_order: usize,
/// Reduced-form lag coefficient matrices `A_1..A_p`, each `n_vars x n_vars`.
pub a_matrices: Vec<Vec<Vec<f64>>>,
/// Residual covariance of the reduced-form VAR.
pub sigma: Vec<Vec<f64>>,
/// Structural impact matrix `B` mapping structural shocks to residuals.
pub b_matrix: Vec<Vec<f64>>,
/// Impulse responses: `irf[h][i][j]` is the response of variable `i` at
/// horizon `h` to structural shock `j` (`irf[0]` equals `b_matrix`).
pub irf: Vec<Vec<Vec<f64>>>,
/// Forecast-error variance decomposition accumulated over the horizon;
/// `fevd[i][j]` is shock `j`'s share for variable `i` (each row sums to 1).
pub fevd: Vec<Vec<f64>>,
}
/// Structural VAR estimator driven by a `SvarConfig`.
pub struct SvarModel {
config: SvarConfig,
}
impl SvarModel {
    /// Creates a model with the given configuration.
    pub fn new(config: SvarConfig) -> Self {
        Self { config }
    }

    /// Fits the SVAR on `data` (rows = time points, columns = variables):
    /// estimates the reduced-form VAR, identifies the structural impact
    /// matrix under the configured scheme, then derives impulse responses
    /// and the forecast-error variance decomposition.
    pub fn fit(&self, data: &[Vec<f64>]) -> CausalityResult<SvarResult> {
        let n_obs = data.len();
        if n_obs == 0 {
            return Err(TimeSeriesError::InvalidInput(
                "Data must be non-empty".to_string(),
            ));
        }
        let n_vars = data[0].len();
        if n_vars == 0 {
            return Err(TimeSeriesError::InvalidInput(
                "Data must have at least one variable".to_string(),
            ));
        }
        let lag = self.config.max_lag;
        if n_obs <= lag + n_vars {
            return Err(TimeSeriesError::InsufficientData {
                message: "Too few observations for VAR estimation".to_string(),
                required: lag + n_vars + 1,
                actual: n_obs,
            });
        }

        let (a_matrices, sigma) = fit_var(data, lag)?;

        // Recover the structural impact matrix B under the chosen scheme.
        let b_matrix = match self.config.identification {
            SvarIdent::Cholesky => cholesky_identification(&sigma)?,
            SvarIdent::LongRun => long_run_identification(&a_matrices, &sigma, lag)?,
            SvarIdent::SignRestriction => {
                let restrictions = self.config.sign_restrictions.as_deref().ok_or_else(|| {
                    TimeSeriesError::InvalidInput(
                        "sign_restrictions must be provided for SignRestriction identification"
                            .to_string(),
                    )
                })?;
                sign_restriction_identification(&sigma, restrictions)?
            }
        };

        let horizon = self.config.irf_horizon;
        let irf = compute_irf(&a_matrices, &b_matrix, n_vars, lag, horizon)?;
        let fevd = compute_fevd(&irf, n_vars, horizon)?;

        Ok(SvarResult {
            n_vars,
            lag_order: lag,
            a_matrices,
            sigma,
            b_matrix,
            irf,
            fevd,
        })
    }
}
/// Fits a reduced-form VAR(p) by equation-by-equation OLS (no intercept).
///
/// `data` is row-major: `data[t][j]` is variable `j` at time `t`. Returns
/// the lag coefficient matrices `A_1..A_p` (each `n x n`) and the
/// degrees-of-freedom-adjusted residual covariance `Sigma` (`n x n`).
///
/// # Errors
/// Returns `InvalidInput` when `data` is empty or has zero variables, and
/// `InsufficientData` when there are too few observations relative to the
/// lag order (previously these inputs caused a usize underflow or an index
/// panic, since this function is `pub` and callable directly).
pub fn fit_var(
    data: &[Vec<f64>],
    p: usize,
) -> CausalityResult<(Vec<Vec<Vec<f64>>>, Vec<Vec<f64>>)> {
    let t = data.len();
    if t == 0 {
        return Err(TimeSeriesError::InvalidInput(
            "Data must be non-empty".to_string(),
        ));
    }
    let n = data[0].len();
    if n == 0 {
        return Err(TimeSeriesError::InvalidInput(
            "Data must have at least one variable".to_string(),
        ));
    }
    // Guard against usize underflow in `t - p` below.
    if t <= p {
        return Err(TimeSeriesError::InsufficientData {
            message: "Lag order must be smaller than the number of observations".to_string(),
            required: p + 1,
            actual: t,
        });
    }
    let n_eff = t - p; // usable rows after losing the first p observations
    let k = n * p; // regressors per equation
    if n_eff < k + 1 {
        return Err(TimeSeriesError::InsufficientData {
            message: "Too few effective observations for OLS".to_string(),
            // The check requires n_eff >= k + 1 (one residual degree of
            // freedom); report that, not k + 2 as before.
            required: k + 1,
            actual: n_eff,
        });
    }
    // Build the lagged regressor matrix X (n_eff x k) and target matrix Y
    // (n_eff x n): row `row` predicts observation row + p from lags 1..=p.
    let mut x_mat = vec![vec![0.0_f64; k]; n_eff];
    let mut y_mat = vec![vec![0.0_f64; n]; n_eff];
    for row in 0..n_eff {
        let t_now = row + p;
        for j in 0..n {
            y_mat[row][j] = data[t_now][j];
        }
        for lag in 1..=p {
            let t_lag = t_now - lag;
            for j in 0..n {
                x_mat[row][(lag - 1) * n + j] = data[t_lag][j];
            }
        }
    }
    // Solve the normal equations (X'X) beta_i = X'y_i for each equation i.
    let xtx = mat_mul_t(&x_mat, &x_mat, n_eff, k, k);
    let mut a_flat = vec![vec![0.0_f64; k]; n];
    for i in 0..n {
        let xty: Vec<f64> = (0..k)
            .map(|jj| (0..n_eff).map(|row| x_mat[row][jj] * y_mat[row][i]).sum())
            .collect();
        a_flat[i] = solve_linear_system_vec(&xtx, &xty)?;
    }
    // Reshape flat per-equation coefficients into per-lag n x n matrices.
    let mut a_matrices: Vec<Vec<Vec<f64>>> = vec![vec![vec![0.0; n]; n]; p];
    for lag_idx in 0..p {
        for i in 0..n {
            for j in 0..n {
                a_matrices[lag_idx][i][j] = a_flat[i][lag_idx * n + j];
            }
        }
    }
    // Residuals of each equation at each effective observation.
    let mut residuals = vec![vec![0.0_f64; n]; n_eff];
    for row in 0..n_eff {
        for i in 0..n {
            let mut pred = 0.0_f64;
            for jj in 0..k {
                pred += x_mat[row][jj] * a_flat[i][jj];
            }
            residuals[row][i] = y_mat[row][i] - pred;
        }
    }
    // Degrees-of-freedom adjustment; the n_eff >= k + 1 check above makes
    // the denominator positive, the fallback is kept as a defensive guard.
    let denom = (n_eff - k) as f64;
    let denom = if denom > 0.0 { denom } else { 1.0 };
    let mut sigma = vec![vec![0.0_f64; n]; n];
    for row in 0..n_eff {
        for i in 0..n {
            for j in 0..n {
                sigma[i][j] += residuals[row][i] * residuals[row][j];
            }
        }
    }
    for i in 0..n {
        for j in 0..n {
            sigma[i][j] /= denom;
        }
    }
    Ok((a_matrices, sigma))
}
/// Recursive identification: `B` is the lower-triangular Cholesky factor of
/// `sigma`, so the shock ordering follows the variable ordering.
pub fn cholesky_identification(sigma: &[Vec<f64>]) -> CausalityResult<Vec<Vec<f64>>> {
cholesky_decomp(sigma)
}
/// Long-run (Blanchard-Quah style) identification.
///
/// Forms `I - A(1)` where `A(1)` is the sum of the lag coefficient
/// matrices, inverts it to obtain the long-run multiplier `C`, takes the
/// Cholesky factor of the long-run covariance `C * Sigma * C'`, and maps it
/// back via `B = (I - A(1)) * chol(C Sigma C')`. This makes the cumulative
/// long-run impact matrix `C * B` lower triangular while preserving
/// `B * B' = Sigma`.
pub fn long_run_identification(
a_matrices: &[Vec<Vec<f64>>],
sigma: &[Vec<f64>],
_p: usize,
) -> CausalityResult<Vec<Vec<f64>>> {
let n = sigma.len();
// A(1): sum of all lag coefficient matrices.
let mut sum_a = vec![vec![0.0_f64; n]; n];
for a in a_matrices {
for i in 0..n {
for j in 0..n {
sum_a[i][j] += a[i][j];
}
}
}
// c_inv = I - A(1); its inverse C is the long-run multiplier.
let mut c_inv = vec![vec![0.0_f64; n]; n]; for i in 0..n {
for j in 0..n {
c_inv[i][j] = if i == j { 1.0 } else { 0.0 } - sum_a[i][j];
}
}
let c = mat_inverse(&c_inv)?;
// Long-run covariance C Sigma C' and its lower-triangular factor.
let csigct = mat_mul_matmul(&c, sigma, &transpose(&c), n);
let chol_lr = cholesky_decomp(&csigct)?;
// Map back: B = (I - A(1)) * chol(C Sigma C').
let b = mat_mul_mat(&c_inv, &chol_lr, n);
Ok(b)
}
/// Sign-restriction identification: starts from the Cholesky factor `B0`
/// and searches candidate rotations `B0 * Q` (with `Q` orthogonal, built by
/// Gram-Schmidt from a deterministic pseudo-normal matrix) until one
/// satisfies `restrictions`.
///
/// `restrictions[i][j]` > 0.5 requires a positive impact entry, < -0.5 a
/// negative one; values in between leave the entry unrestricted.
///
/// NOTE(review): if no rotation satisfies the restrictions within 10_000
/// draws, the UNROTATED Cholesky factor is returned silently — callers
/// cannot tell whether the restrictions were actually met. Consider
/// surfacing this case to callers.
pub fn sign_restriction_identification(
sigma: &[Vec<f64>],
restrictions: &[Vec<f64>],
) -> CausalityResult<Vec<Vec<f64>>> {
let n = sigma.len();
let b0 = cholesky_decomp(sigma)?;
// Fixed seed keeps the rotation search deterministic across runs.
let mut lcg_state: u64 = 0xdeadbeef_cafe1234;
let max_draws = 10_000usize;
for _ in 0..max_draws {
let mut rand_mat = vec![vec![0.0_f64; n]; n];
for i in 0..n {
for j in 0..n {
rand_mat[i][j] = lcg_normal(&mut lcg_state);
}
}
let q = gram_schmidt(&rand_mat, n);
let b_cand = mat_mul_mat(&b0, &q, n);
if check_sign_restrictions(&b_cand, restrictions) {
return Ok(b_cand);
}
}
// Fallback: no accepted draw; return the unrotated factor (see NOTE above).
Ok(b0)
}
/// Computes structural impulse responses up to `horizon`.
///
/// Uses the MA recursion `Phi_0 = B`, `Phi_h = sum_{k=1..min(h,p)} A_k *
/// Phi_{h-k}`. The returned vector has `horizon + 1` entries of `n x n`
/// matrices; `irf[h][i][j]` is variable `i`'s response at horizon `h` to
/// structural shock `j`.
pub fn compute_irf(
    a_matrices: &[Vec<Vec<f64>>],
    b_matrix: &[Vec<f64>],
    n: usize,
    p: usize,
    horizon: usize,
) -> CausalityResult<Vec<Vec<Vec<f64>>>> {
    let mut responses: Vec<Vec<Vec<f64>>> = Vec::with_capacity(horizon + 1);
    // Impact response is the structural matrix itself.
    responses.push(b_matrix.to_vec());
    for step in 1..=horizon {
        let mut current = vec![vec![0.0_f64; n]; n];
        for lag in 1..=step.min(p) {
            let a_lag = &a_matrices[lag - 1];
            let prev = &responses[step - lag];
            for row in 0..n {
                for col in 0..n {
                    let dot: f64 = (0..n).map(|m| a_lag[row][m] * prev[m][col]).sum();
                    current[row][col] += dot;
                }
            }
        }
        responses.push(current);
    }
    Ok(responses)
}
/// Computes the forecast-error variance decomposition from impulse responses.
///
/// `fevd[i][j]` is the share of variable `i`'s forecast-error variance
/// (accumulated through `horizon`, clamped to the available IRF length)
/// attributable to structural shock `j`; each row sums to one. A row whose
/// total variance is numerically zero is split evenly across shocks.
pub fn compute_fevd(
    irf: &[Vec<Vec<f64>>],
    n: usize,
    horizon: usize,
) -> CausalityResult<Vec<Vec<f64>>> {
    let last = horizon.min(irf.len().saturating_sub(1));
    // contrib[i][j]: cumulative squared response of variable i to shock j.
    let mut contrib = vec![vec![0.0_f64; n]; n];
    for phi in irf.iter().take(last + 1) {
        for i in 0..n {
            for j in 0..n {
                contrib[i][j] += phi[i][j] * phi[i][j];
            }
        }
    }
    let mut shares = vec![vec![0.0_f64; n]; n];
    for i in 0..n {
        let total: f64 = contrib[i].iter().sum();
        if total.abs() > f64::EPSILON {
            for j in 0..n {
                shares[i][j] = contrib[i][j] / total;
            }
        } else {
            // Degenerate row: keep the sums-to-one invariant.
            shares[i] = vec![1.0 / n as f64; n];
        }
    }
    Ok(shares)
}
/// Cholesky decomposition: returns lower-triangular `L` with `L * L' = a`.
///
/// Lenient by design: a negative diagonal pivot (non-positive-definite
/// input) is regularized to `|d| + 1e-10` instead of failing, and a
/// (near-)zero diagonal yields 0.0 for the off-diagonal entries in that
/// column rather than dividing by zero.
pub fn cholesky_decomp(a: &[Vec<f64>]) -> CausalityResult<Vec<Vec<f64>>> {
    let n = a.len();
    let mut l = vec![vec![0.0_f64; n]; n];
    for i in 0..n {
        for j in 0..=i {
            // Partial inner product over the already-computed columns.
            let partial: f64 = (0..j).map(|k| l[i][k] * l[j][k]).sum();
            if i == j {
                let d = a[i][i] - partial;
                l[i][i] = if d < 0.0 {
                    (d.abs() + 1e-10).sqrt()
                } else {
                    d.sqrt()
                };
            } else if l[j][j].abs() > f64::EPSILON {
                l[i][j] = (a[i][j] - partial) / l[j][j];
            } else {
                l[i][j] = 0.0;
            }
        }
    }
    Ok(l)
}
/// Solves `L x = b` for lower-triangular `L` by forward substitution.
/// A (near-)zero diagonal entry yields 0.0 for that component rather than
/// dividing by zero, matching the module's lenient solver convention.
fn forward_substitution(l: &[Vec<f64>], b: &[f64]) -> Vec<f64> {
    let n = b.len();
    let mut x = vec![0.0_f64; n];
    for row in 0..n {
        let rhs = (0..row).fold(b[row], |acc, col| acc - l[row][col] * x[col]);
        let diag = l[row][row];
        x[row] = if diag.abs() > f64::EPSILON { rhs / diag } else { 0.0 };
    }
    x
}
/// Solves `U x = b` for upper-triangular `U` by back substitution.
/// A (near-)zero diagonal entry yields 0.0 for that component rather than
/// dividing by zero.
fn back_substitution(u: &[Vec<f64>], b: &[f64]) -> Vec<f64> {
    let n = b.len();
    let mut x = vec![0.0_f64; n];
    for row in (0..n).rev() {
        let rhs = ((row + 1)..n).fold(b[row], |acc, col| acc - u[row][col] * x[col]);
        let diag = u[row][row];
        x[row] = if diag.abs() > f64::EPSILON { rhs / diag } else { 0.0 };
    }
    x
}
/// Inverts a square matrix by Gauss-Jordan elimination with partial
/// pivoting on the augmented matrix `[A | I]`.
///
/// # Errors
/// Returns `NumericalInstability` when a pivot is smaller than 1e-14
/// (matrix singular or nearly so).
fn mat_inverse(a: &[Vec<f64>]) -> CausalityResult<Vec<Vec<f64>>> {
let n = a.len();
// Build the augmented matrix [A | I].
let mut aug = vec![vec![0.0_f64; 2 * n]; n];
for i in 0..n {
for j in 0..n {
aug[i][j] = a[i][j];
}
aug[i][n + i] = 1.0;
}
for col in 0..n {
// Partial pivoting: bring the largest |entry| of this column to the
// pivot row for numerical stability.
let mut max_val = aug[col][col].abs();
let mut max_row = col;
for row in (col + 1)..n {
if aug[row][col].abs() > max_val {
max_val = aug[row][col].abs();
max_row = row;
}
}
aug.swap(col, max_row);
let pivot = aug[col][col];
if pivot.abs() < 1e-14 {
return Err(TimeSeriesError::NumericalInstability(
"Singular matrix in mat_inverse".to_string(),
));
}
// Normalize the pivot row, then eliminate this column from all
// other rows (full Gauss-Jordan, not just below the pivot).
for j in 0..(2 * n) {
aug[col][j] /= pivot;
}
for row in 0..n {
if row != col {
let factor = aug[row][col];
for j in 0..(2 * n) {
aug[row][j] -= factor * aug[col][j];
}
}
}
}
// The right half of the augmented matrix now holds A^-1.
let mut inv = vec![vec![0.0_f64; n]; n];
for i in 0..n {
for j in 0..n {
inv[i][j] = aug[i][n + j];
}
}
Ok(inv)
}
/// Returns the transpose of a (possibly non-square) row-major matrix.
/// Column count is taken from the first row; an empty input yields an
/// empty result.
fn transpose(a: &[Vec<f64>]) -> Vec<Vec<f64>> {
    let rows = a.len();
    let cols = a.first().map_or(0, |r| r.len());
    (0..cols)
        .map(|j| (0..rows).map(|i| a[i][j]).collect())
        .collect()
}
/// Multiplies `a * b`, accumulating over at most `n` inner terms (the
/// inner dimension is clamped to `b`'s row count). Result has `a.len()`
/// rows and as many columns as `b`'s first row.
fn mat_mul_mat(a: &[Vec<f64>], b: &[Vec<f64>], n: usize) -> Vec<Vec<f64>> {
    let inner = n.min(b.len());
    let out_cols = b.first().map_or(0, |r| r.len());
    a.iter()
        .map(|a_row| {
            (0..out_cols)
                .map(|j| (0..inner).map(|l| a_row[l] * b[l][j]).sum())
                .collect()
        })
        .collect()
}
/// Computes the Gram matrix `X' X` (n_cols x n_cols) from the first
/// `n_rows` rows of `x`. The `_x2` and `_k` parameters are ignored and
/// kept only for signature compatibility with existing callers.
fn mat_mul_t(
    x: &[Vec<f64>],
    _x2: &[Vec<f64>],
    n_rows: usize,
    n_cols: usize,
    _k: usize,
) -> Vec<Vec<f64>> {
    let mut gram = vec![vec![0.0_f64; n_cols]; n_cols];
    for obs in x.iter().take(n_rows) {
        for i in 0..n_cols {
            for j in 0..n_cols {
                gram[i][j] += obs[i] * obs[j];
            }
        }
    }
    gram
}
/// Convenience triple product `a * b * c` with inner dimension `n`.
fn mat_mul_matmul(a: &[Vec<f64>], b: &[Vec<f64>], c: &[Vec<f64>], n: usize) -> Vec<Vec<f64>> {
    mat_mul_mat(&mat_mul_mat(a, b, n), c, n)
}
/// Solves the dense linear system `a x = b` by Gaussian elimination with
/// partial pivoting followed by back substitution.
///
/// Numerical leniency (deliberate): a pivot smaller than 1e-14 is replaced
/// by 1e-10 instead of reporting a singular matrix, and a (near-)zero
/// diagonal during back substitution yields 0.0 for that component.
/// Callers therefore always get a solution vector for well-shaped inputs,
/// possibly a regularized one.
///
/// # Errors
/// Returns `InvalidInput` when `a` is empty or `b`'s length differs from
/// `a`'s row count.
pub fn solve_linear_system_vec(a: &[Vec<f64>], b: &[f64]) -> CausalityResult<Vec<f64>> {
let n = a.len();
if n == 0 || b.len() != n {
return Err(TimeSeriesError::InvalidInput(
"Incompatible dimensions in linear system".to_string(),
));
}
// Build the augmented matrix [a | b].
let mut aug: Vec<Vec<f64>> = (0..n)
.map(|i| {
let mut row = a[i].clone();
row.push(b[i]);
row
})
.collect();
for col in 0..n {
// Partial pivoting: swap in the row with the largest |entry| in this column.
let mut max_val = aug[col][col].abs();
let mut max_row = col;
for row in (col + 1)..n {
if aug[row][col].abs() > max_val {
max_val = aug[row][col].abs();
max_row = row;
}
}
aug.swap(col, max_row);
let pivot = aug[col][col];
if pivot.abs() < 1e-14 {
// Regularize a (near-)zero pivot instead of failing; see doc comment.
aug[col][col] = 1e-10;
}
let pivot = aug[col][col];
// Eliminate entries below the pivot.
for row in (col + 1)..n {
let factor = aug[row][col] / pivot;
for j in col..(n + 1) {
aug[row][j] -= factor * aug[col][j];
}
}
}
// Back substitution on the upper-triangular augmented system.
let mut x = vec![0.0_f64; n];
for i in (0..n).rev() {
let mut s = aug[i][n];
for j in (i + 1)..n {
s -= aug[i][j] * x[j];
}
x[i] = if aug[i][i].abs() > 1e-14 {
s / aug[i][i]
} else {
0.0
};
}
Ok(x)
}
/// Deterministic standard-normal draw: an LCG uniform source fed through
/// the Box-Muller transform (cosine branch only). `state` is advanced
/// twice per call.
fn lcg_normal(state: &mut u64) -> f64 {
    fn next_uniform(state: &mut u64) -> f64 {
        *state = state
            .wrapping_mul(6_364_136_223_846_793_005)
            .wrapping_add(1_442_695_040_888_963_407);
        ((*state >> 11) as f64) / ((1u64 << 53) as f64)
    }
    // Clamp u1 away from zero so ln() stays finite.
    let u1 = next_uniform(state).max(1e-15);
    let u2 = next_uniform(state);
    let radius = (-2.0 * u1.ln()).sqrt();
    let angle = 2.0 * std::f64::consts::PI * u2;
    radius * angle.cos()
}
/// Orthonormalizes the columns of `a` with classical Gram-Schmidt and
/// returns the resulting orthogonal matrix (columns are the basis vectors).
/// A column that becomes numerically degenerate (norm <= 1e-14) falls back
/// to the unit vector e_j for its position.
fn gram_schmidt(a: &[Vec<f64>], n: usize) -> Vec<Vec<f64>> {
    let mut basis: Vec<Vec<f64>> = Vec::with_capacity(n);
    for col in 0..n {
        // Start from column `col` of the input matrix.
        let mut v: Vec<f64> = (0..n).map(|row| a[row][col]).collect();
        // Remove projections onto all previously accepted basis vectors.
        for prev in &basis {
            let proj: f64 = v.iter().zip(prev.iter()).map(|(vi, pi)| vi * pi).sum();
            for (vi, pi) in v.iter_mut().zip(prev.iter()) {
                *vi -= proj * pi;
            }
        }
        let norm = v.iter().map(|vi| vi * vi).sum::<f64>().sqrt();
        if norm > 1e-14 {
            for vi in &mut v {
                *vi /= norm;
            }
            basis.push(v);
        } else {
            let mut unit = vec![0.0_f64; n];
            unit[col] = 1.0;
            basis.push(unit);
        }
    }
    // Lay the basis vectors out as the columns of the output matrix.
    let mut q = vec![vec![0.0_f64; n]; n];
    for (col, basis_vec) in basis.iter().enumerate() {
        for row in 0..n {
            q[row][col] = basis_vec[row];
        }
    }
    q
}
/// Checks a candidate impact matrix `b` against sign restrictions:
/// a restriction entry > 0.5 requires the matching `b` entry to be
/// strictly positive, < -0.5 strictly negative; anything in between is
/// unrestricted. Entries outside either matrix's bounds are ignored.
fn check_sign_restrictions(b: &[Vec<f64>], restrictions: &[Vec<f64>]) -> bool {
    b.iter().zip(restrictions.iter()).all(|(b_row, r_row)| {
        b_row.iter().zip(r_row.iter()).all(|(&val, &req)| {
            if req > 0.5 {
                val > 0.0
            } else if req < -0.5 {
                val < 0.0
            } else {
                true
            }
        })
    })
}
#[cfg(test)]
mod tests {
use super::*;
// Deterministic pseudo-uniform draw in [-0.5, 0.5) from a 64-bit LCG so
// the tests are reproducible without an external RNG dependency.
fn lcg_rand(state: &mut u64) -> f64 {
*state = state
.wrapping_mul(6_364_136_223_846_793_005)
.wrapping_add(1_442_695_040_888_963_407);
((*state >> 11) as f64) / ((1u64 << 53) as f64) - 0.5
}
// Simulates `n` observations of a bivariate system: variable 0 is AR(1)
// with coefficient 0.7; variable 1 loads on lagged variable 0 (0.5) and
// on its own lag (0.3). Both series start at zero.
fn gen_bivar_ar1(n: usize, seed: u64) -> Vec<Vec<f64>> {
let mut data = vec![vec![0.0_f64; 2]; n];
let mut state = seed;
for t in 1..n {
let e1 = lcg_rand(&mut state) * 0.2;
let e2 = lcg_rand(&mut state) * 0.2;
data[t][0] = 0.7 * data[t - 1][0] + e1;
data[t][1] = 0.5 * data[t - 1][0] + 0.3 * data[t - 1][1] + e2;
}
data
}
// L L' must reconstruct the original SPD matrix.
#[test]
fn test_cholesky_decomp_correct() {
let a = vec![vec![4.0, 2.0], vec![2.0, 3.0]];
let l = cholesky_decomp(&a).expect("cholesky failed");
let n = 2;
for i in 0..n {
for j in 0..n {
let val: f64 = (0..n).map(|k| l[i][k] * l[j][k]).sum();
assert!(
(val - a[i][j]).abs() < 1e-10,
"L L'[{i},{j}] = {val} != a[{i},{j}] = {}",
a[i][j]
);
}
}
}
// The Cholesky factor of the identity is the identity.
#[test]
fn test_cholesky_identity() {
let id = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
let l = cholesky_decomp(&id).expect("cholesky failed");
assert!((l[0][0] - 1.0).abs() < 1e-10);
assert!((l[1][1] - 1.0).abs() < 1e-10);
assert!(l[1][0].abs() < 1e-10);
assert!(l[0][1].abs() < 1e-10);
}
// OLS estimates should recover the simulated AR coefficients to within
// sampling error at T = 500.
#[test]
fn test_var_fit_ar1() {
let data = gen_bivar_ar1(500, 42);
let (a_mats, _sigma) = fit_var(&data, 1).expect("fit_var failed");
assert_eq!(a_mats.len(), 1);
let a11 = a_mats[0][0][0];
assert!((a11 - 0.7).abs() < 0.1, "A[1][0,0] = {a11}, expected ≈ 0.7");
let a21 = a_mats[0][1][0];
assert!(
(a21 - 0.5).abs() < 0.15,
"A[1][1,0] = {a21}, expected ≈ 0.5"
);
}
// The impact (horizon-0) response must equal the structural matrix B.
#[test]
fn test_irf_impact_effect() {
let b = vec![vec![1.0, 0.0], vec![0.5, 1.0]];
let a = vec![vec![vec![0.3, 0.0], vec![0.1, 0.2]]];
let irf = compute_irf(&a, &b, 2, 1, 5).expect("irf failed");
assert!((irf[0][0][0] - b[0][0]).abs() < 1e-10);
assert!((irf[0][1][0] - b[1][0]).abs() < 1e-10);
}
// With stable (|root| < 1) dynamics the IRF should decay toward zero.
#[test]
fn test_irf_zero_at_horizon() {
let b = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
let a = vec![vec![vec![0.1, 0.0], vec![0.0, 0.1]]];
let irf = compute_irf(&a, &b, 2, 1, 30).expect("irf failed");
let max_abs = irf[30]
.iter()
.flat_map(|row| row.iter())
.map(|v| v.abs())
.fold(0.0_f64, f64::max);
assert!(max_abs < 0.5, "IRF should decay: max = {max_abs}");
}
// Each FEVD row is a share decomposition and must sum to one.
#[test]
fn test_fevd_sums_to_one() {
let b = vec![vec![1.0, 0.0], vec![0.5, 1.0]];
let a = vec![vec![vec![0.5, 0.1], vec![0.0, 0.4]]];
let irf = compute_irf(&a, &b, 2, 1, 12).expect("irf failed");
let fevd = compute_fevd(&irf, 2, 12).expect("fevd failed");
for i in 0..2 {
let sum: f64 = fevd[i].iter().sum();
assert!(
(sum - 1.0).abs() < 1e-10,
"FEVD row {i} sums to {sum}, expected 1"
);
}
}
// Cholesky identification must yield a lower-triangular impact matrix.
#[test]
fn test_svar_cholesky_ident() {
let data = gen_bivar_ar1(300, 7);
let config = SvarConfig {
max_lag: 1,
identification: SvarIdent::Cholesky,
irf_horizon: 8,
sign_restrictions: None,
};
let model = SvarModel::new(config);
let result = model.fit(&data).expect("svar fit failed");
assert!(
result.b_matrix[0][1].abs() < 1e-10,
"B[0,1] = {} should be zero for Cholesky",
result.b_matrix[0][1]
);
}
// End-to-end smoke test on default config: sane dims, non-empty IRF,
// FEVD rows summing to one.
#[test]
fn test_svar_full_pipeline() {
let data = gen_bivar_ar1(400, 99);
let config = SvarConfig::default();
let model = SvarModel::new(config);
let result = model.fit(&data).expect("svar full pipeline failed");
assert_eq!(result.n_vars, 2);
assert!(!result.irf.is_empty());
for i in 0..2 {
let sum: f64 = result.fevd[i].iter().sum();
assert!((sum - 1.0).abs() < 1e-10);
}
}
}