use scirs2_core::ndarray::{Array1, Array2, Axis};
use crate::error::{Result, TimeSeriesError};
/// Computes `A^T * B` without materializing the transpose of `A`.
///
/// `a` is (m, n) and `b` is (m, p); the result is (n, p).
///
/// # Errors
/// Returns `DimensionMismatch` when the row counts of `a` and `b` differ.
fn mat_transpose_mul(a: &Array2<f64>, b: &Array2<f64>) -> Result<Array2<f64>> {
    let shared = a.shape()[0];
    let a_cols = a.shape()[1];
    let b_rows = b.shape()[0];
    let b_cols = b.shape()[1];
    if shared != b_rows {
        return Err(TimeSeriesError::DimensionMismatch {
            expected: shared,
            actual: b_rows,
        });
    }
    let mut result = Array2::<f64>::zeros((a_cols, b_cols));
    for ca in 0..a_cols {
        for cb in 0..b_cols {
            // Dot product of column `ca` of A with column `cb` of B.
            let dot: f64 = (0..shared).map(|r| a[[r, ca]] * b[[r, cb]]).sum();
            result[[ca, cb]] = dot;
        }
    }
    Ok(result)
}
/// Dense matrix product `A * B` via the naive triple loop.
///
/// `a` is (m, k), `b` is (k, n); the result is (m, n).
///
/// # Errors
/// Returns `DimensionMismatch` when the inner dimensions disagree.
fn mat_mul(a: &Array2<f64>, b: &Array2<f64>) -> Result<Array2<f64>> {
    let rows = a.shape()[0];
    let inner = a.shape()[1];
    let inner_b = b.shape()[0];
    let cols = b.shape()[1];
    if inner != inner_b {
        return Err(TimeSeriesError::DimensionMismatch {
            expected: inner,
            actual: inner_b,
        });
    }
    let mut product = Array2::<f64>::zeros((rows, cols));
    for r in 0..rows {
        for c in 0..cols {
            let acc: f64 = (0..inner).map(|k| a[[r, k]] * b[[k, c]]).sum();
            product[[r, c]] = acc;
        }
    }
    Ok(product)
}
/// Matrix-vector product `A * v`.
///
/// # Errors
/// Returns `DimensionMismatch` when `v.len()` differs from A's column count.
fn mat_mul_vec(a: &Array2<f64>, v: &Array1<f64>) -> Result<Array1<f64>> {
    let rows = a.shape()[0];
    let cols = a.shape()[1];
    if cols != v.len() {
        return Err(TimeSeriesError::DimensionMismatch {
            expected: cols,
            actual: v.len(),
        });
    }
    let mut result = Array1::<f64>::zeros(rows);
    for r in 0..rows {
        // Row r of A dotted with v.
        result[r] = (0..cols).map(|c| a[[r, c]] * v[c]).sum();
    }
    Ok(result)
}
/// Inverts a symmetric positive-definite matrix through its Cholesky factor.
///
/// Factors `A = L L^T` (lower-triangular `L`), inverts `L` by forward
/// substitution, and assembles `A^{-1} = (L^{-1})^T (L^{-1})`.
///
/// # Errors
/// * `InvalidInput` when `a` is not square.
/// * `NumericalInstability` when `a` is not positive-definite or a pivot is
///   numerically indistinguishable from zero.
pub fn cholesky_inverse(a: &Array2<f64>) -> Result<Array2<f64>> {
    let n = a.shape()[0];
    if a.shape()[1] != n {
        return Err(TimeSeriesError::InvalidInput(
            "Matrix must be square".to_string(),
        ));
    }
    // Stage 1: Cholesky factorization; only the lower triangle of L is filled.
    let mut l = Array2::<f64>::zeros((n, n));
    for i in 0..n {
        for j in 0..=i {
            let mut s = a[[i, j]];
            for k in 0..j {
                s -= l[[i, k]] * l[[j, k]];
            }
            if i == j {
                // A non-positive diagonal residual means A is not positive-definite.
                if s <= 0.0 {
                    return Err(TimeSeriesError::NumericalInstability(format!(
                        "Matrix is not positive-definite at diagonal ({i},{i}): s={s}"
                    )));
                }
                l[[i, i]] = s.sqrt();
            } else {
                let diag = l[[j, j]];
                if diag.abs() < f64::EPSILON * 1e6 {
                    return Err(TimeSeriesError::NumericalInstability(
                        "Near-zero diagonal in Cholesky".to_string(),
                    ));
                }
                l[[i, j]] = s / diag;
            }
        }
    }
    // Stage 2: invert L column by column via forward substitution (L * L_inv = I).
    let mut l_inv = Array2::<f64>::zeros((n, n));
    for col in 0..n {
        for row in 0..n {
            let mut s = if row == col { 1.0 } else { 0.0 };
            for k in 0..row {
                s -= l[[row, k]] * l_inv[[k, col]];
            }
            let diag = l[[row, row]];
            if diag.abs() < f64::EPSILON * 1e6 {
                return Err(TimeSeriesError::NumericalInstability(
                    "Near-zero Cholesky diagonal during inversion".to_string(),
                ));
            }
            l_inv[[row, col]] = s / diag;
        }
    }
    // Stage 3: A^{-1} = (L^{-1})^T (L^{-1}).
    let mut a_inv = Array2::<f64>::zeros((n, n));
    for i in 0..n {
        for j in 0..n {
            let mut s = 0.0_f64;
            for k in 0..n {
                s += l_inv[[k, i]] * l_inv[[k, j]];
            }
            a_inv[[i, j]] = s;
        }
    }
    Ok(a_inv)
}
/// Builds the MinT projection matrix `P = (S^T W^{-1} S)^{-1} S^T W^{-1}`.
///
/// `s` is the (n, m) summation matrix and `w_inv` the (n, n) inverse
/// weighting matrix; `P * y_hat` yields reconciled bottom-level forecasts.
///
/// # Errors
/// Returns an error when `w_inv` is not (n, n) or when `S^T W^{-1} S` fails
/// Cholesky inversion (e.g. rank-deficient `S`).
fn projection_matrix(s: &Array2<f64>, w_inv: &Array2<f64>) -> Result<Array2<f64>> {
    let n = s.shape()[0];
    if w_inv.shape() != [n, n] {
        return Err(TimeSeriesError::InvalidInput(format!(
            "W_inv must be ({n},{n}), got ({},{})",
            w_inv.shape()[0],
            w_inv.shape()[1]
        )));
    }
    // C = S^T W^{-1}  (m x n).
    let c = mat_transpose_mul(s, w_inv)?;
    // M = S^T W^{-1} S  (m x m), symmetric positive-definite for full-rank S.
    let m_mat = mat_mul(&c, s)?;
    let m_inv = cholesky_inverse(&m_mat)?;
    mat_mul(&m_inv, &c)
}
/// Minimum-trace (MinT) family of hierarchical forecast reconciliation methods.
///
/// All methods take base forecasts of shape (n, horizon) and a summation
/// matrix `s` of shape (n, m) — n total series, m bottom-level series — and
/// return coherent forecasts of the same shape as the input.
pub struct MinTraceReconciliation;

impl MinTraceReconciliation {
    /// Shared tail of every MinT variant: form the projection for the given
    /// `W^{-1}` and map base forecasts to coherent ones via `S * P * y_hat`.
    fn reconcile_with_w_inv(
        base_forecasts: &Array2<f64>,
        s: &Array2<f64>,
        w_inv: &Array2<f64>,
    ) -> Result<Array2<f64>> {
        let p = projection_matrix(s, w_inv)?;
        let sp = mat_mul(s, &p)?;
        mat_mul(&sp, base_forecasts)
    }

    /// OLS reconciliation: MinT with identity weights (`W = I`).
    ///
    /// # Errors
    /// Returns `DimensionMismatch` when `base_forecasts` does not have `n`
    /// rows, or propagates numerical errors from the projection.
    pub fn reconcile_ols(base_forecasts: &Array2<f64>, s: &Array2<f64>) -> Result<Array2<f64>> {
        let n = s.shape()[0];
        if base_forecasts.shape()[0] != n {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: n,
                actual: base_forecasts.shape()[0],
            });
        }
        let w_inv = Array2::<f64>::eye(n);
        Self::reconcile_with_w_inv(base_forecasts, s, &w_inv)
    }

    /// WLS reconciliation with diagonal weights `w` (one per series).
    ///
    /// # Errors
    /// Errors on dimension mismatches or when any weight is not strictly
    /// positive (its reciprocal is used on the diagonal of `W^{-1}`).
    pub fn reconcile_wls(
        base_forecasts: &Array2<f64>,
        s: &Array2<f64>,
        w: &Array1<f64>,
    ) -> Result<Array2<f64>> {
        let n = s.shape()[0];
        if base_forecasts.shape()[0] != n {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: n,
                actual: base_forecasts.shape()[0],
            });
        }
        if w.len() != n {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: n,
                actual: w.len(),
            });
        }
        for (i, &wi) in w.iter().enumerate() {
            if wi <= 0.0 {
                return Err(TimeSeriesError::InvalidParameter {
                    name: format!("w[{i}]"),
                    message: "Weight must be strictly positive".to_string(),
                });
            }
        }
        // W is diagonal, so its inverse is the diagonal of reciprocals.
        let mut w_inv = Array2::<f64>::zeros((n, n));
        for i in 0..n {
            w_inv[[i, i]] = 1.0 / w[i];
        }
        Self::reconcile_with_w_inv(base_forecasts, s, &w_inv)
    }

    /// MinT with shrinkage: estimates the in-sample residual covariance,
    /// shrinks it toward its diagonal, regularizes the diagonal, and uses
    /// its inverse as the MinT weight matrix.
    ///
    /// `residuals` has shape (n, T) with one row of residuals per series.
    ///
    /// # Errors
    /// Errors on dimension mismatches, fewer than 2 residual observations,
    /// or a shrunk covariance that fails Cholesky inversion.
    pub fn reconcile_mint_shrink(
        base_forecasts: &Array2<f64>,
        s: &Array2<f64>,
        residuals: &Array2<f64>,
    ) -> Result<Array2<f64>> {
        let n = s.shape()[0];
        let t_obs = residuals.shape()[1];
        if base_forecasts.shape()[0] != n {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: n,
                actual: base_forecasts.shape()[0],
            });
        }
        if residuals.shape()[0] != n {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: n,
                actual: residuals.shape()[0],
            });
        }
        if t_obs < 2 {
            return Err(TimeSeriesError::InsufficientData {
                message: "Need at least 2 residual observations for covariance estimation"
                    .to_string(),
                required: 2,
                actual: t_obs,
            });
        }
        let t_f64 = t_obs as f64;
        // Row means, used to center the covariance estimate.
        let means: Vec<f64> = (0..n)
            .map(|i| residuals.row(i).iter().copied().sum::<f64>() / t_f64)
            .collect();
        // Unbiased sample covariance, accumulated symmetrically.
        let mut sigma = Array2::<f64>::zeros((n, n));
        for t in 0..t_obs {
            for i in 0..n {
                let ei = residuals[[i, t]] - means[i];
                for j in 0..=i {
                    let ej = residuals[[j, t]] - means[j];
                    sigma[[i, j]] += ei * ej;
                    if i != j {
                        sigma[[j, i]] += ei * ej;
                    }
                }
            }
        }
        let scale = 1.0 / (t_f64 - 1.0);
        for v in sigma.iter_mut() {
            *v *= scale;
        }
        // Shrinkage intensity from the fraction of squared mass off the
        // diagonal, damped so rho stays within [0, 1].
        let mut sum_off_sq = 0.0_f64;
        let mut sum_all_sq = 0.0_f64;
        for i in 0..n {
            for j in 0..n {
                let v = sigma[[i, j]];
                sum_all_sq += v * v;
                if i != j {
                    sum_off_sq += v * v;
                }
            }
        }
        let rho = if sum_all_sq < f64::EPSILON {
            0.0_f64
        } else {
            let phi = sum_off_sq / sum_all_sq;
            let denom = 1.0 + phi * (1.0 - 1.0 / n as f64);
            (phi / denom).clamp(0.0, 1.0)
        };
        // W = (1 - rho) * Sigma + rho * diag(Sigma); diagonal entries are preserved.
        let mut w_shrunk = sigma.clone();
        for i in 0..n {
            for j in 0..n {
                w_shrunk[[i, j]] *= 1.0 - rho;
            }
            w_shrunk[[i, i]] += rho * sigma[[i, i]];
        }
        // Small diagonal ridge (relative to the largest variance) keeps the
        // subsequent Cholesky factorization stable.
        let eps_reg = 1e-8
            * (0..n)
                .map(|i| w_shrunk[[i, i]])
                .fold(f64::NEG_INFINITY, f64::max)
                .max(1e-8);
        for i in 0..n {
            w_shrunk[[i, i]] += eps_reg;
        }
        let w_inv = cholesky_inverse(&w_shrunk)?;
        Self::reconcile_with_w_inv(base_forecasts, s, &w_inv)
    }
}
/// Empirical risk minimization (ERM) reconciliation with a ridge penalty.
pub struct ErmReconciliation {
    /// Ridge regularization strength added to the Gram matrix diagonal.
    pub lambda: f64,
}

impl ErmReconciliation {
    /// Creates a new ERM reconciler.
    ///
    /// # Errors
    /// Rejects `lambda` values that are negative or non-finite. (The old
    /// `lambda < 0.0` check let NaN through, since `NaN < 0.0` is false,
    /// which would silently poison the downstream linear algebra.)
    pub fn new(lambda: f64) -> Result<Self> {
        if !lambda.is_finite() || lambda < 0.0 {
            return Err(TimeSeriesError::InvalidParameter {
                name: "lambda".to_string(),
                message: "Ridge parameter must be a non-negative finite number".to_string(),
            });
        }
        Ok(Self { lambda })
    }

    /// Learns a reconciliation map `G` by ridge regression of training
    /// actuals on training base forecasts, projects it onto the coherent
    /// subspace with the OLS projector `S (S^T S)^{-1} S^T`, and applies the
    /// result to the test-period base forecasts.
    ///
    /// # Errors
    /// Errors on shape mismatches or when a Gram matrix fails Cholesky
    /// inversion.
    pub fn reconcile(
        &self,
        base_forecasts_train: &Array2<f64>,
        actuals_train: &Array2<f64>,
        base_forecasts_test: &Array2<f64>,
        s: &Array2<f64>,
    ) -> Result<Array2<f64>> {
        let n = s.shape()[0];
        let t_train = base_forecasts_train.shape()[1];
        if base_forecasts_train.shape()[0] != n
            || actuals_train.shape() != base_forecasts_train.shape()
        {
            return Err(TimeSeriesError::InvalidInput(
                "Training arrays must have shape (n, T_train)".to_string(),
            ));
        }
        if base_forecasts_test.shape()[0] != n {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: n,
                actual: base_forecasts_test.shape()[0],
            });
        }
        // B = F F^T + lambda * I  (ridge-regularized Gram matrix of base forecasts).
        let mut b = Array2::<f64>::zeros((n, n));
        for t in 0..t_train {
            for i in 0..n {
                for j in 0..n {
                    b[[i, j]] += base_forecasts_train[[i, t]] * base_forecasts_train[[j, t]];
                }
            }
        }
        for i in 0..n {
            b[[i, i]] += self.lambda;
        }
        // C = Y F^T  (cross-moments of actuals with base forecasts).
        let mut c = Array2::<f64>::zeros((n, n));
        for t in 0..t_train {
            for i in 0..n {
                for j in 0..n {
                    c[[i, j]] += actuals_train[[i, t]] * base_forecasts_train[[j, t]];
                }
            }
        }
        // Unconstrained ridge solution G_raw = C B^{-1}.
        let b_inv = cholesky_inverse(&b)?;
        let g_raw = mat_mul(&c, &b_inv)?;
        // OLS coherency projector S (S^T S)^{-1} S^T.
        let sts = mat_transpose_mul(s, s)?;
        let sts_inv = cholesky_inverse(&sts)?;
        let st = {
            let (rows, cols) = (s.shape()[0], s.shape()[1]);
            let mut st = Array2::<f64>::zeros((cols, rows));
            for i in 0..rows {
                for j in 0..cols {
                    st[[j, i]] = s[[i, j]];
                }
            }
            st
        };
        let p_ols = mat_mul(&sts_inv, &st)?;
        let sp_ols = mat_mul(s, &p_ols)?;
        let g_coherent = mat_mul(&sp_ols, &g_raw)?;
        mat_mul(&g_coherent, base_forecasts_test)
    }
}
/// OLS-reconciles, clamps negative bottom-level forecasts to zero, then
/// re-aggregates every upper level so the output stays coherent.
///
/// Bottom-level rows of `s` are identified as rows with exactly one entry
/// above 0.5 and mapped to the column holding that entry. The previous
/// implementation paired `bottom_ids[j]` positionally with column `j`,
/// which silently mis-aggregated whenever the bottom rows were not ordered
/// like the columns of `s`; the explicit column-to-row map fixes that.
///
/// # Errors
/// Propagates any error from the underlying OLS reconciliation.
pub fn nonnegative_reconcile(forecasts: &Array2<f64>, s: &Array2<f64>) -> Result<Array2<f64>> {
    let mut reconciled = MinTraceReconciliation::reconcile_ols(forecasts, s)?;
    let m = s.shape()[1];
    let n = s.shape()[0];
    let h = reconciled.shape()[1];
    // col_to_row[j] = the bottom-level row whose single nonzero sits in column j.
    let mut col_to_row: Vec<Option<usize>> = vec![None; m];
    for i in 0..n {
        let nonzero_cols: Vec<usize> = s
            .row(i)
            .iter()
            .enumerate()
            .filter(|&(_, &v)| v > 0.5)
            .map(|(j, _)| j)
            .collect();
        if nonzero_cols.len() == 1 {
            col_to_row[nonzero_cols[0]] = Some(i);
        }
    }
    let bottom_rows: Vec<usize> = col_to_row.iter().flatten().copied().collect();
    for t in 0..h {
        // Clamp bottom-level forecasts at zero.
        for &i in &bottom_rows {
            if reconciled[[i, t]] < 0.0 {
                reconciled[[i, t]] = 0.0;
            }
        }
        // Recompute every aggregate row from the clamped bottom level.
        for i in 0..n {
            if bottom_rows.contains(&i) {
                continue;
            }
            let mut agg = 0.0_f64;
            for j in 0..m {
                if let Some(row) = col_to_row[j] {
                    agg += s[[i, j]] * reconciled[[row, t]];
                }
            }
            reconciled[[i, t]] = agg;
        }
    }
    Ok(reconciled)
}
/// Per-series Mean Absolute Scaled Error over a forecast horizon.
///
/// Each series' forecast MAE is divided by the in-sample MAE of the naive
/// one-step forecast on its training row; when that denominator is below
/// machine epsilon, the unscaled MAE is reported instead.
///
/// # Errors
/// Errors when shapes disagree or fewer than 2 training observations exist.
pub fn mase_hierarchical(
    actuals: &Array2<f64>,
    forecasts: &Array2<f64>,
    training: &Array2<f64>,
) -> Result<Array1<f64>> {
    let n = actuals.shape()[0];
    let h = actuals.shape()[1];
    if forecasts.shape() != actuals.shape() {
        return Err(TimeSeriesError::InvalidInput(format!(
            "forecasts shape {:?} != actuals shape {:?}",
            forecasts.shape(),
            actuals.shape()
        )));
    }
    if training.shape()[0] != n {
        return Err(TimeSeriesError::DimensionMismatch {
            expected: n,
            actual: training.shape()[0],
        });
    }
    let t_train = training.shape()[1];
    if t_train < 2 {
        return Err(TimeSeriesError::InsufficientData {
            message: "MASE requires at least 2 training observations".to_string(),
            required: 2,
            actual: t_train,
        });
    }
    let mut result = Array1::<f64>::zeros(n);
    for i in 0..n {
        let forecast_mae = (0..h)
            .map(|t| (actuals[[i, t]] - forecasts[[i, t]]).abs())
            .sum::<f64>()
            / h as f64;
        let naive_mae = (1..t_train)
            .map(|t| (training[[i, t]] - training[[i, t - 1]]).abs())
            .sum::<f64>()
            / (t_train - 1) as f64;
        result[i] = if naive_mae < f64::EPSILON {
            forecast_mae
        } else {
            forecast_mae / naive_mae
        };
    }
    Ok(result)
}
/// Estimates per-series residual variances (floored at 1e-10) for use as
/// WLS reconciliation weights.
///
/// # Errors
/// Errors when fewer than 2 observations per series are available.
pub fn wls_variance_weights(residuals: &Array2<f64>) -> Result<Array1<f64>> {
    let n = residuals.shape()[0];
    let t_obs = residuals.shape()[1];
    if t_obs < 2 {
        return Err(TimeSeriesError::InsufficientData {
            message: "Need at least 2 observations for variance estimation".to_string(),
            required: 2,
            actual: t_obs,
        });
    }
    let samples = t_obs as f64;
    let mut weights = Array1::<f64>::zeros(n);
    for i in 0..n {
        let row = residuals.row(i);
        let mean = row.iter().copied().sum::<f64>() / samples;
        // Unbiased sample variance, floored so downstream reciprocals stay finite.
        let var = row.iter().map(|&r| (r - mean).powi(2)).sum::<f64>() / (samples - 1.0);
        weights[i] = var.max(1e-10);
    }
    Ok(weights)
}
/// Structural WLS weights: each series is weighted by its row sum in `s`
/// (the number of bottom-level series it aggregates), floored at 1.
pub fn wls_structural_weights(s: &Array2<f64>) -> Array1<f64> {
    let n = s.shape()[0];
    let mut weights = Array1::<f64>::zeros(n);
    for i in 0..n {
        let aggregated: f64 = s.row(i).iter().copied().sum();
        weights[i] = aggregated.max(1.0);
    }
    weights
}
/// Convenience wrapper: OLS reconciliation (MinT with identity weights).
pub struct OLSReconciliation;

impl OLSReconciliation {
    /// Delegates to [`MinTraceReconciliation::reconcile_ols`].
    pub fn reconcile(
        base_forecasts: &Array2<f64>,
        s: &Array2<f64>,
    ) -> Result<Array2<f64>> {
        MinTraceReconciliation::reconcile_ols(base_forecasts, s)
    }
}
/// Convenience wrapper: WLS reconciliation with diagonal weights.
pub struct WLSReconciliation;

impl WLSReconciliation {
    /// Delegates to [`MinTraceReconciliation::reconcile_wls`].
    pub fn reconcile(
        base_forecasts: &Array2<f64>,
        s: &Array2<f64>,
        w: &Array1<f64>,
    ) -> Result<Array2<f64>> {
        MinTraceReconciliation::reconcile_wls(base_forecasts, s, w)
    }
}
/// Convenience wrapper: MinT reconciliation with shrinkage-estimated covariance.
pub struct MinTReconciliation;

impl MinTReconciliation {
    /// Delegates to [`MinTraceReconciliation::reconcile_mint_shrink`].
    pub fn reconcile(
        base_forecasts: &Array2<f64>,
        s: &Array2<f64>,
        residuals: &Array2<f64>,
    ) -> Result<Array2<f64>> {
        MinTraceReconciliation::reconcile_mint_shrink(base_forecasts, s, residuals)
    }
}
/// Bottom-up reconciliation: discard upper-level base forecasts and rebuild
/// every level by aggregating the bottom-level forecasts through `S`.
pub struct BottomUpReconciliation;

impl BottomUpReconciliation {
    /// Computes `S * bottom`, where `bottom` is the last `m` rows of
    /// `base_forecasts` (the bottom-level series).
    ///
    /// # Errors
    /// Errors when `base_forecasts` does not have `n` rows or when `s` has
    /// more columns than rows.
    pub fn reconcile(
        base_forecasts: &Array2<f64>,
        s: &Array2<f64>,
    ) -> Result<Array2<f64>> {
        let n_all = s.shape()[0];
        let n_bottom = s.shape()[1];
        let horizon = base_forecasts.shape()[1];
        if base_forecasts.shape()[0] != n_all {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: n_all,
                actual: base_forecasts.shape()[0],
            });
        }
        if n_all < n_bottom {
            return Err(TimeSeriesError::InvalidInput(
                "S has more columns (bottom series) than rows (total series)".to_string(),
            ));
        }
        let offset = n_all - n_bottom;
        let mut out = Array2::<f64>::zeros((n_all, horizon));
        for row in 0..n_all {
            for step in 0..horizon {
                // Aggregate bottom forecasts straight from the input; the
                // bottom block starts at row `offset`.
                out[[row, step]] = (0..n_bottom)
                    .map(|j| s[[row, j]] * base_forecasts[[offset + j, step]])
                    .sum();
            }
        }
        Ok(out)
    }
}
/// Strategy for deriving bottom-level proportions in top-down reconciliation.
#[derive(Debug, Clone, PartialEq)]
pub enum TopDownMethod {
    /// Average over history of each bottom series' share of the top-level actual.
    AverageHistoricalProportion,
    /// Median over history of each bottom series' absolute share of the bottom-level total.
    ProportionalMedianAbsolutePct,
    /// Equal split: 1 / n_bottom for every bottom series.
    Proportional,
}
/// Top-down reconciliation: disaggregates the top-level base forecast to the
/// bottom level using historically derived proportions, then re-aggregates.
pub struct TopDownReconciliation {
    /// How the bottom-level disaggregation proportions are computed.
    pub method: TopDownMethod,
}

impl TopDownReconciliation {
    /// Creates a top-down reconciler using the given proportion method.
    pub fn new(method: TopDownMethod) -> Self {
        Self { method }
    }

    /// Disaggregates row 0 of `base_forecasts` (the top level) to the bottom
    /// level and rebuilds the upper levels through `s`. `actuals` supplies
    /// the (n, T_hist) history used to estimate the proportions.
    ///
    /// # Errors
    /// Errors when row counts disagree with `s`, or when `s` has more
    /// columns than rows. (The latter check mirrors `BottomUpReconciliation`
    /// and prevents the `n_all - n_bottom` subtraction from underflowing.)
    pub fn reconcile(
        &self,
        base_forecasts: &Array2<f64>,
        s: &Array2<f64>,
        actuals: &Array2<f64>,
    ) -> Result<Array2<f64>> {
        let n_all = s.shape()[0];
        let n_bottom = s.shape()[1];
        let t = base_forecasts.shape()[1];
        if base_forecasts.shape()[0] != n_all {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: n_all,
                actual: base_forecasts.shape()[0],
            });
        }
        if actuals.shape()[0] != n_all {
            return Err(TimeSeriesError::DimensionMismatch {
                expected: n_all,
                actual: actuals.shape()[0],
            });
        }
        if n_all < n_bottom {
            return Err(TimeSeriesError::InvalidInput(
                "S has more columns (bottom series) than rows (total series)".to_string(),
            ));
        }
        let bottom_start = n_all - n_bottom;
        let t_hist = actuals.shape()[1];
        let mut proportions = vec![0.0_f64; n_bottom];
        for j in 0..n_bottom {
            let bottom_idx = bottom_start + j;
            let prop = match self.method {
                TopDownMethod::AverageHistoricalProportion => {
                    // Mean of bottom/top over periods whose top actual is nonzero.
                    let mut sum = 0.0_f64;
                    let mut count = 0_usize;
                    for tt in 0..t_hist {
                        let top = actuals[[0, tt]];
                        if top.abs() > 1e-12 {
                            sum += actuals[[bottom_idx, tt]] / top;
                            count += 1;
                        }
                    }
                    if count == 0 {
                        1.0 / n_bottom as f64
                    } else {
                        sum / count as f64
                    }
                }
                TopDownMethod::ProportionalMedianAbsolutePct => {
                    // Median of this series' absolute share of the bottom-level total.
                    let mut ratios: Vec<f64> = Vec::with_capacity(t_hist);
                    for tt in 0..t_hist {
                        let total: f64 = (0..n_bottom)
                            .map(|jj| actuals[[bottom_start + jj, tt]].abs())
                            .sum();
                        if total > 1e-12 {
                            ratios.push(actuals[[bottom_idx, tt]].abs() / total);
                        }
                    }
                    if ratios.is_empty() {
                        1.0 / n_bottom as f64
                    } else {
                        ratios.sort_by(|a, b| {
                            a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)
                        });
                        let mid = ratios.len() / 2;
                        if ratios.len() % 2 == 0 {
                            (ratios[mid - 1] + ratios[mid]) / 2.0
                        } else {
                            ratios[mid]
                        }
                    }
                }
                TopDownMethod::Proportional => 1.0 / n_bottom as f64,
            };
            proportions[j] = prop;
        }
        // Normalize so the bottom level sums exactly to the top-level forecast.
        let prop_sum: f64 = proportions.iter().sum();
        if prop_sum > 1e-12 {
            for p in &mut proportions {
                *p /= prop_sum;
            }
        } else {
            for p in &mut proportions {
                *p = 1.0 / n_bottom as f64;
            }
        }
        let mut out = Array2::<f64>::zeros((n_all, t));
        for j in 0..n_bottom {
            for tt in 0..t {
                out[[bottom_start + j, tt]] = proportions[j] * base_forecasts[[0, tt]];
            }
        }
        // Rebuild the upper levels from the disaggregated bottom block.
        // Safe to read `out` directly: this loop writes only rows below
        // `bottom_start` and reads only rows at or above it.
        for i in 0..bottom_start {
            for tt in 0..t {
                let mut sum = 0.0_f64;
                for j in 0..n_bottom {
                    sum += s[[i, j]] * out[[bottom_start + j, tt]];
                }
                out[[i, tt]] = sum;
            }
        }
        Ok(out)
    }
}
/// Builders for hierarchy summation matrices `S` (n_total x n_bottom).
pub struct SummationMatrix;

impl SummationMatrix {
    /// Builds `S` from a parent-pointer representation: `parents[i]` is the
    /// parent of node `i`, or `None` for a root. Leaves are nodes that never
    /// appear as a parent; each leaf becomes one column, and the leaf plus
    /// every ancestor on its path to the root gets a 1 in that column.
    ///
    /// # Errors
    /// Errors on an empty parent list, a hierarchy with no leaves, or a
    /// cyclic parent chain. (The original walk looped forever on cycles,
    /// e.g. `parents = [Some(1), Some(2), Some(1)]`.)
    pub fn from_parents(parents: &[Option<usize>]) -> Result<Array2<f64>> {
        let n = parents.len();
        if n == 0 {
            return Err(TimeSeriesError::InvalidInput(
                "Empty parent list".to_string(),
            ));
        }
        let mut is_parent = vec![false; n];
        for &p in parents.iter().flatten() {
            if p < n {
                is_parent[p] = true;
            }
        }
        let leaves: Vec<usize> = (0..n).filter(|&i| !is_parent[i]).collect();
        let n_leaves = leaves.len();
        if n_leaves == 0 {
            return Err(TimeSeriesError::InvalidInput(
                "No leaf nodes found in hierarchy".to_string(),
            ));
        }
        let mut s = Array2::<f64>::zeros((n, n_leaves));
        for (col, &leaf) in leaves.iter().enumerate() {
            let mut node = leaf;
            // A valid ancestor chain visits each node at most once, so more
            // than `n` steps proves the parent links contain a cycle.
            let mut steps = 0_usize;
            loop {
                s[[node, col]] = 1.0;
                steps += 1;
                if steps > n {
                    return Err(TimeSeriesError::InvalidInput(
                        "Cycle detected in hierarchy parent links".to_string(),
                    ));
                }
                match parents[node] {
                    Some(p) if p < n => node = p,
                    _ => break,
                }
            }
        }
        Ok(s)
    }

    /// Builds the two-level `S`: one total row of ones above an identity
    /// block of `n_bottom` bottom rows.
    ///
    /// # Errors
    /// Errors when `n_bottom` is zero.
    pub fn two_level(n_bottom: usize) -> Result<Array2<f64>> {
        if n_bottom == 0 {
            return Err(TimeSeriesError::InvalidInput(
                "n_bottom must be at least 1".to_string(),
            ));
        }
        let mut s = Array2::<f64>::zeros((n_bottom + 1, n_bottom));
        for j in 0..n_bottom {
            s[[0, j]] = 1.0;
            s[[j + 1, j]] = 1.0;
        }
        Ok(s)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::hierarchical::{summing_matrix, Hierarchy};
    use scirs2_core::ndarray::array;

    /// Builds Total -> (A -> A1, A2; B -> B1): 6 nodes, 3 bottom series.
    fn build_simple_hierarchy() -> (Hierarchy, Array2<f64>) {
        let mut h = Hierarchy::new();
        let total = h.add_node("Total", None).expect("failed to create total");
        let a = h.add_node("A", Some(total)).expect("failed to create a");
        let b = h.add_node("B", Some(total)).expect("failed to create b");
        h.add_node("A1", Some(a)).expect("unexpected None or Err");
        h.add_node("A2", Some(a)).expect("unexpected None or Err");
        h.add_node("B1", Some(b)).expect("unexpected None or Err");
        let s = summing_matrix(&h).expect("failed to create s");
        (h, s)
    }

    #[test]
    fn test_ols_reconciliation_coherence() {
        let (_, s) = build_simple_hierarchy();
        let n = s.shape()[0];
        let h = 2;
        // Deliberately incoherent base forecasts (totals != sums of children).
        let mut base = Array2::<f64>::zeros((n, h));
        base[[0, 0]] = 100.0;
        base[[1, 0]] = 42.0;
        base[[2, 0]] = 61.0;
        base[[3, 0]] = 20.0;
        base[[4, 0]] = 25.0;
        base[[5, 0]] = 58.0;
        base[[0, 1]] = 110.0;
        base[[1, 1]] = 45.0;
        base[[2, 1]] = 68.0;
        base[[3, 1]] = 22.0;
        base[[4, 1]] = 26.0;
        base[[5, 1]] = 65.0;
        let reconciled =
            MinTraceReconciliation::reconcile_ols(&base, &s).expect("failed to create reconciled");
        assert_eq!(reconciled.shape(), &[6, 2]);
        // After reconciliation the total must equal the bottom-level sum.
        let m = s.shape()[1];
        for t in 0..h {
            let mut bottom_sum = 0.0_f64;
            for j in 0..m {
                bottom_sum += reconciled[[3 + j, t]];
            }
            let total = reconciled[[0, t]];
            assert!(
                (total - bottom_sum).abs() < 1e-6,
                "Coherence violated at t={t}: total={total}, bottom_sum={bottom_sum}"
            );
        }
    }

    #[test]
    fn test_wls_reconciliation() {
        let (_, s) = build_simple_hierarchy();
        let n = s.shape()[0];
        let base = Array2::<f64>::ones((n, 1));
        let weights = Array1::from_vec(vec![2.0; n]);
        let result =
            MinTraceReconciliation::reconcile_wls(&base, &s, &weights).expect("failed to create result");
        assert_eq!(result.shape(), &[6, 1]);
    }

    #[test]
    fn test_mint_shrink_reconciliation() {
        let (_, s) = build_simple_hierarchy();
        let n = s.shape()[0];
        let h = 1;
        let base = Array2::<f64>::ones((n, h));
        // Deterministic synthetic residuals with cross-series correlation.
        let t_r = 20;
        let mut residuals = Array2::<f64>::zeros((n, t_r));
        for i in 0..n {
            for t in 0..t_r {
                residuals[[i, t]] = ((i + t) as f64) * 0.1 - 0.5;
            }
        }
        let result = MinTraceReconciliation::reconcile_mint_shrink(&base, &s, &residuals)
            .expect("failed to create result");
        assert_eq!(result.shape(), &[6, 1]);
    }

    #[test]
    fn test_mase_hierarchical() {
        let n = 3;
        let h = 4;
        let t_train = 10;
        let actuals = Array2::<f64>::ones((n, h)) * 5.0;
        let forecasts = Array2::<f64>::ones((n, h)) * 4.0;
        // Linearly trending training data gives a nonzero naive MAE.
        let mut training = Array2::<f64>::zeros((n, t_train));
        for i in 0..n {
            for t in 0..t_train {
                training[[i, t]] = 5.0 + (t as f64) * 0.1;
            }
        }
        let mase =
            mase_hierarchical(&actuals, &forecasts, &training).expect("failed to create mase");
        assert_eq!(mase.len(), n);
        for &v in mase.iter() {
            assert!(v > 0.0, "MASE must be positive");
        }
    }

    #[test]
    fn test_nonnegative_reconcile() {
        let (_, s) = build_simple_hierarchy();
        let n = s.shape()[0];
        let mut base = Array2::<f64>::zeros((n, 2));
        // One negative bottom-level forecast at t = 0.
        base[[3, 0]] = -5.0;
        base[[4, 0]] = 20.0;
        base[[5, 0]] = 30.0;
        base[[1, 0]] = 15.0;
        base[[2, 0]] = 30.0;
        base[[0, 0]] = 45.0;
        base[[3, 1]] = 10.0;
        base[[4, 1]] = 15.0;
        base[[5, 1]] = 20.0;
        base[[1, 1]] = 25.0;
        base[[2, 1]] = 20.0;
        base[[0, 1]] = 45.0;
        let result = nonnegative_reconcile(&base, &s).expect("failed to create result");
        assert_eq!(result.shape(), &[6, 2]);
        for t in 0..2 {
            for &i in &[3usize, 4, 5] {
                assert!(
                    result[[i, t]] >= -1e-10,
                    "Negative bottom-level value at node {i}, t={t}: {}",
                    result[[i, t]]
                );
            }
        }
    }

    #[test]
    fn test_wls_structural_weights() {
        let (_, s) = build_simple_hierarchy();
        let weights = wls_structural_weights(&s);
        // Total aggregates 3 bottom series; a bottom series aggregates itself.
        assert!((weights[0] - 3.0).abs() < 1e-9);
        assert!((weights[3] - 1.0).abs() < 1e-9);
    }

    #[test]
    fn test_cholesky_inverse() {
        let a = array![[4.0, 2.0], [2.0, 3.0]];
        let a_inv = cholesky_inverse(&a).expect("failed to create a_inv");
        // A * A^{-1} must be the identity.
        let prod = mat_mul(&a, &a_inv).expect("failed to create prod");
        assert!((prod[[0, 0]] - 1.0).abs() < 1e-9);
        assert!((prod[[0, 1]]).abs() < 1e-9);
        assert!((prod[[1, 0]]).abs() < 1e-9);
        assert!((prod[[1, 1]] - 1.0).abs() < 1e-9);
    }
}