use crate::error::{Result, TimeSeriesError};
/// Summation ("structural") matrix of a forecast hierarchy.
#[derive(Debug, Clone)]
pub struct HierarchyMatrix {
/// Total number of series (rows of `s`): aggregates plus leaves.
pub n_all: usize,
/// Number of bottom-level (leaf) series (columns of `s`).
pub n_bottom: usize,
/// `n_all × n_bottom` matrix; `s[i][j] == 1.0` when leaf `j`
/// contributes to series `i`.
pub s: Vec<Vec<f64>>,
}
impl HierarchyMatrix {
/// Validate a raw summation matrix and wrap it.
///
/// `s` must be rectangular: every row needs the same number of columns,
/// because all downstream reconcilers index `s[i][j]` for `j < n_bottom`
/// and would panic on a ragged matrix.
///
/// # Errors
/// `InvalidInput` when `s` is empty, has zero columns, or is ragged.
pub fn from_s(s: Vec<Vec<f64>>) -> Result<Self> {
let n_all = s.len();
if n_all == 0 {
return Err(TimeSeriesError::InvalidInput(
"S matrix must be non-empty".to_string(),
));
}
let n_bottom = s[0].len();
if n_bottom == 0 {
return Err(TimeSeriesError::InvalidInput(
"S matrix must have at least one column".to_string(),
));
}
// Previously unchecked: a ragged S caused out-of-bounds panics later.
if s.iter().any(|row| row.len() != n_bottom) {
return Err(TimeSeriesError::InvalidInput(
"S matrix rows must all have the same length".to_string(),
));
}
Ok(Self { n_all, n_bottom, s })
}
}
/// Build the summation matrix `S` (rows = all nodes, columns = leaves)
/// from a parent → children adjacency list.
///
/// Node ids may be arbitrary `usize` values; rows are ordered by ascending
/// node id and columns by ascending leaf id. Each row holds a 1 in every
/// column whose leaf is a descendant of (or equal to) that row's node.
///
/// # Errors
/// `InvalidInput` when the hierarchy is empty or contains no leaf nodes.
pub fn build_s_matrix(hierarchy: &[(usize, Vec<usize>)]) -> Result<HierarchyMatrix> {
use std::collections::{HashMap, HashSet};
// Collect every node id and remember which ids appear as parents.
let mut all_nodes: HashSet<usize> = HashSet::new();
let mut parent_nodes: HashSet<usize> = HashSet::new();
for (parent, children) in hierarchy {
all_nodes.insert(*parent);
parent_nodes.insert(*parent);
for &c in children {
all_nodes.insert(c);
}
}
if all_nodes.is_empty() {
return Err(TimeSeriesError::InvalidInput(
"hierarchy must be non-empty".to_string(),
));
}
let mut all_sorted: Vec<usize> = all_nodes.into_iter().collect();
all_sorted.sort_unstable();
let n_all = all_sorted.len();
// Leaves never appear on the parent side. `all_sorted` is already
// ascending, so this filtered list is sorted too (the old extra sort
// was redundant).
let bottom_nodes: Vec<usize> = all_sorted
.iter()
.copied()
.filter(|n| !parent_nodes.contains(n))
.collect();
let n_bottom = bottom_nodes.len();
if n_bottom == 0 {
return Err(TimeSeriesError::InvalidInput(
"No bottom-level (leaf) nodes found in hierarchy".to_string(),
));
}
// Dense id → row lookup; assumes ids are reasonably small/dense.
let mut row_of: Vec<usize> = vec![0; all_sorted.last().copied().unwrap_or(0) + 1];
for (row, &id) in all_sorted.iter().enumerate() {
row_of[id] = row;
}
let col_of: HashMap<usize, usize> = bottom_nodes
.iter()
.enumerate()
.map(|(col, &id)| (id, col))
.collect();
let mut s = vec![vec![0.0_f64; n_bottom]; n_all];
// Each leaf row is a unit vector.
for (col, &bid) in bottom_nodes.iter().enumerate() {
s[row_of[bid]][col] = 1.0;
}
let mut children_map: HashMap<usize, Vec<usize>> = HashMap::new();
for (parent, children) in hierarchy {
children_map
.entry(*parent)
.or_default()
.extend(children.iter().copied());
}
// For every aggregate node, mark the columns of all reachable leaves via
// an iterative DFS. The `visited` set guards against cycles and
// duplicate child entries, which previously could loop forever.
for &node in &all_sorted {
if col_of.contains_key(&node) {
continue;
}
let row = row_of[node];
let mut stack = vec![node];
let mut visited: HashSet<usize> = HashSet::new();
while let Some(cur) = stack.pop() {
if !visited.insert(cur) {
continue;
}
if let Some(&col) = col_of.get(&cur) {
s[row][col] = 1.0;
} else if let Some(ch) = children_map.get(&cur) {
stack.extend(ch.iter().copied());
}
}
}
HierarchyMatrix::from_s(s)
}
/// Bottom-up reconciliation: aggregate leaf forecasts up the hierarchy,
/// computing `out = S · bottom_fc` (an `n_all × h` matrix).
///
/// # Errors
/// `DimensionMismatch` when `bottom_fc` does not contain exactly
/// `hm.n_bottom` rows.
pub fn bottom_up_reconcile(bottom_fc: &[Vec<f64>], hm: &HierarchyMatrix) -> Result<Vec<Vec<f64>>> {
let provided = bottom_fc.len();
if provided != hm.n_bottom {
return Err(TimeSeriesError::DimensionMismatch {
expected: hm.n_bottom,
actual: provided,
});
}
// Horizon is taken from the first leaf series (0 when there are none).
let horizon = bottom_fc.first().map_or(0, |row| row.len());
let mut out = Vec::with_capacity(hm.n_all);
for i in 0..hm.n_all {
let mut series = Vec::with_capacity(horizon);
for t in 0..horizon {
let mut acc = 0.0_f64;
for j in 0..hm.n_bottom {
acc += hm.s[i][j] * bottom_fc[j][t];
}
series.push(acc);
}
out.push(series);
}
Ok(out)
}
/// MinT (minimum-trace) reconciliation: `out = S (SᵀW⁻¹S)⁻¹ SᵀW⁻¹ ŷ`.
///
/// `base_fc` holds base forecasts for all `n_all` series (rows) over the
/// horizon (columns). `residuals` are in-sample residual vectors (one row
/// per time step, `n_all` columns) used to estimate the error covariance
/// `W`; when empty or mis-shaped, `W` falls back to the identity (OLS
/// reconciliation). If `SᵀW⁻¹S` cannot be inverted, the function degrades
/// gracefully to bottom-up reconciliation.
///
/// # Errors
/// `DimensionMismatch` when `base_fc` does not have `n_all` rows;
/// `InvalidInput` when its rows have inconsistent lengths.
pub fn mint_reconcile(
base_fc: &[Vec<f64>],
residuals: &[Vec<f64>],
hm: &HierarchyMatrix,
) -> Result<Vec<Vec<f64>>> {
let n = hm.n_all;
let m = hm.n_bottom;
if base_fc.len() != n {
return Err(TimeSeriesError::DimensionMismatch {
expected: n,
actual: base_fc.len(),
});
}
let h = if n > 0 { base_fc[0].len() } else { 0 };
// Ragged forecast rows would panic in the products below.
if base_fc.iter().any(|row| row.len() != h) {
return Err(TimeSeriesError::InvalidInput(
"base_fc rows must all share the same horizon".to_string(),
));
}
let identity = |k: usize| {
let mut id = vec![vec![0.0_f64; k]; k];
for (i, row) in id.iter_mut().enumerate() {
row[i] = 1.0;
}
id
};
// W: sample covariance of the residuals, or identity when unusable.
// BUG FIX: previously only residuals[0] was length-checked, so a ragged
// residual matrix could panic inside sample_covariance.
let w = if residuals.is_empty() || residuals.iter().any(|r| r.len() != n) {
identity(n)
} else {
sample_covariance(residuals, n)
};
// Invert W; on failure add a small ridge, and as a last resort use I.
let w_inv = match mat_inverse_lu(&w, n) {
Ok(wi) => wi,
Err(_) => {
let lambda = 1e-6;
let mut wr = w.clone();
for i in 0..n {
wr[i][i] += lambda;
}
mat_inverse_lu(&wr, n).unwrap_or_else(|_| identity(n))
}
};
// p = Sᵀ W⁻¹  (m × n)
let mut p = vec![vec![0.0_f64; n]; m];
for j in 0..m {
for k in 0..n {
for l in 0..n {
p[j][k] += hm.s[l][j] * w_inv[l][k];
}
}
}
// swts = Sᵀ W⁻¹ S  (m × m)
let mut swts = vec![vec![0.0_f64; m]; m];
for i in 0..m {
for j in 0..m {
for k in 0..n {
swts[i][j] += p[i][k] * hm.s[k][j];
}
}
}
let swts_inv = match mat_inverse_lu(&swts, m) {
Ok(inv) => inv,
// Singular system: fall back to plain bottom-up reconciliation.
Err(_) => return bottom_up_reconcile(&extract_bottom_rows(base_fc, hm)?, hm),
};
// g = (Sᵀ W⁻¹ S)⁻¹ Sᵀ W⁻¹  (m × n)
let mut g = vec![vec![0.0_f64; n]; m];
for i in 0..m {
for k in 0..n {
for j in 0..m {
g[i][k] += swts_inv[i][j] * p[j][k];
}
}
}
// gy = G ŷ  (m × h): reconciled bottom-level forecasts.
let mut gy = vec![vec![0.0_f64; h]; m];
for i in 0..m {
for t in 0..h {
for k in 0..n {
gy[i][t] += g[i][k] * base_fc[k][t];
}
}
}
// out = S gy  (n × h): aggregate back up the hierarchy.
let mut out = vec![vec![0.0_f64; h]; n];
for i in 0..n {
for t in 0..h {
for j in 0..m {
out[i][t] += hm.s[i][j] * gy[j][t];
}
}
}
Ok(out)
}
/// Pull the bottom-level (leaf) forecast rows out of an all-levels matrix.
///
/// A leaf row of `S` is a unit vector (exactly one entry > 0.5). Rows are
/// returned in *column* order so they line up with the columns of `S`,
/// instead of relying on leaf rows happening to appear in column order.
///
/// # Errors
/// `DimensionMismatch` when `all_fc` has fewer than `n_all` rows;
/// `InvalidInput` when some column has no unit row or more than one
/// (ambiguous, e.g. a single-child parent duplicates its leaf's row).
fn extract_bottom_rows(all_fc: &[Vec<f64>], hm: &HierarchyMatrix) -> Result<Vec<Vec<f64>>> {
// Previously an undersized all_fc panicked on indexing; fail cleanly.
if all_fc.len() < hm.n_all {
return Err(TimeSeriesError::DimensionMismatch {
expected: hm.n_all,
actual: all_fc.len(),
});
}
let mut row_for_col: Vec<Option<usize>> = vec![None; hm.n_bottom];
for i in 0..hm.n_all {
// A unit row: exactly one "structural one" entry.
let mut ones = hm.s[i]
.iter()
.enumerate()
.filter(|&(_, &v)| v > 0.5)
.map(|(j, _)| j);
if let (Some(col), None) = (ones.next(), ones.next()) {
if row_for_col[col].is_some() {
// Two unit rows claim the same column: ambiguous.
return Err(TimeSeriesError::InvalidInput(
"Could not identify bottom-level rows from S matrix".to_string(),
));
}
row_for_col[col] = Some(i);
}
}
row_for_col
.into_iter()
.map(|maybe_row| match maybe_row {
Some(i) => Ok(all_fc[i].clone()),
None => Err(TimeSeriesError::InvalidInput(
"Could not identify bottom-level rows from S matrix".to_string(),
)),
})
.collect()
}
/// ERM (empirical-risk-minimisation) reconciliation.
///
/// Fits a projection `P̂ = (SᵀS)⁻¹ (Sᵀ Y Ŷᵀ) (Ŷ Ŷᵀ)⁻¹` on a validation
/// window (base forecasts `Ŷ`, actuals `Y`), then applies
/// `out = S P̂ ŷ_new`. Both Gram matrices are ridged with `1e-6` before
/// inversion.
///
/// # Errors
/// `DimensionMismatch` when an input does not have `n_all` rows;
/// `InvalidInput` on empty validation windows or ragged rows;
/// `NumericalInstability` when a ridged Gram matrix is still singular.
pub fn erm_reconcile(
base_fc_val: &[Vec<f64>],
actuals_val: &[Vec<f64>],
hm: &HierarchyMatrix,
new_base_fc: &[Vec<f64>],
) -> Result<Vec<Vec<f64>>> {
let n = hm.n_all;
let m = hm.n_bottom;
if base_fc_val.len() != n || actuals_val.len() != n {
return Err(TimeSeriesError::DimensionMismatch {
expected: n,
actual: base_fc_val.len(),
});
}
let n_obs = base_fc_val[0].len();
if n_obs == 0 {
return Err(TimeSeriesError::InvalidInput(
"Validation set must be non-empty".to_string(),
));
}
// Ragged validation rows would panic in the Gram-matrix loops below.
if base_fc_val.iter().any(|r| r.len() != n_obs)
|| actuals_val.iter().any(|r| r.len() != n_obs)
{
return Err(TimeSeriesError::InvalidInput(
"Validation rows must all have the same length".to_string(),
));
}
if new_base_fc.len() != n {
return Err(TimeSeriesError::DimensionMismatch {
expected: n,
actual: new_base_fc.len(),
});
}
let h = new_base_fc[0].len();
if new_base_fc.iter().any(|r| r.len() != h) {
return Err(TimeSeriesError::InvalidInput(
"new_base_fc rows must all have the same length".to_string(),
));
}
// yyt = Ŷ Ŷᵀ + λI  (n × n): Gram matrix of validation base forecasts.
let mut yyt = vec![vec![0.0_f64; n]; n];
for i in 0..n {
for j in 0..n {
for t in 0..n_obs {
yyt[i][j] += base_fc_val[i][t] * base_fc_val[j][t];
}
}
}
for i in 0..n {
yyt[i][i] += 1e-6;
}
let yyt_inv = mat_inverse_lu(&yyt, n)?;
// sts = Sᵀ S + λI  (m × m).
let mut sts = vec![vec![0.0_f64; m]; m];
for i in 0..m {
for j in 0..m {
for k in 0..n {
sts[i][j] += hm.s[k][i] * hm.s[k][j];
}
}
}
for i in 0..m {
sts[i][i] += 1e-6;
}
let sts_inv = mat_inverse_lu(&sts, m)?;
// sty = Sᵀ Y  (m × n_obs): bottom-projected actuals.
let mut sty = vec![vec![0.0_f64; n_obs]; m];
for i in 0..m {
for t in 0..n_obs {
for k in 0..n {
sty[i][t] += hm.s[k][i] * actuals_val[k][t];
}
}
}
// styyt = Sᵀ Y Ŷᵀ  (m × n).
let mut styyt = vec![vec![0.0_f64; n]; m];
for i in 0..m {
for j in 0..n {
for t in 0..n_obs {
styyt[i][j] += sty[i][t] * base_fc_val[j][t];
}
}
}
// P̂ = (SᵀS)⁻¹ (Sᵀ Y Ŷᵀ) (Ŷ Ŷᵀ)⁻¹  (m × n).
// NOTE: a previous version also computed Y Ŷᵀ, S Sᵀ, their inverses and
// (ŶŶᵀ)⁻¹(YŶᵀ); none of that fed into the output, so the dead work (two
// extra O(n³) inversions included) has been removed.
let tmp = mat_mul_rect(&sts_inv, &styyt, m, m, n);
let p_hat = mat_mul_rect(&tmp, &yyt_inv, m, n, n);
// py = P̂ ŷ_new  (m × h), then out = S py  (n × h).
let mut py = vec![vec![0.0_f64; h]; m];
for i in 0..m {
for t in 0..h {
for k in 0..n {
py[i][t] += p_hat[i][k] * new_base_fc[k][t];
}
}
}
let mut out = vec![vec![0.0_f64; h]; n];
for i in 0..n {
for t in 0..h {
for j in 0..m {
out[i][t] += hm.s[i][j] * py[j][t];
}
}
}
Ok(out)
}
/// A trained two-hidden-layer ReLU MLP that maps flattened bottom-level
/// forecasts to flattened forecasts for every level of the hierarchy.
#[derive(Debug, Clone)]
pub struct DeepReconciler {
/// Number of bottom-level input series.
n_bottom: usize,
/// Number of output series (all levels).
n_all: usize,
/// Forecast horizon each series was trained for.
horizon: usize,
/// Layer-1 weights (`hidden1 × n_bottom·horizon`) and biases.
w1: Vec<Vec<f64>>,
b1: Vec<f64>,
/// Layer-2 weights (`hidden2 × hidden1`) and biases.
w2: Vec<Vec<f64>>,
b2: Vec<f64>,
/// Output-layer weights (`n_all·horizon × hidden2`) and biases.
w3: Vec<Vec<f64>>,
b3: Vec<f64>,
}
impl DeepReconciler {
/// Run the trained MLP on flattened bottom-level forecasts and reshape
/// the output into `n_all` rows of `horizon` values.
///
/// # Errors
/// `DimensionMismatch` when `bottom_fc` does not have `n_bottom` rows;
/// `InvalidInput` when row lengths differ from the trained horizon;
/// `ComputationError` when the network output has an unexpected size.
pub fn reconcile(&self, bottom_fc: &[Vec<f64>]) -> Result<Vec<Vec<f64>>> {
if bottom_fc.len() != self.n_bottom {
return Err(TimeSeriesError::DimensionMismatch {
expected: self.n_bottom,
actual: bottom_fc.len(),
});
}
// `first()` avoids the panic the old code hit on an empty slice
// (possible when n_bottom == 0).
let h = bottom_fc.first().map_or(0, |row| row.len());
if h != self.horizon {
return Err(TimeSeriesError::InvalidInput(format!(
"Expected horizon {}, got {}",
self.horizon, h
)));
}
// Ragged rows would silently mis-shape the flattened input vector.
if bottom_fc.iter().any(|row| row.len() != h) {
return Err(TimeSeriesError::InvalidInput(
"bottom_fc rows must all have the same length".to_string(),
));
}
// Degenerate but valid: zero-length horizon ⇒ empty forecasts
// (`chunks(0)` below would panic).
if self.horizon == 0 {
return Ok(vec![Vec::new(); self.n_all]);
}
// Flatten row-major, run the two-hidden-layer ReLU MLP.
let input: Vec<f64> = bottom_fc
.iter()
.flat_map(|row| row.iter().copied())
.collect();
let h1 = relu(&affine(&self.w1, &self.b1, &input));
let h2 = relu(&affine(&self.w2, &self.b2, &h1));
let out_flat = affine(&self.w3, &self.b3, &h2);
let out: Vec<Vec<f64>> = out_flat
.chunks(self.horizon)
.map(|chunk| chunk.to_vec())
.collect();
if out.len() != self.n_all {
return Err(TimeSeriesError::ComputationError(
"DeepReconciler output shape mismatch".to_string(),
));
}
Ok(out)
}
}
/// Hyper-parameters for `train_deep_reconciler`.
#[derive(Debug, Clone)]
pub struct DeepReconcilerConfig {
/// Width of the first hidden layer.
pub hidden1: usize,
/// Width of the second hidden layer.
pub hidden2: usize,
/// SGD step size.
pub learning_rate: f64,
/// Number of passes over the training history.
pub epochs: usize,
/// Mini-batch size (the last batch of an epoch may be smaller).
pub batch_size: usize,
/// Seed for the deterministic LCG used for init and shuffling.
pub seed: u64,
}
impl Default for DeepReconcilerConfig {
fn default() -> Self {
Self {
hidden1: 64,
hidden2: 32,
learning_rate: 1e-3,
epochs: 50,
batch_size: 16,
seed: 12345,
}
}
}
/// Train a `DeepReconciler` (two-hidden-layer ReLU MLP) with mini-batch SGD
/// on squared error between the network output and the flattened all-level
/// actuals.
///
/// `bottom_fc_history[k]` is an `n_bottom × horizon` base-forecast sample;
/// `actuals_history[k]` is the matching `n_all × horizon` target. Training
/// is fully deterministic for a given `config.seed`.
///
/// # Errors
/// `InvalidInput` when the history is empty or has no series;
/// `DimensionMismatch` when the two histories differ in length.
pub fn train_deep_reconciler(
bottom_fc_history: &[Vec<Vec<f64>>],
actuals_history: &[Vec<Vec<f64>>],
n_all: usize,
config: &DeepReconcilerConfig,
) -> Result<DeepReconciler> {
let n_samples = bottom_fc_history.len();
if n_samples == 0 {
return Err(TimeSeriesError::InvalidInput(
"Training history must be non-empty".to_string(),
));
}
if actuals_history.len() != n_samples {
return Err(TimeSeriesError::DimensionMismatch {
expected: n_samples,
actual: actuals_history.len(),
});
}
// Network shape is inferred from the first sample only.
// NOTE(review): later samples are not shape-checked — a ragged history
// silently yields truncated dot products in `affine`; verify inputs.
let n_bottom = bottom_fc_history[0].len();
let horizon = if n_bottom > 0 {
bottom_fc_history[0][0].len()
} else {
return Err(TimeSeriesError::InvalidInput(
"bottom_fc_history must have at least one series".to_string(),
));
};
let input_dim = n_bottom * horizon;
let output_dim = n_all * horizon;
let h1 = config.hidden1;
let h2 = config.hidden2;
let mut lcg = config.seed;
// He initialisation: stddev ∝ sqrt(2 / fan_in), suited to ReLU layers.
let he_scale1 = (2.0 / input_dim as f64).sqrt();
let he_scale2 = (2.0 / h1 as f64).sqrt();
let he_scale3 = (2.0 / h2 as f64).sqrt();
let mut w1 = random_matrix(h1, input_dim, he_scale1, &mut lcg);
let mut b1 = vec![0.0_f64; h1];
let mut w2 = random_matrix(h2, h1, he_scale2, &mut lcg);
let mut b2 = vec![0.0_f64; h2];
let mut w3 = random_matrix(output_dim, h2, he_scale3, &mut lcg);
let mut b3 = vec![0.0_f64; output_dim];
let lr = config.learning_rate;
for _epoch in 0..config.epochs {
// Deterministic reshuffle of the sample order each epoch.
let mut indices: Vec<usize> = (0..n_samples).collect();
lcg_shuffle(&mut indices, &mut lcg);
for batch_start in (0..n_samples).step_by(config.batch_size) {
let batch_end = (batch_start + config.batch_size).min(n_samples);
let batch = &indices[batch_start..batch_end];
// Zeroed gradient accumulators for this mini-batch.
let mut dw1 = vec![vec![0.0_f64; input_dim]; h1];
let mut db1 = vec![0.0_f64; h1];
let mut dw2 = vec![vec![0.0_f64; h1]; h2];
let mut db2 = vec![0.0_f64; h2];
let mut dw3 = vec![vec![0.0_f64; h2]; output_dim];
let mut db3 = vec![0.0_f64; output_dim];
for &idx in batch {
// Flatten sample and target row-major.
let x: Vec<f64> = bottom_fc_history[idx]
.iter()
.flat_map(|r| r.iter().copied())
.collect();
let y_target: Vec<f64> = actuals_history[idx]
.iter()
.flat_map(|r| r.iter().copied())
.collect();
// Forward pass (keep pre-activations z* for the ReLU masks).
let z1 = affine(&w1, &b1, &x);
let a1 = relu(&z1);
let z2 = affine(&w2, &b2, &a1);
let a2 = relu(&z2);
let z3 = affine(&w3, &b3, &a2);
// Output delta: d/dz of mean squared error = 2(z3 − y)/dim.
let mut d3: Vec<f64> = z3.iter().zip(y_target.iter()).map(|(o, t)| o - t).collect();
let scale = 2.0 / output_dim as f64;
for v in &mut d3 {
*v *= scale;
}
// Accumulate output-layer gradients.
for i in 0..output_dim {
db3[i] += d3[i];
for j in 0..h2 {
dw3[i][j] += d3[i] * a2[j];
}
}
// Backprop through layer 2; ReLU gate masks where z2 ≤ 0.
let mut d2 = vec![0.0_f64; h2];
for j in 0..h2 {
let grad: f64 = (0..output_dim).map(|i| w3[i][j] * d3[i]).sum();
d2[j] = if z2[j] > 0.0 { grad } else { 0.0 };
}
for i in 0..h2 {
db2[i] += d2[i];
for j in 0..h1 {
dw2[i][j] += d2[i] * a1[j];
}
}
// Backprop through layer 1, same ReLU masking.
let mut d1 = vec![0.0_f64; h1];
for j in 0..h1 {
let grad: f64 = (0..h2).map(|i| w2[i][j] * d2[i]).sum();
d1[j] = if z1[j] > 0.0 { grad } else { 0.0 };
}
for i in 0..h1 {
db1[i] += d1[i];
for j in 0..input_dim {
dw1[i][j] += d1[i] * x[j];
}
}
}
// SGD step with gradients averaged over the actual batch size.
let batch_size = batch.len() as f64;
for i in 0..h1 {
b1[i] -= lr * db1[i] / batch_size;
for j in 0..input_dim {
w1[i][j] -= lr * dw1[i][j] / batch_size;
}
}
for i in 0..h2 {
b2[i] -= lr * db2[i] / batch_size;
for j in 0..h1 {
w2[i][j] -= lr * dw2[i][j] / batch_size;
}
}
for i in 0..output_dim {
b3[i] -= lr * db3[i] / batch_size;
for j in 0..h2 {
w3[i][j] -= lr * dw3[i][j] / batch_size;
}
}
}
}
Ok(DeepReconciler {
n_bottom,
n_all,
horizon,
w1,
b1,
w2,
b2,
w3,
b3,
})
}
/// Strategy used to make forecasts coherent across a hierarchy.
#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ReconcileMethod {
/// Sum bottom-level forecasts up the hierarchy.
BottomUp,
/// Minimum-trace reconciliation (uses residual covariance when given).
MinT,
/// Empirical-risk-minimisation projection fit on a validation window.
Erm,
}
impl Default for ReconcileMethod {
fn default() -> Self {
Self::BottomUp
}
}
/// Configuration for `HierarchicalForecaster`.
#[derive(Debug, Clone)]
pub struct HierarchicalForecasterConfig {
/// Reconciliation strategy to apply.
pub method: ReconcileMethod,
/// Forecast horizon (number of steps).
pub horizon: usize,
}
impl Default for HierarchicalForecasterConfig {
fn default() -> Self {
Self {
method: ReconcileMethod::BottomUp,
horizon: 1,
}
}
}
/// High-level façade that applies the configured reconciliation method.
pub struct HierarchicalForecaster {
/// Chosen method and horizon.
config: HierarchicalForecasterConfig,
/// Summation matrix of the hierarchy.
hm: HierarchyMatrix,
/// Optional in-sample residuals (rows = time steps) used by MinT.
residuals_val: Option<Vec<Vec<f64>>>,
/// Optional validation-window base forecasts used by ERM.
base_fc_val: Option<Vec<Vec<f64>>>,
/// Optional validation-window actuals used by ERM.
actuals_val: Option<Vec<Vec<f64>>>,
}
impl HierarchicalForecaster {
/// Create a forecaster over the given summation matrix.
pub fn new(hm: HierarchyMatrix, config: HierarchicalForecasterConfig) -> Self {
Self {
config,
hm,
residuals_val: None,
base_fc_val: None,
actuals_val: None,
}
}
/// Attach validation data: in-sample residuals (for MinT) plus
/// validation-window base forecasts and actuals (for ERM).
pub fn with_validation(
mut self,
residuals: Vec<Vec<f64>>,
base_fc_val: Vec<Vec<f64>>,
actuals_val: Vec<Vec<f64>>,
) -> Self {
self.residuals_val = Some(residuals);
self.base_fc_val = Some(base_fc_val);
self.actuals_val = Some(actuals_val);
self
}
/// Reconcile base forecasts with the configured method.
///
/// `BottomUp` and any future variants of the `#[non_exhaustive]` enum
/// share a single fallback arm (the two arms were previously duplicated,
/// and an unused `empty` binding in the ERM arm raised a warning).
///
/// # Errors
/// Propagates errors from the underlying reconciler; ERM additionally
/// requires validation data supplied via `with_validation`.
pub fn reconcile(&self, base_fc: &[Vec<f64>]) -> Result<Vec<Vec<f64>>> {
match self.config.method {
ReconcileMethod::MinT => {
// Without residuals, MinT degrades to OLS (identity W).
let residuals: &[Vec<f64>] = self.residuals_val.as_deref().unwrap_or(&[]);
mint_reconcile(base_fc, residuals, &self.hm)
}
ReconcileMethod::Erm => {
let bv = self.base_fc_val.as_deref().ok_or_else(|| {
TimeSeriesError::InvalidInput(
"ERM requires validation base forecasts".to_string(),
)
})?;
let av = self.actuals_val.as_deref().ok_or_else(|| {
TimeSeriesError::InvalidInput("ERM requires validation actuals".to_string())
})?;
erm_reconcile(bv, av, &self.hm, base_fc)
}
// BottomUp and any future variants fall back to bottom-up.
_ => {
let bottom = extract_bottom_rows(base_fc, &self.hm)?;
bottom_up_reconcile(&bottom, &self.hm)
}
}
}
}
/// Affine map `w · x + b`, one output element per row of `w`.
///
/// The dot product runs over `min(x.len(), w[i].len())`, so a short input
/// or weight row is silently truncated rather than panicking.
fn affine(w: &[Vec<f64>], b: &[f64], x: &[f64]) -> Vec<f64> {
w.iter()
.enumerate()
.map(|(i, row)| {
let dot: f64 = row.iter().zip(x.iter()).map(|(&wv, &xv)| wv * xv).sum();
dot + b[i]
})
.collect()
}
/// Element-wise rectified linear unit.
///
/// Keeps the exact `v > 0.0` predicate so NaN and -0.0 both map to 0.0.
fn relu(x: &[f64]) -> Vec<f64> {
let mut out = Vec::with_capacity(x.len());
for &v in x {
out.push(if v > 0.0 { v } else { 0.0 });
}
out
}
/// `rows × cols` matrix of i.i.d. normal draws multiplied by `scale`.
///
/// Values are drawn row-major from the deterministic LCG stream, so the
/// result is reproducible for a given starting `lcg` state.
fn random_matrix(rows: usize, cols: usize, scale: f64, lcg: &mut u64) -> Vec<Vec<f64>> {
(0..rows)
.map(|_| (0..cols).map(|_| lcg_normal_rand(lcg) * scale).collect())
.collect()
}
/// One standard-normal draw via Box–Muller over a 64-bit LCG.
///
/// Consumes exactly two LCG steps; `u1` is clamped away from zero so the
/// logarithm stays finite.
fn lcg_normal_rand(state: &mut u64) -> f64 {
let mut next_uniform = || {
*state = state
.wrapping_mul(6_364_136_223_846_793_005)
.wrapping_add(1_442_695_040_888_963_407);
((*state >> 11) as f64) / ((1u64 << 53) as f64)
};
let u1 = next_uniform().max(1e-15);
let u2 = next_uniform();
let radius = (-2.0 * u1.ln()).sqrt();
let angle = 2.0 * std::f64::consts::PI * u2;
radius * angle.cos()
}
/// Fisher–Yates shuffle driven by a 64-bit LCG (deterministic per seed).
///
/// Now accepts any mutable slice instead of `&mut Vec<usize>` (callers
/// passing `&mut vec` still work via deref coercion).
/// NOTE: `% (i + 1)` carries a negligible modulo bias; acceptable for
/// shuffling training batch orders.
fn lcg_shuffle(v: &mut [usize], state: &mut u64) {
for i in (1..v.len()).rev() {
*state = state
.wrapping_mul(6_364_136_223_846_793_005)
.wrapping_add(1_442_695_040_888_963_407);
let j = (*state as usize) % (i + 1);
v.swap(i, j);
}
}
/// Unbiased sample covariance (divisor `t − 1`) of `n`-dimensional rows.
///
/// With a single observation the divisor is clamped to 1, yielding zeros;
/// with no observations the means are NaN (0/0), matching prior behavior.
fn sample_covariance(data: &[Vec<f64>], n: usize) -> Vec<Vec<f64>> {
let t = data.len() as f64;
// Per-column means.
let means: Vec<f64> = (0..n)
.map(|j| data.iter().map(|row| row[j]).sum::<f64>() / t)
.collect();
let denom = (t - 1.0).max(1.0);
let mut cov = vec![vec![0.0_f64; n]; n];
for row in data {
let dev: Vec<f64> = (0..n).map(|i| row[i] - means[i]).collect();
for i in 0..n {
for j in 0..n {
cov[i][j] += dev[i] * dev[j];
}
}
}
for cov_row in cov.iter_mut() {
for entry in cov_row.iter_mut() {
*entry /= denom;
}
}
cov
}
/// Invert an `n × n` matrix by Gauss–Jordan elimination with partial
/// pivoting on the augmented system `[A | I]`.
///
/// Rows of `a` shorter than `n` are implicitly zero-padded; longer rows
/// are truncated (matching the previous construction).
///
/// # Errors
/// `NumericalInstability` when a pivot magnitude falls below `1e-14`.
fn mat_inverse_lu(a: &[Vec<f64>], n: usize) -> Result<Vec<Vec<f64>>> {
// Build the augmented matrix [A | I] with 2n columns.
let mut aug = vec![vec![0.0_f64; 2 * n]; n];
for (i, row) in aug.iter_mut().enumerate() {
for j in 0..n.min(a[i].len()) {
row[j] = a[i][j];
}
row[n + i] = 1.0;
}
for col in 0..n {
// Partial pivoting: bring the largest remaining |entry| to the
// diagonal (first occurrence wins on ties, as before).
let mut best_row = col;
let mut best_abs = aug[col][col].abs();
for candidate in (col + 1)..n {
let mag = aug[candidate][col].abs();
if mag > best_abs {
best_abs = mag;
best_row = candidate;
}
}
aug.swap(col, best_row);
let pivot = aug[col][col];
if pivot.abs() < 1e-14 {
return Err(TimeSeriesError::NumericalInstability(
"Singular matrix in mat_inverse_lu".to_string(),
));
}
// Normalize the pivot row, then eliminate the column elsewhere.
for entry in aug[col].iter_mut() {
*entry /= pivot;
}
for row in 0..n {
if row == col {
continue;
}
let factor = aug[row][col];
for j in 0..(2 * n) {
let delta = aug[col][j] * factor;
aug[row][j] -= delta;
}
}
}
// The right half of the augmented matrix is now A⁻¹.
Ok((0..n).map(|i| aug[i][n..2 * n].to_vec()).collect())
}
/// Dense `n × n` matrix product `a · b`.
fn mat_mul_square(a: &[Vec<f64>], b: &[Vec<f64>], n: usize) -> Vec<Vec<f64>> {
(0..n)
.map(|i| {
(0..n)
.map(|j| (0..n).map(|k| a[i][k] * b[k][j]).sum::<f64>())
.collect()
})
.collect()
}
/// Product of an `r × k` matrix `a` with a `k × c` matrix `b`.
///
/// The inner-product (`l`) loop is hoisted outward for cache-friendlier
/// row-major access; per-cell accumulation order over `l` is unchanged.
fn mat_mul_rect(a: &[Vec<f64>], b: &[Vec<f64>], r: usize, k: usize, c: usize) -> Vec<Vec<f64>> {
let mut out = vec![vec![0.0_f64; c]; r];
for i in 0..r {
for l in 0..k {
let a_il = a[i][l];
for j in 0..c {
out[i][j] += a_il * b[l][j];
}
}
}
out
}
/// Transpose an `r × c` matrix into a `c × r` matrix.
fn transpose_mat(a: &[Vec<f64>], r: usize, c: usize) -> Vec<Vec<f64>> {
(0..c)
.map(|j| (0..r).map(|i| a[i][j]).collect())
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
/// Three-level hierarchy: 0 → {1, 2}, 1 → {3, 4}, 2 → {5, 6}.
/// Leaves are {3, 4, 5, 6}, so n_all == 7 and n_bottom == 4.
fn simple_hierarchy() -> HierarchyMatrix {
let hier = vec![
(0usize, vec![1usize, 2usize]),
(1usize, vec![3usize, 4usize]),
(2usize, vec![5usize, 6usize]),
];
build_s_matrix(&hier).expect("build_s_matrix failed")
}
// The total row (node 0) must cover all four leaves.
#[test]
fn test_s_matrix_construction() {
let hm = simple_hierarchy();
assert_eq!(hm.n_all, 7);
assert_eq!(hm.n_bottom, 4);
let total_row_idx = 0;
let total_sum: f64 = hm.s[total_row_idx].iter().sum();
assert!((total_sum - 4.0).abs() < 1e-10, "Total row sum should be 4");
}
// Bottom-up: total at step 0 is the sum of the four leaf forecasts.
#[test]
fn test_bottom_up_reconciliation() {
let hm = simple_hierarchy();
let bottom_fc = vec![
vec![10.0, 11.0],
vec![20.0, 22.0],
vec![30.0, 33.0],
vec![40.0, 44.0],
];
let result = bottom_up_reconcile(&bottom_fc, &hm).expect("bottom_up failed");
assert_eq!(result.len(), 7);
let total_sum = result[0][0];
assert!(
(total_sum - 100.0).abs() < 1e-9,
"Total step 0 = {total_sum}, expected 100"
);
}
// NOTE(review): despite the name, this only checks shape and finiteness
// of the MinT output, not that bottom forecasts are preserved.
#[test]
fn test_mint_preserves_bottom() {
let hm = simple_hierarchy();
let base_fc: Vec<Vec<f64>> = (0..7)
.map(|i| vec![i as f64 + 1.0, i as f64 + 2.0])
.collect();
// Rotating unit residuals give a well-conditioned covariance.
let residuals: Vec<Vec<f64>> = (0..20)
.map(|t| (0..7).map(|i| if i == t % 7 { 1.0 } else { 0.0 }).collect())
.collect();
let result = mint_reconcile(&base_fc, &residuals, &hm).expect("mint failed");
assert_eq!(result.len(), 7);
for row in &result {
for v in row {
assert!(v.is_finite(), "MinT produced non-finite value");
}
}
}
// ERM output must be n_all × horizon regardless of input values.
#[test]
fn test_erm_reconcile_shape() {
let hm = simple_hierarchy();
let n = hm.n_all;
let h = 3;
let n_obs = 20;
let base_fc_val: Vec<Vec<f64>> = (0..n)
.map(|i| (0..n_obs).map(|t| (i + t) as f64 * 0.1).collect())
.collect();
let actuals_val: Vec<Vec<f64>> = (0..n)
.map(|i| (0..n_obs).map(|t| (i + t) as f64 * 0.1 + 0.05).collect())
.collect();
let new_fc: Vec<Vec<f64>> = (0..n).map(|i| vec![i as f64; h]).collect();
let result =
erm_reconcile(&base_fc_val, &actuals_val, &hm, &new_fc).expect("erm_reconcile failed");
assert_eq!(result.len(), n, "ERM output should have n_all rows");
assert_eq!(result[0].len(), h, "ERM output should have horizon cols");
}
// Train a tiny MLP on random data and verify the output shape only.
#[test]
fn test_deep_reconciler_shape() {
let n_bottom = 4;
let n_all = 7;
let horizon = 3;
let n_samples = 30;
let mut lcg: u64 = 42;
// Local deterministic uniform generator (same LCG constants as prod).
let lcg_rand = |s: &mut u64| -> f64 {
*s = s
.wrapping_mul(6_364_136_223_846_793_005)
.wrapping_add(1_442_695_040_888_963_407);
((*s >> 11) as f64) / ((1u64 << 53) as f64)
};
let bottom_history: Vec<Vec<Vec<f64>>> = (0..n_samples)
.map(|_| {
(0..n_bottom)
.map(|_| (0..horizon).map(|_| lcg_rand(&mut lcg)).collect())
.collect()
})
.collect();
let actuals_history: Vec<Vec<Vec<f64>>> = (0..n_samples)
.map(|_| {
(0..n_all)
.map(|_| (0..horizon).map(|_| lcg_rand(&mut lcg)).collect())
.collect()
})
.collect();
let config = DeepReconcilerConfig {
hidden1: 16,
hidden2: 8,
epochs: 3,
..Default::default()
};
let reconciler = train_deep_reconciler(&bottom_history, &actuals_history, n_all, &config)
.expect("train failed");
let new_bottom: Vec<Vec<f64>> = (0..n_bottom).map(|_| vec![1.0; horizon]).collect();
let out = reconciler.reconcile(&new_bottom).expect("reconcile failed");
assert_eq!(out.len(), n_all, "Output should have n_all rows");
assert_eq!(out[0].len(), horizon, "Output should have horizon cols");
}
// Train on a learnable target (actuals = leaves + their aggregates) and
// sanity-check the training MSE is finite and bounded.
#[test]
fn test_deep_reconciler_improves() {
let n_bottom = 3;
let n_all = 5;
let horizon = 2;
let n_samples = 40;
let mut lcg: u64 = 777;
let lcg_rand = |s: &mut u64| -> f64 {
*s = s
.wrapping_mul(6_364_136_223_846_793_005)
.wrapping_add(1_442_695_040_888_963_407);
((*s >> 11) as f64) / ((1u64 << 53) as f64)
};
let bottom_history: Vec<Vec<Vec<f64>>> = (0..n_samples)
.map(|_| {
(0..n_bottom)
.map(|_| (0..horizon).map(|_| lcg_rand(&mut lcg)).collect())
.collect()
})
.collect();
// Targets: copy the leaves, then append sum-of-leaves aggregate rows.
let actuals_history: Vec<Vec<Vec<f64>>> = bottom_history
.iter()
.map(|b| {
let mut all = b.clone();
for _ in n_bottom..n_all {
let agg: Vec<f64> = (0..horizon)
.map(|t| b.iter().map(|series| series[t]).sum::<f64>())
.collect();
all.push(agg);
}
all
})
.collect();
let config = DeepReconcilerConfig {
hidden1: 32,
hidden2: 16,
epochs: 20,
learning_rate: 1e-2,
batch_size: 8,
seed: 42,
};
let reconciler = train_deep_reconciler(&bottom_history, &actuals_history, n_all, &config)
.expect("train failed");
let mse: f64 = bottom_history
.iter()
.zip(actuals_history.iter())
.map(|(b, act)| {
let pred = reconciler.reconcile(b).expect("reconcile failed");
let err: f64 = pred
.iter()
.zip(act.iter())
.flat_map(|(p_row, a_row)| {
p_row.iter().zip(a_row.iter()).map(|(p, a)| (p - a).powi(2))
})
.sum();
err
})
.sum::<f64>()
/ n_samples as f64;
assert!(
mse.is_finite(),
"MSE should be finite after training, got {mse}"
);
assert!(mse < 1000.0, "MSE = {mse} is unreasonably large");
}
// Defaults for both config types.
#[test]
fn test_hierarchy_config_default() {
let cfg = HierarchicalForecasterConfig::default();
assert_eq!(cfg.method, ReconcileMethod::BottomUp);
assert_eq!(cfg.horizon, 1);
let dr_cfg = DeepReconcilerConfig::default();
assert_eq!(dr_cfg.hidden1, 64);
assert_eq!(dr_cfg.epochs, 50);
}
}