use ndarray::Array1;
use ndarray::Array2;
use stochastic_rs_core::simd_rng::Deterministic;
use stochastic_rs_core::simd_rng::SeedExt;
use stochastic_rs_core::simd_rng::Unseeded;
use crate::traits::FloatExt;
use crate::traits::ProcessExt;
/// A D-dimensional Hawkes process with pairwise exponential excitation kernels.
///
/// Based on the updates performed in the `ProcessExt::sample` impl below, the
/// intensity of component `i` is
/// `lambda_i(t) = mu[i] + sum_j sum_{t_k in events_j, t_k < t} alpha[[i, j]] * exp(-beta[[i, j]] * (t - t_k))`.
///
/// `S` selects the seeding strategy (default `Unseeded`; see `seeded` for the
/// reproducible `Deterministic` variant).
pub struct MultivariateHawkes<T: FloatExt, S: SeedExt = Unseeded> {
// Baseline (exogenous) intensity per component; its length defines D.
pub mu: Array1<T>,
// Excitation jump sizes: alpha[[i, j]] is added to component i's intensity
// state when component j fires. Shape (D, D).
pub alpha: Array2<T>,
// Exponential decay rates matching `alpha`. Shape (D, D).
// NOTE(review): sampling assumes entries are nonnegative (intensities must
// decay between events) — confirm callers guarantee this.
pub beta: Array2<T>,
// Simulation horizon: events are generated on [0, t_max).
pub t_max: T,
// Seed strategy used to build the RNG in `sample`.
pub seed: S,
}
impl<T: FloatExt> MultivariateHawkes<T> {
pub fn new(mu: Array1<T>, alpha: Array2<T>, beta: Array2<T>, t_max: T) -> Self {
let d = mu.len();
assert_eq!(alpha.shape(), [d, d], "alpha must be (D, D)");
assert_eq!(beta.shape(), [d, d], "beta must be (D, D)");
Self {
mu,
alpha,
beta,
t_max,
seed: Unseeded,
}
}
}
impl<T: FloatExt> MultivariateHawkes<T, Deterministic> {
    /// Creates a deterministically seeded D-variate Hawkes process.
    ///
    /// Same construction and validation as [`MultivariateHawkes::new`], but
    /// `sample` will draw from the reproducible RNG stream derived from `seed`.
    ///
    /// # Panics
    /// Panics if `alpha` or `beta` does not have shape (D, D), where
    /// D = `mu.len()`.
    pub fn seeded(mu: Array1<T>, alpha: Array2<T>, beta: Array2<T>, t_max: T, seed: u64) -> Self {
        let d = mu.len();
        // Keep these checks (including the messages) in sync with `new`.
        assert_eq!(alpha.shape(), [d, d], "alpha must be (D, D)");
        assert_eq!(beta.shape(), [d, d], "beta must be (D, D)");
        Self {
            mu,
            alpha,
            beta,
            t_max,
            seed: Deterministic::new(seed),
        }
    }
}
impl<T: FloatExt, S: SeedExt> ProcessExt<T> for MultivariateHawkes<T, S> {
/// One array of event times per component (each begins with a sentinel 0).
type Output = Vec<Array1<T>>;
/// Simulates event times on `[0, t_max)` using a thinning (Ogata-style)
/// accept/reject scheme: propose the next candidate time from an exponential
/// with rate equal to the current total intensity (a valid upper bound while
/// intensities only decay between events — assumes `beta` entries are
/// nonnegative, TODO confirm), then accept and attribute the candidate to a
/// component with probability proportional to its decayed intensity.
fn sample(&self) -> Self::Output {
let mut rng = self.seed.rng();
let d = self.mu.len();
// s[i][j]: current excitation contributed by past events of component j
// to component i, i.e. sum of alpha[[i, j]] * exp(-beta[[i, j]] * age).
let mut s = vec![vec![T::zero(); d]; d];
let mut t = T::zero();
// NOTE(review): every component's output starts with a sentinel event at
// t = 0. It never excites the process (s starts at zero) but it IS part of
// the returned arrays — confirm downstream consumers expect this.
let mut events: Vec<Vec<T>> = (0..d).map(|_| vec![T::zero()]).collect();
while t < self.t_max {
// Intensities at the current time: baseline plus accumulated excitation.
let mut lambdas = vec![T::zero(); d];
for i in 0..d {
lambdas[i] = self.mu[i];
for j in 0..d {
lambdas[i] += s[i][j];
}
}
// Upper bound on the total intensity until the next event.
let lambda_bar: T = lambdas.iter().copied().sum();
if lambda_bar <= T::zero() {
break;
}
// Use 1 - U so the argument of ln stays in (0, 1]; guards against ln(0)
// if the uniform sampler can return exactly 0.
let u = T::one() - T::sample_uniform_simd(&mut rng);
// Exponential(lambda_bar) waiting time to the candidate event.
let dt = -u.ln() / lambda_bar;
t += dt;
if t >= self.t_max {
break;
}
// Decay every excitation term over the elapsed dt.
for i in 0..d {
for j in 0..d {
s[i][j] = s[i][j] * (-self.beta[[i, j]] * dt).exp();
}
}
// Recompute the per-component intensities at the candidate time.
for i in 0..d {
lambdas[i] = self.mu[i];
for j in 0..d {
lambdas[i] += s[i][j];
}
}
// Thinning: accept with probability sum(lambdas) / lambda_bar, picking
// component i proportional to lambdas[i]. If v lands beyond the total,
// the loop falls through with no event — a rejection.
let v = T::sample_uniform_simd(&mut rng) * lambda_bar;
let mut cumsum = T::zero();
for i in 0..d {
cumsum += lambdas[i];
if v <= cumsum {
events[i].push(t);
// An event in component i raises every component k's state by
// its jump size alpha[[k, i]].
for k in 0..d {
s[k][i] += self.alpha[[k, i]];
}
break;
}
}
}
events.into_iter().map(Array1::from_vec).collect()
}
}
#[cfg(test)]
mod tests {
    use ndarray::Array2;
    use ndarray::array;
    use super::*;

    /// Smoke test: a bivariate process over a short horizon produces events
    /// in both components.
    #[test]
    fn bivariate_hawkes_runs() {
        let mu = array![1.0_f64, 1.5];
        let alpha = Array2::from_shape_vec((2, 2), vec![0.3, 0.1, 0.2, 0.4]).unwrap();
        let beta = Array2::from_shape_vec((2, 2), vec![2.0, 2.0, 2.0, 2.0]).unwrap();
        let h = MultivariateHawkes::new(mu, alpha, beta, 10.0);
        let events = h.sample();
        assert_eq!(events.len(), 2);
        // `sample` always emits a sentinel event at t = 0, so "has real
        // events" means strictly more than one entry.
        assert!(events[0].len() > 1, "component 0 should have events");
        assert!(events[1].len() > 1, "component 1 should have events");
    }

    /// Adding cross-excitation should not reduce the average event count
    /// compared with a diagonal (self-excitation only) kernel.
    #[test]
    fn cross_excitation_increases_events() {
        let mu = array![2.0_f64, 2.0];
        let alpha_diag = Array2::from_shape_vec((2, 2), vec![0.5, 0.0, 0.0, 0.5]).unwrap();
        let beta = Array2::from_shape_vec((2, 2), vec![3.0, 3.0, 3.0, 3.0]).unwrap();
        let h1 = MultivariateHawkes::new(mu.clone(), alpha_diag, beta.clone(), 50.0);
        let alpha_full = Array2::from_shape_vec((2, 2), vec![0.5, 0.5, 0.5, 0.5]).unwrap();
        let h2 = MultivariateHawkes::new(mu, alpha_full, beta, 50.0);
        let n_trials = 20;
        // Count both components of the SAME realization. (The previous
        // version called `sample()` twice per trial, mixing event counts
        // from two independent draws and doubling the work.)
        let avg_total = |h: &MultivariateHawkes<f64>| -> f64 {
            (0..n_trials)
                .map(|_| {
                    let ev = h.sample();
                    (ev[0].len() + ev[1].len()) as f64
                })
                .sum::<f64>()
                / n_trials as f64
        };
        let avg1 = avg_total(&h1);
        let avg2 = avg_total(&h2);
        // Loose 0.9 factor keeps this stochastic comparison from being flaky.
        assert!(
            avg2 > avg1 * 0.9,
            "cross-excited avg={avg2:.0} should exceed diagonal avg={avg1:.0}"
        );
    }

    /// The D = 1 case behaves like a scalar Hawkes process: a single sorted
    /// event-time array.
    #[test]
    fn univariate_matches_scalar() {
        let mu = array![3.0_f64];
        let alpha = Array2::from_shape_vec((1, 1), vec![1.0]).unwrap();
        let beta = Array2::from_shape_vec((1, 1), vec![5.0]).unwrap();
        let h = MultivariateHawkes::new(mu, alpha, beta, 10.0);
        let events = h.sample();
        assert_eq!(events.len(), 1);
        assert!(events[0].len() > 2, "should have multiple events");
        // Event times must be non-decreasing.
        for w in events[0].as_slice().unwrap().windows(2) {
            assert!(w[0] <= w[1], "events must be sorted");
        }
    }
}