use ndarray::Array1;
use ndarray::Array2;
use ndarray::Axis;
use ndarray::s;
use stochastic_rs_core::simd_rng::Deterministic;
use stochastic_rs_core::simd_rng::SeedExt;
use stochastic_rs_core::simd_rng::Unseeded;
use stochastic_rs_distributions::normal::SimdNormal;
use crate::traits::FloatExt;
use crate::traits::ProcessExt;
/// Log-normal LIBOR Market Model (LMM) parameters: a tenor grid, the
/// initial forward Libors, per-rate volatilities and an optional
/// Cholesky factor for correlated Gaussian drivers.
pub struct Lmm<T: FloatExt, S: SeedExt = Unseeded> {
// Tenor dates T_0..T_M (length M + 1, strictly increasing — validated in `validate_lmm_inputs`).
pub tenor: Array1<T>,
// Initial forwards L_n(0), one per accrual period (length M, all positive).
pub l0: Array1<T>,
// Log-normal volatility per forward (length M, non-negative).
pub sigma: Array1<T>,
// Lower-triangular Cholesky factor of the correlation matrix; `None` means independent drivers.
pub chol: Option<Array2<T>>,
// Number of time steps per simulated path (columns of the sampled matrix).
pub n: usize,
// Simulation horizon; defaults to the last tenor date when `None`.
pub t: Option<T>,
// Seed source for the RNG (`Unseeded` or `Deterministic`).
pub seed: S,
}
impl<T: FloatExt> Lmm<T> {
/// Builds an unseeded LMM after validating the tenor/rate/vol inputs.
///
/// Panics (via `validate_lmm_inputs`) when the tenor grid is not
/// strictly increasing or the array lengths are inconsistent.
pub fn new(tenor: Array1<T>, l0: Array1<T>, sigma: Array1<T>, n: usize, t: Option<T>) -> Self {
validate_lmm_inputs(&tenor, &l0, &sigma);
Self { tenor, l0, sigma, chol: None, n, t, seed: Unseeded }
}

/// Attaches an `m x m` correlation matrix (`m = l0.len()`) and caches
/// its lower Cholesky factor for use during sampling.
pub fn with_correlation(mut self, rho: Array2<T>) -> Self {
let num_libors = self.l0.len();
assert_eq!(rho.nrows(), num_libors, "correlation rows must equal number of Libors");
assert_eq!(rho.ncols(), num_libors, "correlation cols must equal number of Libors");
self.chol = Some(cholesky_lower(&rho));
self
}
}
/// Panics unless the tenor grid is strictly increasing with at least two
/// dates, every initial Libor is positive, every volatility is
/// non-negative, and `l0`/`sigma` are one entry shorter than `tenor`.
fn validate_lmm_inputs<T: FloatExt>(tenor: &Array1<T>, l0: &Array1<T>, sigma: &Array1<T>) {
assert!(tenor.len() >= 2, "tenor must have at least two dates");
let num_rates = tenor.len() - 1;
assert_eq!(l0.len(), num_rates, "l0 length must equal tenor.len() - 1");
assert_eq!(sigma.len(), num_rates, "sigma length must equal tenor.len() - 1");
for idx in 0..num_rates {
assert!(tenor[idx + 1] > tenor[idx], "tenor must be strictly increasing");
assert!(l0[idx] > T::zero(), "initial Libor L_n(0) must be positive");
assert!(sigma[idx] >= T::zero(), "volatility σ_n must be non-negative");
}
}
impl<T: FloatExt> Lmm<T, Deterministic> {
/// Builds an LMM driven by a deterministic (reproducible) seed.
///
/// Identical to [`Lmm::new`] except the RNG seed source is fixed to
/// `seed`, so equal instances produce identical paths.
pub fn seeded(
tenor: Array1<T>,
l0: Array1<T>,
sigma: Array1<T>,
n: usize,
t: Option<T>,
seed: u64,
) -> Self {
validate_lmm_inputs(&tenor, &l0, &sigma);
Self {
tenor,
l0,
sigma,
chol: None,
n,
t,
seed: Deterministic::new(seed),
}
}

/// Attaches an `m x m` correlation matrix (`m = l0.len()`) and caches
/// its lower Cholesky factor for use during sampling.
pub fn with_correlation(mut self, rho: Array2<T>) -> Self {
let m = self.l0.len();
// Keep the panic messages consistent with the unseeded impl's
// `with_correlation`, which previously diverged from this one.
assert_eq!(
rho.nrows(),
m,
"correlation rows must equal number of Libors"
);
assert_eq!(
rho.ncols(),
m,
"correlation cols must equal number of Libors"
);
self.chol = Some(cholesky_lower(&rho));
self
}
}
impl<T: FloatExt, S: SeedExt> ProcessExt<T> for Lmm<T, S> {
type Output = Array2<T>;

/// Simulates one path on an `m x n` grid (rows = forward rates, columns
/// = time steps) with a log-Euler scheme. Column 0 holds the initial
/// curve; forwards whose tenor date has passed are frozen at their last
/// simulated value.
fn sample(&self) -> Self::Output {
let m = self.l0.len();
let n_steps = self.n;
let mut path = Array2::<T>::zeros((m, n_steps));
if n_steps == 0 {
return path;
}
// Column 0 is the initial forward curve.
for (n, &l0) in self.l0.iter().enumerate() {
path[(n, 0)] = l0;
}
if n_steps == 1 {
return path;
}
let t_max = self.tenor[m];
let horizon = self.t.unwrap_or(t_max);
assert!(horizon > T::zero() && horizon <= t_max);
let n_increments = n_steps - 1;
let dt = horizon / T::from_usize_(n_increments);
let sqrt_dt = dt.sqrt();
let half = T::from_f64_fast(0.5);
// Accrual fractions delta_j = T_{j+1} - T_j.
let delta: Array1<T> = (0..m).map(|j| self.tenor[j + 1] - self.tenor[j]).collect();
// Draw all Gaussian increments up front, then correlate (if a
// Cholesky factor is present) with a single matrix product.
let normal = SimdNormal::<T>::from_seed_source(T::zero(), T::one(), &self.seed);
let mut eps = Array2::<T>::zeros((m, n_increments));
{
let buf = eps
.as_slice_mut()
.expect("LMM eps buffer must be contiguous");
normal.fill_slice_fast(buf);
}
let z_corr: Array2<T> = match &self.chol {
Some(chol) => chol.dot(&eps),
None => eps,
};
let mut t_now = T::zero();
// eta = index of the first tenor date strictly after t_now.
let mut eta: usize = 0;
let mut prev = self.l0.clone();
for k in 0..n_increments {
while eta < m && self.tenor[eta] <= t_now {
eta += 1;
}
// First forward included in the drift sum. `eta <= m` by the
// loop bound, so `eta - 1 < m` and no upper clamp is needed.
let eta_idx = eta.saturating_sub(1);
let mut next = prev.clone();
for n in eta_idx..m {
if self.sigma[n] <= T::zero() {
// Zero volatility: no drift, no diffusion — rate is flat.
continue;
}
let mut drift = T::zero();
match &self.chol {
Some(chol) => {
// Correlated drivers: recover rho_{n,j} from the
// Cholesky factor as sum_p L[n,p] * L[j,p].
for j in eta_idx..=n {
let mut rho_nj = T::zero();
for p in 0..=j.min(n) {
rho_nj += chol[(n, p)] * chol[(j, p)];
}
let denom = T::one() + delta[j] * prev[j];
drift += (delta[j] * rho_nj * self.sigma[j] * prev[j]) / denom;
}
drift = drift * self.sigma[n];
}
None => {
// Independent drivers: only the j == n term survives.
let denom = T::one() + delta[n] * prev[n];
drift = self.sigma[n] * self.sigma[n] * delta[n] * prev[n] / denom;
}
}
let z_nk = z_corr[(n, k)];
let log_inc =
(drift - half * self.sigma[n] * self.sigma[n]) * dt + self.sigma[n] * sqrt_dt * z_nk;
next[n] = prev[n] * log_inc.exp();
}
let t_next = t_now + dt;
// Freeze forwards whose fixing date has already passed.
for n in 0..m {
if self.tenor[n] <= t_now {
next[n] = prev[n];
}
}
for n in 0..m {
path[(n, k + 1)] = next[n];
}
prev = next;
t_now = t_next;
}
path
}
}
/// Computes the lower-triangular Cholesky factor `L` of `rho`, so that
/// `L * L^T == rho`. Panics when `rho` is not square or a diagonal pivot
/// is non-positive (matrix not positive-definite).
fn cholesky_lower<T: FloatExt>(rho: &Array2<T>) -> Array2<T> {
let size = rho.nrows();
assert_eq!(rho.ncols(), size, "correlation must be square");
let mut lower = Array2::<T>::zeros((size, size));
for row in 0..size {
for col in 0..=row {
// Start from the target entry and subtract the part already
// explained by previously computed columns.
let mut acc = rho[(row, col)];
for k in 0..col {
acc -= lower[(row, k)] * lower[(col, k)];
}
if row == col {
assert!(acc > T::zero(), "correlation matrix not positive-definite");
lower[(row, col)] = acc.sqrt();
} else {
lower[(row, col)] = acc / lower[(col, col)];
}
}
}
lower
}
#[cfg(test)]
mod tests {
use super::*;

/// Evenly spaced tenor grid 0, dt, 2*dt, ..., m*dt.
fn flat_tenor(m: usize, dt: f64) -> Array1<f64> {
(0..=m).map(|i| i as f64 * dt).collect()
}

#[test]
fn lmm_sample_runs_independent_factors() {
let m = 4;
let lmm: Lmm<f64, Deterministic> = Lmm::seeded(
flat_tenor(m, 0.5),
Array1::from(vec![0.03, 0.035, 0.04, 0.045]),
Array1::from(vec![0.20, 0.20, 0.20, 0.20]),
100,
Some(2.0),
42,
);
let path = lmm.sample();
assert_eq!(path.shape(), &[m, 100]);
for row in 0..m {
assert!(path[(row, 0)] > 0.0);
}
for v in path.iter() {
assert!(*v > 0.0, "LMM forward went non-positive");
}
}

#[test]
fn lmm_correlated_factors_run() {
let m = 3;
// Uniform 0.6 off-diagonal correlation, unit diagonal.
let mut rho = Array2::<f64>::from_elem((m, m), 0.6);
for d in 0..m {
rho[(d, d)] = 1.0;
}
let lmm: Lmm<f64, Deterministic> = Lmm::seeded(
flat_tenor(m, 1.0),
Array1::from(vec![0.04, 0.045, 0.05]),
Array1::from(vec![0.25, 0.20, 0.18]),
50,
Some(3.0),
123,
)
.with_correlation(rho);
let path = lmm.sample();
assert_eq!(path.shape(), &[m, 50]);
for v in path.iter() {
assert!(*v > 0.0);
}
}

#[test]
fn lmm_seeded_is_deterministic() {
let tenor = flat_tenor(2, 0.5);
let l0 = Array1::from(vec![0.03, 0.035]);
let sigma = Array1::from(vec![0.2, 0.2]);
let first: Lmm<f64, Deterministic> =
Lmm::seeded(tenor.clone(), l0.clone(), sigma.clone(), 30, Some(1.0), 7);
let second: Lmm<f64, Deterministic> = Lmm::seeded(tenor, l0, sigma, 30, Some(1.0), 7);
assert_eq!(first.sample(), second.sample(), "seeded LMM not reproducible");
}

#[test]
fn cholesky_lower_recovers_identity() {
let l = cholesky_lower::<f64>(&Array2::<f64>::eye(4));
for i in 0..4 {
for j in 0..4 {
let expected: f64 = if i == j { 1.0 } else { 0.0 };
assert!((l[(i, j)] - expected).abs() < 1e-12_f64);
}
}
}

#[test]
fn cholesky_lower_recovers_2x2() {
let rho: Array2<f64> = ndarray::array![[1.0, 0.6], [0.6, 1.0]];
let l = cholesky_lower::<f64>(&rho);
let recon = l.dot(&l.t());
for (got, want) in recon.iter().zip(rho.iter()) {
let diff: f64 = got - want;
assert!(diff.abs() < 1e-12_f64);
}
}
}