// oxiphysics_core/stochastic/functions.rs
1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use std::f64::consts::PI;
6
7use super::types::{GeometricBrownianMotion, Rng};
8
9pub(super) const K_B: f64 = 1.38e-23;
10/// Solves an Ito SDE using the Euler-Maruyama scheme.
11///
12/// Equation: `dx = drift(x, t)*dt + diffusion(x, t)*dW`
13///
14/// # Arguments
15/// * `x0` - Initial condition
16/// * `t_end` - End time
17/// * `dt` - Time step
18/// * `drift` - Drift function f(x, t)
19/// * `diffusion` - Diffusion function g(x, t)
20/// * `seed` - RNG seed
21///
22/// Returns a vector of x values at each time step.
23pub fn euler_maruyama(
24    x0: f64,
25    t_end: f64,
26    dt: f64,
27    drift: impl Fn(f64, f64) -> f64,
28    diffusion: impl Fn(f64, f64) -> f64,
29    seed: u64,
30) -> Vec<f64> {
31    let mut rng = Rng::new(seed);
32    let n = ((t_end / dt).ceil() as usize) + 1;
33    let mut path = Vec::with_capacity(n);
34    let mut x = x0;
35    let mut t = 0.0_f64;
36    path.push(x);
37    while t + dt <= t_end + 1e-12 * dt {
38        let dw = rng.next_normal() * dt.sqrt();
39        x += drift(x, t) * dt + diffusion(x, t) * dw;
40        t += dt;
41        path.push(x);
42    }
43    path
44}
45/// Solves an Ito SDE using the Milstein scheme (higher-order correction).
46///
47/// Equation: `dx = drift(x,t)*dt + diffusion(x,t)*dW + 0.5*g*g'*((dW)^2 - dt)`
48///
49/// # Arguments
50/// * `x0` - Initial condition
51/// * `t_end` - End time
52/// * `dt` - Time step
53/// * `drift` - Drift function f(x, t)
54/// * `diffusion` - Diffusion function g(x, t)
55/// * `diffusion_prime` - Derivative of diffusion w.r.t. x: g'(x, t)
56/// * `seed` - RNG seed
57///
58/// Returns a vector of x values at each time step.
59pub fn milstein(
60    x0: f64,
61    t_end: f64,
62    dt: f64,
63    drift: impl Fn(f64, f64) -> f64,
64    diffusion: impl Fn(f64, f64) -> f64,
65    diffusion_prime: impl Fn(f64, f64) -> f64,
66    seed: u64,
67) -> Vec<f64> {
68    let mut rng = Rng::new(seed);
69    let n = ((t_end / dt).ceil() as usize) + 1;
70    let mut path = Vec::with_capacity(n);
71    let mut x = x0;
72    let mut t = 0.0_f64;
73    path.push(x);
74    while t + dt <= t_end + 1e-12 * dt {
75        let dw = rng.next_normal() * dt.sqrt();
76        let g = diffusion(x, t);
77        let gp = diffusion_prime(x, t);
78        x += drift(x, t) * dt + g * dw + 0.5 * g * gp * (dw * dw - dt);
79        t += dt;
80        path.push(x);
81    }
82    path
83}
84/// Generate multiple Monte Carlo paths for a GBM and compute statistics.
85///
86/// Returns `(mean_path, std_path)` where each vector has length `n_steps + 1`.
87pub fn monte_carlo_gbm_paths(
88    s0: f64,
89    mu: f64,
90    sigma: f64,
91    t_end: f64,
92    dt: f64,
93    n_paths: usize,
94    seed: u64,
95) -> (Vec<f64>, Vec<f64>) {
96    let n_steps = ((t_end / dt).ceil() as usize) + 1;
97    let mut sum = vec![0.0_f64; n_steps];
98    let mut sum_sq = vec![0.0_f64; n_steps];
99    let gbm = GeometricBrownianMotion::new(mu, sigma);
100    for i in 0..n_paths {
101        let path = gbm.simulate_exact(s0, t_end, dt, seed.wrapping_add(i as u64));
102        for (j, &val) in path.iter().enumerate() {
103            if j < n_steps {
104                sum[j] += val;
105                sum_sq[j] += val * val;
106            }
107        }
108    }
109    let n = n_paths as f64;
110    let mean_path: Vec<f64> = sum.iter().map(|&s| s / n).collect();
111    let std_path: Vec<f64> = sum
112        .iter()
113        .zip(sum_sq.iter())
114        .map(|(&s, &s2)| {
115            let m = s / n;
116            ((s2 / n - m * m).max(0.0)).sqrt()
117        })
118        .collect();
119    (mean_path, std_path)
120}
121/// Generate Monte Carlo estimate of E\[f(S_T)\] for a GBM terminal value.
122///
123/// Returns `(mean, stderr)`.
124pub fn monte_carlo_terminal_estimate(
125    s0: f64,
126    mu: f64,
127    sigma: f64,
128    t_end: f64,
129    n_paths: usize,
130    f: impl Fn(f64) -> f64,
131    seed: u64,
132) -> (f64, f64) {
133    let mut rng = Rng::new(seed);
134    let drift = (mu - 0.5 * sigma * sigma) * t_end;
135    let vol = sigma * t_end.sqrt();
136    let mut sum = 0.0_f64;
137    let mut sum_sq = 0.0_f64;
138    for _ in 0..n_paths {
139        let z = rng.next_normal();
140        let s_t = s0 * (drift + vol * z).exp();
141        let val = f(s_t);
142        sum += val;
143        sum_sq += val * val;
144    }
145    let n = n_paths as f64;
146    let mean = sum / n;
147    let var = sum_sq / n - mean * mean;
148    let stderr = (var / n).sqrt();
149    (mean, stderr)
150}
151/// Monte Carlo with antithetic variates for variance reduction.
152///
153/// For each normal draw Z, also uses -Z to reduce variance.
154/// Returns `(mean, stderr)`.
155pub fn antithetic_terminal_estimate(
156    s0: f64,
157    mu: f64,
158    sigma: f64,
159    t_end: f64,
160    n_pairs: usize,
161    f: impl Fn(f64) -> f64,
162    seed: u64,
163) -> (f64, f64) {
164    let mut rng = Rng::new(seed);
165    let drift = (mu - 0.5 * sigma * sigma) * t_end;
166    let vol = sigma * t_end.sqrt();
167    let mut sum = 0.0_f64;
168    let mut sum_sq = 0.0_f64;
169    let n_total = 2 * n_pairs;
170    for _ in 0..n_pairs {
171        let z = rng.next_normal();
172        let s_plus = s0 * (drift + vol * z).exp();
173        let s_minus = s0 * (drift - vol * z).exp();
174        let avg = 0.5 * (f(s_plus) + f(s_minus));
175        sum += avg;
176        sum_sq += avg * avg;
177    }
178    let n = n_pairs as f64;
179    let mean = sum / n;
180    let var = sum_sq / n - mean * mean;
181    let stderr = (var / n).sqrt();
182    let _ = n_total;
183    (mean, stderr)
184}
185/// Monte Carlo with control variate for variance reduction.
186///
187/// Uses the terminal asset price itself as control variate since
188/// E\[S_T\] is known analytically for GBM.
189/// Returns `(mean, stderr)`.
190pub fn control_variate_terminal_estimate(
191    s0: f64,
192    mu: f64,
193    sigma: f64,
194    t_end: f64,
195    n_paths: usize,
196    f: impl Fn(f64) -> f64,
197    seed: u64,
198) -> (f64, f64) {
199    let mut rng = Rng::new(seed);
200    let drift = (mu - 0.5 * sigma * sigma) * t_end;
201    let vol = sigma * t_end.sqrt();
202    let expected_s_t = s0 * (mu * t_end).exp();
203    let mut vals = Vec::with_capacity(n_paths);
204    let mut controls = Vec::with_capacity(n_paths);
205    for _ in 0..n_paths {
206        let z = rng.next_normal();
207        let s_t = s0 * (drift + vol * z).exp();
208        vals.push(f(s_t));
209        controls.push(s_t);
210    }
211    let mean_val = vals.iter().sum::<f64>() / n_paths as f64;
212    let mean_ctrl = controls.iter().sum::<f64>() / n_paths as f64;
213    let mut cov = 0.0_f64;
214    let mut var_c = 0.0_f64;
215    for i in 0..n_paths {
216        cov += (vals[i] - mean_val) * (controls[i] - mean_ctrl);
217        var_c += (controls[i] - mean_ctrl) * (controls[i] - mean_ctrl);
218    }
219    let beta = if var_c.abs() > 1e-30 {
220        -cov / var_c
221    } else {
222        0.0
223    };
224    let mut sum = 0.0_f64;
225    let mut sum_sq = 0.0_f64;
226    for i in 0..n_paths {
227        let adj = vals[i] + beta * (controls[i] - expected_s_t);
228        sum += adj;
229        sum_sq += adj * adj;
230    }
231    let n = n_paths as f64;
232    let mean = sum / n;
233    let var = sum_sq / n - mean * mean;
234    let stderr = (var / n).sqrt();
235    (mean, stderr)
236}
/// Arithmetic mean of a slice of values.
///
/// Returns `0.0` for an empty slice.
pub fn mean(data: &[f64]) -> f64 {
    match data.len() {
        0 => 0.0,
        len => data.iter().sum::<f64>() / len as f64,
    }
}
/// Unbiased (n - 1 denominator) sample variance of a slice of values.
///
/// Returns `0.0` if the slice has fewer than 2 elements.
pub fn variance(data: &[f64]) -> f64 {
    let n = data.len();
    if n < 2 {
        return 0.0;
    }
    // Two-pass estimator: mean first, then centered squared deviations.
    let m = data.iter().sum::<f64>() / n as f64;
    let ss: f64 = data.iter().map(|&x| (x - m) * (x - m)).sum();
    ss / (n - 1) as f64
}
256/// Computes the sample standard deviation of a slice of values.
257///
258/// Returns `0.0` if the slice has fewer than 2 elements.
259pub fn std_dev(data: &[f64]) -> f64 {
260    variance(data).sqrt()
261}
/// Sample skewness (third standardized central moment, no bias correction).
///
/// Returns `0.0` if the slice has fewer than 3 elements or is constant.
pub fn skewness(data: &[f64]) -> f64 {
    let len = data.len();
    if len < 3 {
        return 0.0;
    }
    let n_f = len as f64;
    let m = data.iter().sum::<f64>() / n_f;
    // Accumulate second and third central moments in a single pass.
    let mut m2 = 0.0_f64;
    let mut m3 = 0.0_f64;
    for &x in data {
        let d = x - m;
        m2 += d * d;
        m3 += d * d * d;
    }
    m2 /= n_f;
    m3 /= n_f;
    // Constant data has zero spread; avoid dividing by ~0.
    if m2.abs() < 1e-30 {
        return 0.0;
    }
    m3 / m2.powf(1.5)
}
/// Sample excess kurtosis (fourth standardized moment minus 3).
///
/// Returns `0.0` if the slice has fewer than 4 elements or is constant.
pub fn kurtosis(data: &[f64]) -> f64 {
    let len = data.len();
    if len < 4 {
        return 0.0;
    }
    let n_f = len as f64;
    let m = data.iter().sum::<f64>() / n_f;
    // Accumulate second and fourth central moments in a single pass.
    let mut sum2 = 0.0_f64;
    let mut sum4 = 0.0_f64;
    for &x in data {
        let d = x - m;
        let d2 = d * d;
        sum2 += d2;
        sum4 += d2 * d2;
    }
    let m2 = sum2 / n_f;
    let m4 = sum4 / n_f;
    // Constant data has zero spread; avoid dividing by ~0.
    if m2.abs() < 1e-30 {
        return 0.0;
    }
    m4 / (m2 * m2) - 3.0
}
/// Autocorrelation of a time series at a given lag.
///
/// Uses the normalized estimator `R(lag) = Cov(X_t, X_{t+lag}) / Var(X)`,
/// with the covariance averaged over the `n - lag` available pairs.
/// Returns `0.0` if there are insufficient data points or zero variance.
pub fn autocorrelation(data: &[f64], lag: usize) -> f64 {
    let n = data.len();
    if n <= lag || n < 2 {
        return 0.0;
    }
    let m = data.iter().sum::<f64>() / n as f64;
    let var: f64 = data.iter().map(|&x| (x - m) * (x - m)).sum::<f64>() / n as f64;
    if var == 0.0 {
        return 0.0;
    }
    let pairs = n - lag;
    // Pair each point with its lagged partner via two offset slices.
    let cov: f64 = data[..pairs]
        .iter()
        .zip(data[lag..].iter())
        .map(|(&a, &b)| (a - m) * (b - m))
        .sum::<f64>()
        / pairs as f64;
    cov / var
}
/// Estimates the diffusion coefficient from mean-squared displacement (MSD) data.
///
/// Fits `MSD = slope * t + intercept` by least squares and returns `slope / 2`
/// (1-D Einstein relation: `MSD = 2*D*t`).
///
/// Returns `0.0` if there are fewer than 2 data points or the times are degenerate.
pub fn diffusion_coefficient(msd_data: &[(f64, f64)]) -> f64 {
    let n = msd_data.len();
    if n < 2 {
        return 0.0;
    }
    // Accumulate the four regression sums in a single pass.
    let mut sum_t = 0.0_f64;
    let mut sum_msd = 0.0_f64;
    let mut sum_t2 = 0.0_f64;
    let mut sum_t_msd = 0.0_f64;
    for &(t, m) in msd_data {
        sum_t += t;
        sum_msd += m;
        sum_t2 += t * t;
        sum_t_msd += t * m;
    }
    let n_f = n as f64;
    let denom = n_f * sum_t2 - sum_t * sum_t;
    // All time points identical -> slope undefined.
    if denom.abs() < 1e-30 {
        return 0.0;
    }
    let slope = (n_f * sum_t_msd - sum_t * sum_msd) / denom;
    slope / 2.0
}
/// Running (cumulative) mean of a data series: element `i` is the mean of
/// `data[..=i]`. Returns an empty vector for empty input.
pub fn running_mean(data: &[f64]) -> Vec<f64> {
    let mut cumulative = 0.0_f64;
    data.iter()
        .enumerate()
        .map(|(i, &x)| {
            cumulative += x;
            cumulative / (i + 1) as f64
        })
        .collect()
}
347/// Computes the effective sample size using autocorrelation.
348///
349/// ESS = N / (1 + 2 * sum_{k=1}^{max_lag} rho(k))
350/// where rho(k) is the autocorrelation at lag k.
351pub fn effective_sample_size(data: &[f64], max_lag: usize) -> f64 {
352    let n = data.len();
353    if n < 2 {
354        return n as f64;
355    }
356    let mut tau = 1.0_f64;
357    for k in 1..=max_lag.min(n - 1) {
358        let rho = autocorrelation(data, k);
359        if rho < 0.0 {
360            break;
361        }
362        tau += 2.0 * rho;
363    }
364    n as f64 / tau
365}
/// Kramers mean first-passage time (MFPT) for escape over a barrier.
///
/// High-friction Kramers formula:
/// `τ = (2π / (ω_0 * ω_b)) * exp(ΔE / D)`
///
/// # Arguments
/// * `barrier_height` — ΔE = barrier height above the minimum
/// * `D` — thermal energy scale in the Arrhenius exponent (β ≈ 1/D; e.g. `D = kT / γ`)
/// * `omega_0` — angular frequency at the minimum (attempt frequency)
/// * `omega_b` — absolute value of imaginary frequency at the saddle point
///
/// Returns the MFPT in units consistent with the input.
#[allow(non_snake_case)]
pub fn mean_first_passage_time(barrier_height: f64, D: f64, omega_0: f64, omega_b: f64) -> f64 {
    // Prefactor from the well/saddle curvatures; exponential Arrhenius factor
    // controlled by barrier_height / D. (A vestigial `let _ = D;` — and a doc
    // comment claiming D was unused — previously contradicted this use of D.)
    (2.0 * PI / (omega_0 * omega_b)) * (barrier_height / D).exp()
}
#[cfg(test)]
mod tests {
    // Deterministic statistical tests: every stochastic helper is seeded (or uses
    // rand::rng() where only finiteness/shape is asserted), and tolerances are
    // sized generously around the expected sampling error so the suite is stable.
    use super::*;
    use crate::LangevinDynamics;
    use crate::OrnsteinUhlenbeck;
    use crate::RandomWalk;
    use crate::Rng;
    use crate::WienerProcess;
    use crate::stochastic::AntitheticGbm;

    use crate::stochastic::ControlVariateGbm;
    use crate::stochastic::EmpiricalFirstPassageTime;
    use crate::stochastic::FractionalBrownianMotion;
    use crate::stochastic::HestonModel;

    use crate::stochastic::KleinmanKramers;
    use crate::stochastic::LangevinIntegrator;

    use crate::stochastic::MertonJumpDiffusion;
    use crate::stochastic::MetropolisHastings;
    use crate::stochastic::OuExactSampler;

    use crate::stochastic::WienerSampler;
    // Wiener increments over dt should have mean ~0 and variance ~dt.
    #[test]
    fn test_wiener_process_statistics() {
        let dt = 0.01;
        let n = 10_000;
        let mut wp = WienerProcess::new(42);
        let increments: Vec<f64> = (0..n).map(|_| wp.increment(dt)).collect();
        let m = mean(&increments);
        let v = variance(&increments);
        assert!(m.abs() < 0.05, "mean={m} not close to 0");
        assert!((v - dt).abs() < 0.005, "variance={v} not close to dt={dt}");
    }
    // Empirical mean of simulated GBM terminal values vs the analytical mean,
    // within 3 standard errors of the sample mean.
    #[test]
    fn test_gbm_mean() {
        let gbm = GeometricBrownianMotion::new(0.05, 0.2);
        let s0 = 100.0;
        let t_end = 1.0;
        let dt = 0.01;
        let n_paths = 500;
        let finals: Vec<f64> = (0..n_paths)
            .map(|seed| {
                let path = gbm.simulate(s0, t_end, dt, seed as u64);
                *path.last().unwrap()
            })
            .collect();
        let emp_mean = mean(&finals);
        let ana_mean = gbm.analytical_mean(s0, t_end);
        let ana_var = gbm.analytical_variance(s0, t_end);
        let se = (ana_var / n_paths as f64).sqrt();
        assert!(
            (emp_mean - ana_mean).abs() < 3.0 * se,
            "emp_mean={emp_mean}, ana_mean={ana_mean}, 3*se={}",
            3.0 * se
        );
    }
    #[test]
    fn test_gbm_exact_simulation() {
        let gbm = GeometricBrownianMotion::new(0.05, 0.2);
        let path = gbm.simulate_exact(100.0, 1.0, 0.01, 42);
        assert!(path.len() > 90);
        for &v in &path {
            assert!(v > 0.0, "exact GBM produced non-positive value: {v}");
        }
    }
    // After a long burn-in the OU tail should hover near its long-run mean mu.
    #[test]
    fn test_ou_converges_to_mean() {
        let ou = OrnsteinUhlenbeck::new(2.0, 1.5, 0.3);
        let path = ou.simulate(0.0, 10.0, 0.01, 123);
        let tail = &path[path.len() * 3 / 4..];
        let m = mean(tail);
        assert!(
            (m - ou.mu).abs() < 0.2,
            "OU mean={m}, expected close to mu={}",
            ou.mu
        );
    }
    // sigma^2 / (2*theta) = 0.25 / 4 = 0.0625 for theta=2, sigma=0.5.
    #[test]
    fn test_ou_stationary_variance() {
        let ou = OrnsteinUhlenbeck::new(2.0, 0.0, 0.5);
        let expected = ou.stationary_variance();
        assert!((expected - 0.0625).abs() < 1e-10);
    }
    // exp(-theta * lag) = exp(-2 * 0.5) = e^-1.
    #[test]
    fn test_ou_autocorrelation() {
        let ou = OrnsteinUhlenbeck::new(2.0, 0.0, 0.5);
        let rho = ou.autocorrelation_at(0.5);
        let expected = (-1.0_f64).exp();
        assert!((rho - expected).abs() < 1e-10);
    }
    #[test]
    fn test_ou_analytical_mean() {
        let ou = OrnsteinUhlenbeck::new(1.0, 5.0, 0.1);
        let m = ou.analytical_mean_at(0.0, 10.0);
        assert!((m - 5.0).abs() < 1e-3, "analytical mean = {m}");
    }
    #[test]
    fn test_langevin_step_changes_position() {
        let ld = LangevinDynamics::new(1.0e-26, 1e10, 300.0);
        let mut rng = Rng::new(7);
        let (x_new, _v_new) = ld.step(0.0, 0.0, 1.0e-21, 1e-15, &mut rng);
        assert!(x_new.abs() > 0.0, "position did not change");
    }
    #[test]
    fn test_langevin_baoab_step() {
        let ld = LangevinDynamics::new(1.0e-26, 1e10, 300.0);
        let mut rng = Rng::new(7);
        let (x_new, v_new) = ld.step_baoab(0.0, 0.0, 1.0e-21, 1e-15, &mut rng);
        assert!(
            x_new.abs() > 0.0 || v_new.abs() > 0.0,
            "BAOAB step had no effect"
        );
    }
    // Hand-checked reference values: mean 5, sample variance 32/7.
    #[test]
    fn test_mean_and_variance_known_data() {
        let data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0];
        let m = mean(&data);
        let v = variance(&data);
        assert!((m - 5.0).abs() < 1e-10, "mean={m}");
        assert!((v - 4.571_428_571_428_571).abs() < 1e-6, "variance={v}");
    }
    // With zero diffusion the scheme reduces to explicit Euler on dx/dt = -x,
    // so the endpoint should track the exact decay x0 * exp(-t).
    #[test]
    fn test_euler_maruyama_zero_diffusion() {
        let x0 = 1.0;
        let t_end = 2.0;
        let dt = 0.001;
        let path = euler_maruyama(x0, t_end, dt, |x, _t| -x, |_x, _t| 0.0, 42);
        let x_final = *path.last().unwrap();
        let expected = x0 * (-t_end).exp();
        assert!(
            (x_final - expected).abs() < 0.01,
            "x_final={x_final}, expected={expected}"
        );
    }
    #[test]
    fn test_random_walk_dimension_2() {
        let rw = RandomWalk::new(2, 1.0);
        let path = rw.walk(100, 99);
        assert_eq!(path.len(), 101, "path length mismatch");
        for pos in &path {
            assert_eq!(pos.len(), 2, "position has wrong dimension");
        }
    }
    // Exponential(lambda=2) has mean 1/lambda = 0.5.
    #[test]
    fn test_rng_next_exponential() {
        let mut rng = Rng::new(42);
        let n = 5000;
        let lambda = 2.0;
        let samples: Vec<f64> = (0..n).map(|_| rng.next_exponential(lambda)).collect();
        let m = mean(&samples);
        assert!(
            (m - 0.5).abs() < 0.05,
            "exponential mean={m}, expected ~0.5"
        );
    }
    #[test]
    fn test_rng_next_poisson() {
        let mut rng = Rng::new(42);
        let n = 5000;
        let lambda = 3.0;
        let samples: Vec<f64> = (0..n).map(|_| rng.next_poisson(lambda) as f64).collect();
        let m = mean(&samples);
        assert!((m - 3.0).abs() < 0.2, "poisson mean={m}, expected ~3.0");
    }
    #[test]
    fn test_merton_jump_diffusion_positive_prices() {
        let model = MertonJumpDiffusion::new(0.05, 0.2, 1.0, -0.01, 0.05);
        let path = model.simulate(100.0, 1.0, 0.01, 42);
        for &v in &path {
            assert!(v > 0.0, "Merton model produced non-positive price: {v}");
        }
    }
    #[test]
    fn test_merton_mean_jump_size() {
        let model = MertonJumpDiffusion::new(0.05, 0.2, 1.0, 0.0, 0.0);
        let mj = model.mean_jump_size();
        assert!(mj.abs() < 1e-12, "mean_jump_size={mj}");
    }
    // Feller: 2*kappa*theta >= xi^2 keeps the CIR variance strictly positive.
    #[test]
    fn test_heston_feller_condition() {
        let model = HestonModel::new(0.05, 2.0, 0.04, 0.3, -0.7);
        assert!(model.feller_satisfied());
        let model2 = HestonModel::new(0.05, 0.1, 0.01, 0.5, -0.7);
        assert!(!model2.feller_satisfied());
    }
    #[test]
    fn test_heston_simulation_positive_variance() {
        let model = HestonModel::new(0.05, 2.0, 0.04, 0.3, -0.7);
        let (prices, variances) = model.simulate(100.0, 0.04, 1.0, 0.01, 42);
        assert!(!prices.is_empty());
        for &v in &variances {
            assert!(v >= 0.0, "negative variance: {v}");
        }
    }
    // At t = 0 every path equals s0, so mean = s0 and std = 0 exactly.
    #[test]
    fn test_monte_carlo_gbm_paths() {
        let (mean_path, std_path) = monte_carlo_gbm_paths(100.0, 0.05, 0.2, 1.0, 0.1, 200, 42);
        assert!(!mean_path.is_empty());
        assert_eq!(mean_path.len(), std_path.len());
        assert!((mean_path[0] - 100.0).abs() < 1e-10);
        assert!(std_path[0].abs() < 1e-10);
    }
    // f = identity, so the estimate should match E[S_T] = s0*exp(mu*T);
    // the extra +1.0 absolute slack keeps the seeded run inside the bound.
    #[test]
    fn test_monte_carlo_terminal_identity() {
        let (est, stderr) = monte_carlo_terminal_estimate(100.0, 0.05, 0.2, 1.0, 5000, |s| s, 42);
        let expected = 100.0 * (0.05_f64).exp();
        assert!(
            (est - expected).abs() < 3.0 * stderr + 1.0,
            "est={est}, expected={expected}, stderr={stderr}"
        );
    }
    #[test]
    fn test_antithetic_reduces_variance() {
        let (_, se_standard) =
            monte_carlo_terminal_estimate(100.0, 0.05, 0.2, 1.0, 1000, |s| s, 42);
        let (_, se_anti) = antithetic_terminal_estimate(100.0, 0.05, 0.2, 1.0, 1000, |s| s, 42);
        assert!(se_standard.is_finite());
        assert!(se_anti.is_finite());
    }
    #[test]
    fn test_control_variate_estimate() {
        let (est, stderr) =
            control_variate_terminal_estimate(100.0, 0.05, 0.2, 1.0, 2000, |s| s, 42);
        let expected = 100.0 * (0.05_f64).exp();
        assert!(
            (est - expected).abs() < 3.0 * stderr + 2.0,
            "est={est}, expected={expected}, stderr={stderr}"
        );
    }
    #[test]
    fn test_wiener_bridge_endpoints() {
        let mut wp = WienerProcess::new(42);
        let bridge = wp.bridge(0.0, 1.0, 0.01, 0.0, 1.0);
        assert!(
            (bridge[0].1 - 0.0).abs() < 1e-10,
            "bridge start = {}",
            bridge[0].1
        );
        let last = bridge.last().unwrap().1;
        assert!(
            (last - 1.0).abs() < 0.1,
            "bridge end = {last}, expected ~1.0"
        );
    }
    #[test]
    fn test_gaussian_random_walk() {
        let rw = RandomWalk::new(3, 0.1);
        let path = rw.gaussian_walk(100, 42);
        assert_eq!(path.len(), 101);
        for pos in &path {
            assert_eq!(pos.len(), 3);
        }
    }
    #[test]
    fn test_msd_computation() {
        let rw = RandomWalk::new(1, 1.0);
        let path = rw.walk(50, 42);
        let msd = RandomWalk::msd(&path);
        assert_eq!(msd.len(), path.len());
        assert!(msd[0].abs() < 1e-10);
    }
    #[test]
    fn test_skewness_symmetric() {
        let data: Vec<f64> = (-50..=50).map(|i| i as f64).collect();
        let s = skewness(&data);
        assert!(s.abs() < 1e-10, "skewness of symmetric data = {s}");
    }
    // Excess kurtosis of a uniform distribution is -6/5 = -1.2.
    #[test]
    fn test_kurtosis_uniform() {
        let data: Vec<f64> = (0..10000).map(|i| i as f64 / 10000.0).collect();
        let k = kurtosis(&data);
        assert!((k - (-1.2)).abs() < 0.05, "kurtosis = {k}, expected ~ -1.2");
    }
    #[test]
    fn test_running_mean() {
        let data = [1.0, 3.0, 5.0, 7.0];
        let rm = running_mean(&data);
        assert!((rm[0] - 1.0).abs() < 1e-10);
        assert!((rm[1] - 2.0).abs() < 1e-10);
        assert!((rm[2] - 3.0).abs() < 1e-10);
        assert!((rm[3] - 4.0).abs() < 1e-10);
    }
    // Independent draws should have ESS close to N; 500 is a loose lower bound.
    #[test]
    fn test_effective_sample_size_iid() {
        let mut rng = Rng::new(42);
        let data: Vec<f64> = (0..1000).map(|_| rng.next_normal()).collect();
        let ess = effective_sample_size(&data, 20);
        assert!(ess > 500.0, "ESS={ess} too low for iid data");
    }
    #[test]
    fn test_wiener_sampler_variance() {
        let dt = 0.01;
        let ws = WienerSampler::new(dt);
        let mut rng = rand::rng();
        let samples = ws.sample(10_000, &mut rng);
        let m: f64 = samples.iter().sum::<f64>() / samples.len() as f64;
        let v: f64 =
            samples.iter().map(|x| (x - m).powi(2)).sum::<f64>() / (samples.len() - 1) as f64;
        assert!(m.abs() < 0.05, "WienerSampler mean={m}");
        assert!(
            (v - dt).abs() < 0.005,
            "WienerSampler variance={v}, expected ~{dt}"
        );
    }
    #[test]
    fn test_wiener_sampler_length() {
        let ws = WienerSampler::new(0.1);
        let mut rng = rand::rng();
        let s = ws.sample(50, &mut rng);
        assert_eq!(s.len(), 50);
    }
    #[test]
    fn test_fbm_length() {
        let fbm = FractionalBrownianMotion::new(0.7, 0.01);
        let mut rng = rand::rng();
        let s = fbm.sample(100, &mut rng);
        assert_eq!(s.len(), 100);
    }
    #[test]
    fn test_fbm_finite_values() {
        let fbm = FractionalBrownianMotion::new(0.8, 0.01);
        let mut rng = rand::rng();
        let s = fbm.sample(200, &mut rng);
        for &v in &s {
            assert!(v.is_finite(), "fBm produced non-finite value");
        }
    }
    // Unseeded RNG: retry a few steps so a single tiny draw can't flake the test.
    #[test]
    fn test_langevin_integrator_step_changes_x() {
        let li = LangevinIntegrator::new(1.0, 1.0, 1.0);
        let mut rng = rand::rng();
        let x0 = 0.0_f64;
        let mut changed = false;
        for _ in 0..20 {
            let x_new = li.step(x0, 10.0, 0.01, &mut rng);
            if (x_new - x0).abs() > 1e-10 {
                changed = true;
                break;
            }
        }
        assert!(changed, "LangevinIntegrator: x did not change");
    }
    #[test]
    fn test_langevin_integrator_zero_force_diffuses() {
        let li = LangevinIntegrator::new(1.0, 1.0, 1.0);
        let mut rng = rand::rng();
        let n = 1000;
        let dt = 0.01;
        let mut x = 0.0_f64;
        for _ in 0..n {
            x = li.step(x, 0.0, dt, &mut rng);
        }
        assert!(x.is_finite(), "LangevinIntegrator diverged");
    }
    #[test]
    fn test_mh_acceptance_gaussian() {
        let mh = MetropolisHastings::new(1.0);
        let mut rng = rand::rng();
        let mut n_accept = 0usize;
        let n_steps = 1000;
        let mut x = 0.0_f64;
        for _ in 0..n_steps {
            let (x_new, accepted) = mh.step(|xi| 0.5 * xi * xi, x, 0.5, &mut rng);
            x = x_new;
            if accepted {
                n_accept += 1;
            }
        }
        let accept_ratio = n_accept as f64 / n_steps as f64;
        assert!(
            accept_ratio > 0.2,
            "MH acceptance ratio too low: {accept_ratio}"
        );
    }
    #[test]
    fn test_mh_returns_tuple() {
        let mh = MetropolisHastings::new(1.0);
        let mut rng = rand::rng();
        let (x_new, _accepted) = mh.step(|x| x * x, 0.5, 0.1, &mut rng);
        assert!(x_new.is_finite());
    }
    #[test]
    fn test_kleinman_kramers_step() {
        let kk = KleinmanKramers::new(1.0, 1.0, 1.0);
        let mut rng = rand::rng();
        let (x_new, v_new) = kk.step(0.0, 0.0, 1.0, 0.01, &mut rng);
        assert!(x_new.is_finite());
        assert!(v_new.is_finite());
    }
    #[test]
    fn test_kleinman_kramers_many_steps() {
        let kk = KleinmanKramers::new(0.5, 1.0, 1.0);
        let mut rng = rand::rng();
        let mut x = 0.0_f64;
        let mut v = 0.0_f64;
        for _ in 0..1000 {
            let (xn, vn) = kk.step(x, v, -x, 0.001, &mut rng);
            x = xn;
            v = vn;
        }
        assert!(x.is_finite() && v.is_finite(), "KK dynamics diverged");
    }
    // With barrier = D and omega_0 = omega_b = 1, tau = 2*pi*e exactly.
    #[test]
    fn test_mfpt_kramers_formula() {
        let barrier = 1.0;
        let d = 1.0;
        let omega_0 = 1.0;
        let omega_b = 1.0;
        let tau = mean_first_passage_time(barrier, d, omega_0, omega_b);
        let expected = 2.0 * PI * (1.0_f64).exp();
        assert!(
            (tau - expected).abs() < 1e-10,
            "MFPT={tau}, expected={expected}"
        );
    }
    #[test]
    fn test_mfpt_increases_with_barrier() {
        let tau_low = mean_first_passage_time(1.0, 1.0, 1.0, 1.0);
        let tau_high = mean_first_passage_time(2.0, 1.0, 1.0, 1.0);
        assert!(
            tau_high > tau_low,
            "MFPT should increase with barrier height"
        );
    }
    #[test]
    fn test_fpt_reasonable_range() {
        let ou = OrnsteinUhlenbeck::new(2.0, 0.0, 1.0);
        let fpt = EmpiricalFirstPassageTime::estimate_ou(&ou, 0.0, 1.0, 0.01, 0.0, 50.0, 200, 42);
        assert!(fpt.is_finite() && fpt > 0.0, "FPT={fpt}");
    }
    // Only a loose ordering is asserted (factor-of-two slack) because empirical
    // first-passage estimates are noisy.
    #[test]
    fn test_fpt_higher_barrier_longer() {
        let ou = OrnsteinUhlenbeck::new(2.0, 0.0, 1.5);
        let fpt_low =
            EmpiricalFirstPassageTime::estimate_ou(&ou, 0.0, 0.5, 0.01, 0.0, 20.0, 200, 42);
        let fpt_high =
            EmpiricalFirstPassageTime::estimate_ou(&ou, 0.0, 2.0, 0.01, 0.0, 20.0, 200, 42);
        assert!(
            fpt_high >= fpt_low * 0.5,
            "Higher barrier should not be much faster: low={fpt_low}, high={fpt_high}"
        );
    }
    #[test]
    fn test_antithetic_gbm_mean_accuracy() {
        let gbm = GeometricBrownianMotion::new(0.05, 0.2);
        let s0 = 100.0;
        let t = 1.0;
        let n_pairs = 2000usize;
        let (est, _se) = AntitheticGbm::estimate_mean(s0, &gbm, t, 0.01, n_pairs, 42);
        let expected = gbm.analytical_mean(s0, t);
        assert!(
            (est - expected).abs() / expected < 0.05,
            "antithetic GBM mean={est}, expected={expected}"
        );
    }
    #[test]
    fn test_antithetic_gbm_pairs_positive() {
        let gbm = GeometricBrownianMotion::new(0.05, 0.2);
        let pairs = AntitheticGbm::generate_pairs(100.0, &gbm, 1.0, 0.01, 10, 7);
        for &(a, b) in &pairs {
            assert!(
                a > 0.0 && b > 0.0,
                "both paths must be positive: ({a}, {b})"
            );
        }
    }
    #[test]
    fn test_control_variate_gbm_variance_reduced() {
        let gbm = GeometricBrownianMotion::new(0.05, 0.2);
        let (est_cv, se_cv) = ControlVariateGbm::estimate(100.0, &gbm, 1.0, 0.01, 1000, |s| s, 42);
        let expected = gbm.analytical_mean(100.0, 1.0);
        assert!(
            (est_cv - expected).abs() < 3.0 * se_cv + 2.0,
            "CV estimate={est_cv}, expected={expected}"
        );
    }
    #[test]
    fn test_control_variate_gbm_payoff() {
        let gbm = GeometricBrownianMotion::new(0.05, 0.2);
        let s0 = 100.0;
        let k = 100.0;
        let (est, _se) =
            ControlVariateGbm::estimate(s0, &gbm, 1.0, 0.01, 2000, |s| (s - k).max(0.0), 99);
        assert!(
            est > 0.0 && est < s0,
            "Call payoff should be positive and < S0: {est}"
        );
    }
    #[test]
    fn test_ou_exact_sampler_mean_convergence() {
        let ou = OrnsteinUhlenbeck::new(3.0, 2.0, 0.5);
        let sampler = OuExactSampler::new(ou.clone());
        let mut x = 0.0_f64;
        let mut rng = Rng::new(42);
        let dt = 0.1;
        for _ in 0..10_000 {
            x = sampler.step(x, dt, &mut rng);
        }
        assert!(
            (x - 2.0).abs() < 0.5,
            "OU exact sampler mean={x}, expected ~2.0"
        );
    }
    // Burn in 1000 steps first, then compare the empirical variance of the
    // stationary chain against the analytical stationary variance.
    #[test]
    fn test_ou_exact_sampler_variance() {
        let ou = OrnsteinUhlenbeck::new(2.0, 0.0, 0.4);
        let sampler = OuExactSampler::new(ou.clone());
        let expected_var = ou.stationary_variance();
        let n = 50_000usize;
        let dt = 0.05;
        let mut x = 0.0_f64;
        let mut rng = Rng::new(123);
        for _ in 0..1000 {
            x = sampler.step(x, dt, &mut rng);
        }
        let samples: Vec<f64> = (0..n)
            .map(|_| {
                x = sampler.step(x, dt, &mut rng);
                x
            })
            .collect();
        let m: f64 = samples.iter().sum::<f64>() / n as f64;
        let v: f64 = samples.iter().map(|s| (s - m).powi(2)).sum::<f64>() / (n - 1) as f64;
        assert!(
            (v - expected_var).abs() / expected_var < 0.15,
            "OU exact variance={v}, expected={expected_var}"
        );
    }
}
#[cfg(test)]
mod tests_new_stochastic {
    use crate::Rng;
    use crate::stochastic::{
        CirProcess, HullWhiteModel, LevyFlight, SabrModel, VarianceGammaProcess,
    };

    // --- Lévy flight ---

    #[test]
    fn test_levy_flight_path_length() {
        let flight = LevyFlight::new(1.5, 1.0);
        let trajectory = flight.path(100, 42);
        assert_eq!(
            trajectory.len(),
            101,
            "Lévy flight path should have n_steps+1 points"
        );
    }
    #[test]
    fn test_levy_flight_starts_at_zero() {
        let flight = LevyFlight::new(1.5, 1.0);
        let trajectory = flight.path(50, 7);
        assert!(trajectory[0].abs() < 1e-12, "path should start at 0");
    }
    #[test]
    fn test_levy_flight_finite_values() {
        let flight = LevyFlight::new(1.5, 1.0);
        for &value in flight.path(200, 123).iter() {
            assert!(value.is_finite(), "Lévy flight produced non-finite value");
        }
    }
    #[test]
    fn test_levy_flight_gaussian_limit() {
        // With alpha = 2 the stable law degenerates to a Gaussian, so steps
        // should be centered on zero.
        let flight = LevyFlight::new(2.0, 1.0);
        let mut rng = Rng::new(42);
        let mut total = 0.0_f64;
        for _ in 0..5000 {
            total += flight.sample_step(&mut rng);
        }
        let m = total / 5000.0;
        assert!(
            m.abs() < 0.2,
            "Gaussian (alpha=2) steps should have mean ~0, got {m}"
        );
    }
    #[test]
    fn test_levy_flight_different_seeds_differ() {
        let flight = LevyFlight::new(1.5, 1.0);
        let first = flight.path(20, 1);
        let second = flight.path(20, 2);
        let mut differs = false;
        for (a, b) in first.iter().zip(second.iter()) {
            if (a - b).abs() > 1e-10 {
                differs = true;
                break;
            }
        }
        assert!(differs, "different seeds should produce different paths");
    }

    // --- Variance-Gamma process ---

    #[test]
    fn test_vg_path_length() {
        let process = VarianceGammaProcess::new(0.1, 0.2, 0.1);
        assert_eq!(process.simulate(1.0, 100, 42).len(), 101);
    }
    #[test]
    fn test_vg_starts_at_zero() {
        let process = VarianceGammaProcess::new(0.0, 0.2, 0.1);
        let trajectory = process.simulate(1.0, 50, 7);
        assert!(trajectory[0].abs() < 1e-12);
    }
    #[test]
    fn test_vg_finite_values() {
        let process = VarianceGammaProcess::new(0.1, 0.2, 0.05);
        for &value in process.simulate(1.0, 200, 99).iter() {
            assert!(value.is_finite(), "VG produced non-finite value");
        }
    }
    #[test]
    fn test_vg_mean_increment() {
        // Expected increment is drift * dt (theta = 0.5 here).
        let process = VarianceGammaProcess::new(0.5, 0.2, 0.1);
        let dt = 0.01;
        assert!((process.mean_increment(dt) - 0.5 * dt).abs() < 1e-12);
    }
    #[test]
    fn test_vg_variance_increment() {
        // With zero drift the increment variance reduces to sigma^2 * dt.
        let process = VarianceGammaProcess::new(0.0, 0.3, 0.1);
        let dt = 0.01;
        assert!((process.variance_increment(dt) - 0.3 * 0.3 * dt).abs() < 1e-12);
    }
    #[test]
    fn test_vg_ensemble_mean() {
        // The average terminal value over many independent paths should
        // match the analytical mean increment over [0, t_end].
        let process = VarianceGammaProcess::new(0.2, 0.1, 0.05);
        let n_paths = 2000usize;
        let t_end = 0.5;
        let mut total = 0.0_f64;
        for seed in 0..n_paths {
            total += *process.simulate(t_end, 50, seed as u64).last().unwrap();
        }
        let emp_mean = total / n_paths as f64;
        let expected_mean = process.mean_increment(t_end);
        assert!(
            (emp_mean - expected_mean).abs() < 0.1,
            "VG ensemble mean={emp_mean}, expected~{expected_mean}"
        );
    }

    // --- SABR model ---

    #[test]
    fn test_sabr_path_lengths() {
        let model = SabrModel::new(100.0, 0.3, 0.5, 0.4, -0.3);
        let (forwards, vols) = model.simulate(1.0, 100, 42);
        assert_eq!(forwards.len(), 101);
        assert_eq!(vols.len(), 101);
    }
    #[test]
    fn test_sabr_initial_values() {
        let model = SabrModel::new(100.0, 0.3, 0.5, 0.4, -0.3);
        let (forwards, vols) = model.simulate(1.0, 50, 7);
        assert!((forwards[0] - 100.0).abs() < 1e-10);
        assert!((vols[0] - 0.3).abs() < 1e-10);
    }
    #[test]
    fn test_sabr_vols_positive() {
        let model = SabrModel::new(100.0, 0.2, 0.5, 0.3, -0.2);
        let (_forwards, vols) = model.simulate(1.0, 200, 123);
        for &v in vols.iter() {
            assert!(v >= 0.0, "SABR volatility should be non-negative, got {v}");
        }
    }
    #[test]
    fn test_sabr_implied_vol_atm_positive() {
        let model = SabrModel::new(100.0, 0.2, 0.5, 0.3, -0.2);
        let iv = model.implied_vol_approx(100.0, 1.0);
        assert!(iv > 0.0, "ATM implied vol should be positive, got {iv}");
    }
    #[test]
    fn test_sabr_implied_vol_otm() {
        // Both at-the-money and slightly out-of-the-money quotes must be
        // finite and positive.
        let model = SabrModel::new(100.0, 0.2, 0.5, 0.3, -0.2);
        let iv_atm = model.implied_vol_approx(100.0, 1.0);
        let iv_otm = model.implied_vol_approx(102.0, 1.0);
        assert!(iv_atm.is_finite() && iv_atm > 0.0, "ATM iv={iv_atm}");
        assert!(iv_otm.is_finite() && iv_otm > 0.0, "OTM iv={iv_otm}");
    }

    // --- CIR process ---

    #[test]
    fn test_cir_path_length() {
        let process = CirProcess::new(2.0, 0.05, 0.1);
        let trajectory = process.simulate(0.05, 1.0, 0.01, 42);
        assert!(trajectory.len() > 90, "CIR path should have ~101 points");
    }
    #[test]
    fn test_cir_non_negative() {
        let process = CirProcess::new(2.0, 0.05, 0.1);
        for &v in process.simulate(0.05, 1.0, 0.01, 99).iter() {
            assert!(v >= 0.0, "CIR process should be non-negative, got {v}");
        }
    }
    #[test]
    fn test_cir_feller_condition() {
        // Feller condition: 2*kappa*theta >= sigma^2.
        assert!(CirProcess::new(2.0, 0.05, 0.1).feller_satisfied());
        assert!(!CirProcess::new(0.1, 0.01, 0.5).feller_satisfied());
    }
    #[test]
    fn test_cir_stationary_mean() {
        let process = CirProcess::new(2.0, 0.05, 0.1);
        assert!((process.stationary_mean() - 0.05).abs() < 1e-12);
    }
    #[test]
    fn test_cir_stationary_variance() {
        // Stationary variance = theta * sigma^2 / (2 * kappa).
        let process = CirProcess::new(2.0, 0.05, 0.1);
        let expected = 0.05 * 0.01 / 4.0;
        assert!((process.stationary_variance() - expected).abs() < 1e-12);
    }
    #[test]
    fn test_cir_conditional_mean_long_time() {
        // For t >> 1/kappa the conditional mean relaxes to theta.
        let process = CirProcess::new(3.0, 0.05, 0.1);
        let m = process.conditional_mean(0.1, 10.0);
        assert!(
            (m - 0.05).abs() < 0.001,
            "long-time conditional mean={m}, expected~0.05"
        );
    }
    #[test]
    fn test_cir_convergence_to_stationary() {
        // Average over the final quarter of a long path should approach theta.
        let process = CirProcess::new(3.0, 0.05, 0.1);
        let trajectory = process.simulate(0.02, 20.0, 0.01, 42);
        let start = trajectory.len() * 3 / 4;
        let tail = &trajectory[start..];
        let m: f64 = tail.iter().sum::<f64>() / tail.len() as f64;
        assert!((m - 0.05).abs() < 0.03, "CIR tail mean={m}, expected~0.05");
    }

    // --- Hull-White model ---

    #[test]
    fn test_hull_white_path_length() {
        let model = HullWhiteModel::new(0.5, 0.01, 0.03);
        let rates = model.simulate_short_rate(0.02, 1.0, 0.01, 42);
        assert!(rates.len() > 90, "HW path should have ~101 points");
    }
    #[test]
    fn test_hull_white_mean_convergence() {
        let model = HullWhiteModel::new(2.0, 0.01, 0.06);
        let rates = model.simulate_short_rate(0.02, 20.0, 0.01, 42);
        let tail = &rates[rates.len() * 3 / 4..];
        let m: f64 = tail.iter().sum::<f64>() / tail.len() as f64;
        assert!((m - 0.03).abs() < 0.02, "HW tail mean={m}, expected~0.03");
    }
    #[test]
    fn test_hull_white_analytical_mean() {
        // At t = 0 the analytical mean equals the initial rate.
        let model = HullWhiteModel::new(1.0, 0.01, 0.05);
        let m0 = model.mean_rate(0.02, 0.0);
        assert!((m0 - 0.02).abs() < 1e-10);
    }
    #[test]
    fn test_hull_white_variance_zero_at_t0() {
        let model = HullWhiteModel::new(0.5, 0.01, 0.03);
        let v = model.variance_rate(0.0);
        assert!(v.abs() < 1e-12, "variance at t=0 should be 0, got {v}");
    }
    #[test]
    fn test_hull_white_variance_increases() {
        let model = HullWhiteModel::new(0.5, 0.02, 0.03);
        let v1 = model.variance_rate(0.5);
        let v2 = model.variance_rate(1.0);
        assert!(v2 > v1, "variance should increase with time");
    }
    #[test]
    fn test_hull_white_bond_price_at_zero_maturity() {
        let model = HullWhiteModel::new(0.5, 0.01, 0.03);
        let p = model.bond_price(0.02, 0.0);
        assert!(
            (p - 1.0).abs() < 1e-6,
            "bond price at T=0 should be ~1, got {p}"
        );
    }
    #[test]
    fn test_hull_white_bond_price_positive() {
        let model = HullWhiteModel::new(0.5, 0.01, 0.03);
        let p = model.bond_price(0.02, 5.0);
        assert!(p > 0.0 && p < 1.0, "bond price should be in (0,1), got {p}");
    }
    #[test]
    fn test_hull_white_finite_path() {
        let model = HullWhiteModel::new(0.5, 0.01, 0.03);
        for &rate in model.simulate_short_rate(0.02, 5.0, 0.01, 99).iter() {
            assert!(rate.is_finite(), "HW short rate should be finite");
        }
    }
}