1#![allow(clippy::needless_range_loop)]
2#![allow(dead_code)]
9#![allow(clippy::too_many_arguments)]
10
11use std::f64::consts::PI;
12
/// Minimal 64-bit linear congruential generator (Knuth's MMIX
/// multiplier/increment) used as the deterministic noise source for
/// every simulator in this file.
struct Lcg {
    state: u64, // full 64-bit LCG state; high bits are the good ones
}

impl Lcg {
    /// Seeds the generator; the wrapping +1 offsets the raw seed so two
    /// adjacent seeds do not share a state by construction.
    fn new(seed: u64) -> Self {
        Self {
            state: seed.wrapping_add(1),
        }
    }

    /// Advances the LCG one step and returns the new raw state.
    fn next_u64(&mut self) -> u64 {
        self.state = self
            .state
            .wrapping_mul(6_364_136_223_846_793_005)
            .wrapping_add(1_442_695_040_888_963_407);
        self.state
    }

    /// Uniform draw in [0, 1), built from the top 53 bits of the state
    /// (exactly the precision of an f64 mantissa).
    fn uniform(&mut self) -> f64 {
        (self.next_u64() >> 11) as f64 / (1u64 << 53) as f64
    }

    /// Standard normal draw via the Box-Muller transform; the first
    /// uniform is clamped away from zero so ln() stays finite.
    fn normal(&mut self) -> f64 {
        let radius_u = self.uniform().max(1e-300);
        let angle_u = self.uniform();
        (-2.0 * radius_u.ln()).sqrt() * (2.0 * PI * angle_u).cos()
    }

    /// Normal draw with mean `mu` and standard deviation `sigma`.
    fn normal_ms(&mut self, mu: f64, sigma: f64) -> f64 {
        mu + sigma * self.normal()
    }
}
48
49pub struct BrownianMotion;
53
54impl BrownianMotion {
55 pub fn standard(t: f64, n_steps: usize, seed: u64) -> Vec<f64> {
60 let dt = t / n_steps as f64;
61 let sqrt_dt = dt.sqrt();
62 let mut rng = Lcg::new(seed);
63 let mut path = vec![0.0f64; n_steps + 1];
64 for i in 1..=n_steps {
65 path[i] = path[i - 1] + sqrt_dt * rng.normal();
66 }
67 path
68 }
69
70 pub fn geometric(s0: f64, mu: f64, sigma: f64, t: f64, n_steps: usize, seed: u64) -> Vec<f64> {
74 let dt = t / n_steps as f64;
75 let drift = (mu - 0.5 * sigma * sigma) * dt;
76 let vol = sigma * dt.sqrt();
77 let mut rng = Lcg::new(seed);
78 let mut path = vec![s0; n_steps + 1];
79 for i in 1..=n_steps {
80 path[i] = path[i - 1] * (drift + vol * rng.normal()).exp();
81 }
82 path
83 }
84
85 pub fn fractional(hurst: f64, n_steps: usize, seed: u64) -> Vec<f64> {
90 assert!(
91 hurst > 0.0 && hurst < 1.0,
92 "Hurst exponent must be in (0, 1)"
93 );
94 let n = n_steps;
95 let gamma = |k: f64| -> f64 {
97 0.5 * ((k - 1.0).abs().powf(2.0 * hurst) - 2.0 * k.abs().powf(2.0 * hurst)
98 + (k + 1.0).abs().powf(2.0 * hurst))
99 };
100 let m = 2 * n;
102 let mut c = vec![0.0f64; m];
103 c[0] = 1.0;
104 for k in 1..n {
105 c[k] = gamma(k as f64);
106 c[m - k] = c[k];
107 }
108 let mut eigvals = vec![0.0f64; m];
110 for j in 0..m {
111 let mut sum = 0.0;
112 for k in 0..m {
113 sum += c[k] * (2.0 * PI * j as f64 * k as f64 / m as f64).cos();
114 }
115 eigvals[j] = sum / m as f64;
116 }
117 let mut rng = Lcg::new(seed);
118 let mut w_real = vec![0.0f64; m];
120 let mut w_imag = vec![0.0f64; m];
121 for k in 0..m {
122 let z1 = rng.normal();
123 let z2 = rng.normal();
124 let s = if eigvals[k] > 0.0 {
125 eigvals[k].sqrt()
126 } else {
127 0.0
128 };
129 w_real[k] = s * z1;
130 w_imag[k] = s * z2;
131 }
132 let mut path = vec![0.0f64; n + 1];
134 for i in 1..=n {
135 let mut sum = 0.0;
136 for k in 0..m {
137 sum += w_real[k] * (2.0 * PI * i as f64 * k as f64 / m as f64).cos()
138 - w_imag[k] * (2.0 * PI * i as f64 * k as f64 / m as f64).sin();
139 }
140 path[i] = sum;
141 }
142 for i in 1..=n {
144 path[i] += path[i - 1];
145 }
146 path
147 }
148
149 pub fn quadratic_variation(path: &[f64]) -> f64 {
153 path.windows(2).map(|w| (w[1] - w[0]).powi(2)).sum()
154 }
155
156 pub fn mean(path: &[f64]) -> f64 {
158 path.iter().sum::<f64>() / path.len() as f64
159 }
160
161 pub fn variance(path: &[f64]) -> f64 {
163 let m = Self::mean(path);
164 path.iter().map(|x| (x - m).powi(2)).sum::<f64>() / (path.len() - 1) as f64
165 }
166}
167
168pub struct PoissonProcess;
172
173impl PoissonProcess {
174 pub fn homogeneous(lambda: f64, t: f64, seed: u64) -> Vec<f64> {
178 assert!(lambda > 0.0, "Rate must be positive");
179 let mut rng = Lcg::new(seed);
180 let mut times = Vec::new();
181 let mut current = 0.0f64;
182 loop {
183 let u = rng.uniform().max(1e-300);
185 let inter = -u.ln() / lambda;
186 current += inter;
187 if current >= t {
188 break;
189 }
190 times.push(current);
191 }
192 times
193 }
194
195 pub fn inhomogeneous<F>(lambda: F, lambda_max: f64, t: f64, seed: u64) -> Vec<f64>
200 where
201 F: Fn(f64) -> f64,
202 {
203 let raw = Self::homogeneous(lambda_max, t, seed);
204 let mut rng = Lcg::new(seed.wrapping_add(1));
205 raw.into_iter()
206 .filter(|&ti| rng.uniform() < lambda(ti) / lambda_max)
207 .collect()
208 }
209
210 pub fn compound<F>(lambda: f64, t: f64, jump_dist: F, seed: u64) -> (Vec<f64>, Vec<f64>)
214 where
215 F: Fn(f64) -> f64,
216 {
217 let mut rng = Lcg::new(seed);
218 let mut times = Vec::new();
219 let mut values = vec![0.0f64];
220 let mut current = 0.0f64;
221 let mut cum = 0.0f64;
222 loop {
223 let u = rng.uniform().max(1e-300);
224 current += -u.ln() / lambda;
225 if current >= t {
226 break;
227 }
228 let jump = jump_dist(rng.uniform());
229 cum += jump;
230 times.push(current);
231 values.push(cum);
232 }
233 (times, values)
234 }
235
236 pub fn expected_count(lambda: f64, t: f64) -> f64 {
238 lambda * t
239 }
240
241 pub fn inter_arrival_cdf(lambda: f64, t: f64) -> f64 {
243 1.0 - (-lambda * t).exp()
244 }
245
246 pub fn empirical_rate(times: &[f64], t: f64) -> f64 {
248 times.len() as f64 / t
249 }
250}
251
252pub struct MarkovChain {
256 pub p: Vec<f64>,
258 pub n_states: usize,
260}
261
262impl MarkovChain {
263 pub fn new(p: Vec<f64>, n_states: usize) -> Self {
265 Self { p, n_states }
266 }
267
268 pub fn simulate(&self, init: usize, n_steps: usize, seed: u64) -> Vec<usize> {
270 let mut rng = Lcg::new(seed);
271 let mut path = vec![init];
272 let mut state = init;
273 for _ in 0..n_steps {
274 let u = rng.uniform();
275 let mut cum = 0.0;
276 let mut next = self.n_states - 1;
277 for j in 0..self.n_states {
278 cum += self.p[state * self.n_states + j];
279 if u < cum {
280 next = j;
281 break;
282 }
283 }
284 path.push(next);
285 state = next;
286 }
287 path
288 }
289
290 pub fn stationary_distribution(&self, tol: f64, max_iter: usize) -> Vec<f64> {
294 let n = self.n_states;
295 let mut pi = vec![1.0 / n as f64; n];
296 for _ in 0..max_iter {
297 let new_pi: Vec<f64> = (0..n)
298 .map(|j| (0..n).map(|i| pi[i] * self.p[i * n + j]).sum())
299 .collect();
300 let diff: f64 = new_pi.iter().zip(&pi).map(|(a, b)| (a - b).abs()).sum();
301 pi = new_pi;
302 if diff < tol {
303 break;
304 }
305 }
306 pi
307 }
308
309 pub fn n_step_matrix(&self, n: usize) -> Vec<f64> {
311 let k = self.n_states;
312 let mut result = vec![0.0f64; k * k];
313 for i in 0..k {
314 result[i * k + i] = 1.0;
315 } let mut base = self.p.clone();
317 let mut exp = n;
318 while exp > 0 {
319 if exp & 1 == 1 {
320 result = Self::matmul_sq(&result, &base, k);
321 }
322 base = Self::matmul_sq(&base, &base, k);
323 exp >>= 1;
324 }
325 result
326 }
327
328 pub fn mixing_time(&self, init: usize, eps: f64) -> usize {
330 let pi = self.stationary_distribution(1e-10, 2000);
331 let n = self.n_states;
332 let mut dist = vec![0.0f64; n];
333 dist[init] = 1.0;
334 for t in 1..10000 {
335 let new_dist: Vec<f64> = (0..n)
336 .map(|j| (0..n).map(|i| dist[i] * self.p[i * n + j]).sum())
337 .collect();
338 dist = new_dist;
339 let tv: f64 = dist
340 .iter()
341 .zip(&pi)
342 .map(|(a, b)| (a - b).abs())
343 .sum::<f64>()
344 * 0.5;
345 if tv < eps {
346 return t;
347 }
348 }
349 10000
350 }
351
352 pub fn empirical_transition_matrix(trajectory: &[usize], n_states: usize) -> Vec<f64> {
354 let mut counts = vec![0.0f64; n_states * n_states];
355 for w in trajectory.windows(2) {
356 counts[w[0] * n_states + w[1]] += 1.0;
357 }
358 for i in 0..n_states {
359 let row_sum: f64 = (0..n_states).map(|j| counts[i * n_states + j]).sum();
360 if row_sum > 0.0 {
361 for j in 0..n_states {
362 counts[i * n_states + j] /= row_sum;
363 }
364 }
365 }
366 counts
367 }
368
369 fn matmul_sq(a: &[f64], b: &[f64], n: usize) -> Vec<f64> {
370 let mut c = vec![0.0f64; n * n];
371 for i in 0..n {
372 for k in 0..n {
373 if a[i * n + k] == 0.0 {
374 continue;
375 }
376 for j in 0..n {
377 c[i * n + j] += a[i * n + k] * b[k * n + j];
378 }
379 }
380 }
381 c
382 }
383}
384
385pub struct ContinuousTimeMarkov {
389 pub q: Vec<f64>,
391 pub n_states: usize,
393}
394
395impl ContinuousTimeMarkov {
396 pub fn new(q: Vec<f64>, n_states: usize) -> Self {
398 Self { q, n_states }
399 }
400
401 pub fn simulate(&self, init: usize, t_max: f64, seed: u64) -> (Vec<f64>, Vec<usize>) {
405 let mut rng = Lcg::new(seed);
406 let n = self.n_states;
407 let mut times = vec![0.0f64];
408 let mut states = vec![init];
409 let mut state = init;
410 let mut t = 0.0f64;
411
412 loop {
413 let rate_out: f64 = -(self.q[state * n + state]);
415 if rate_out <= 1e-300 {
416 break;
417 } let dt = -rng.uniform().max(1e-300).ln() / rate_out;
419 t += dt;
420 if t >= t_max {
421 break;
422 }
423 let u = rng.uniform() * rate_out;
425 let mut cum = 0.0;
426 let mut next = state;
427 for j in 0..n {
428 if j == state {
429 continue;
430 }
431 cum += self.q[state * n + j];
432 if u < cum {
433 next = j;
434 break;
435 }
436 }
437 times.push(t);
438 states.push(next);
439 state = next;
440 }
441 (times, states)
442 }
443
444 pub fn kolmogorov_forward(&self, p0: &[f64], t: f64) -> Vec<f64> {
448 let n = self.n_states;
449 let qt: Vec<f64> = self.q.iter().map(|v| v * t).collect();
450 let exp_qt = expm_dense(&qt, n);
452 (0..n)
454 .map(|j| (0..n).map(|i| p0[i] * exp_qt[i * n + j]).sum())
455 .collect()
456 }
457
458 pub fn stationary_distribution(&self, tol: f64, max_iter: usize) -> Vec<f64> {
460 let n = self.n_states;
461 let dt = 1.0
464 / (0..n)
465 .map(|i| -self.q[i * n + i])
466 .fold(0.0f64, f64::max)
467 .max(1.0);
468 let p: Vec<f64> = (0..n * n)
470 .map(|idx| {
471 let i = idx / n;
472 let j = idx % n;
473 if i == j {
474 1.0 + dt * self.q[idx]
475 } else {
476 dt * self.q[idx]
477 }
478 })
479 .collect();
480 let mc = MarkovChain::new(p, n);
481 mc.stationary_distribution(tol, max_iter)
482 }
483
484 pub fn mean_first_passage_time(&self, from: usize, to: usize) -> f64 {
488 let pi = self.stationary_distribution(1e-10, 2000);
489 let qj = -(self.q[to * self.n_states + to]);
491 if qj < 1e-300 || pi[to] < 1e-300 {
492 return f64::INFINITY;
493 }
494 let _ = from;
495 1.0 / (pi[to] * qj)
496 }
497
498 pub fn mean_sojourn_time(&self, state: usize) -> f64 {
500 let rate = -(self.q[state * self.n_states + state]);
501 if rate > 1e-300 {
502 1.0 / rate
503 } else {
504 f64::INFINITY
505 }
506 }
507}
508
509fn expm_dense(a: &[f64], n: usize) -> Vec<f64> {
511 let id = eye_n(n);
512 let norm: f64 = a.iter().map(|v| v.abs()).sum::<f64>();
514 let s = ((norm / 1.0).log2().ceil() as i32).max(0) as u32;
515 let scale = 1.0 / (1u64 << s) as f64;
516 let a_s: Vec<f64> = a.iter().map(|v| v * scale).collect();
517 let a2 = mm(&a_s, &a_s, n);
518 let c0 = 1.0f64;
520 let c1 = 0.5;
521 let c2 = 3.0 / 26.0;
522 let c3 = 1.0 / 312.0;
523 let u: Vec<f64> = (0..n * n).map(|i| c3 * a2[i] + c1 * id[i]).collect();
524 let u = mv(&a_s, &u, n); let u: Vec<f64> = u.to_vec();
526 let v: Vec<f64> = (0..n * n).map(|i| c2 * a2[i] + c0 * id[i]).collect();
527 let num: Vec<f64> = v.iter().zip(&u).map(|(x, y)| x + y).collect();
529 let den: Vec<f64> = v.iter().zip(&u).map(|(x, y)| x - y).collect();
530 let inv_den = inv_n(&den, n);
531 let mut result = mm(&inv_den, &num, n);
532 for _ in 0..s {
533 result = mm(&result, &result, n);
534 }
535 result
536}
537
/// Returns the n x n identity matrix in row-major order.
fn eye_n(n: usize) -> Vec<f64> {
    (0..n * n)
        .map(|idx| if idx / n == idx % n { 1.0 } else { 0.0 })
        .collect()
}
545
/// Row-major product of two n x n matrices, in i-k-j order so the inner
/// loop streams a row of `b`; zero entries of `a` are skipped.
fn mm(a: &[f64], b: &[f64], n: usize) -> Vec<f64> {
    let mut out = vec![0.0f64; n * n];
    for row in 0..n {
        for (k, &aik) in a[row * n..(row + 1) * n].iter().enumerate() {
            if aik == 0.0 {
                continue;
            }
            for (col, &bkj) in b[k * n..(k + 1) * n].iter().enumerate() {
                out[row * n + col] += aik * bkj;
            }
        }
    }
    out
}
560
/// Thin alias for [`mm`]: despite the matrix-vector-sounding name, this
/// performs a full n x n row-major matrix product on `n * n`-length
/// buffers.
fn mv(a: &[f64], b: &[f64], n: usize) -> Vec<f64> {
    mm(a, b, n)
}
564
/// Inverts an n x n row-major matrix by Gauss-Jordan elimination with
/// partial pivoting.  A (near-)zero pivot column (|pivot| < 1e-300) is
/// skipped rather than reported, so singular input yields a best-effort
/// result instead of a panic.
fn inv_n(a: &[f64], n: usize) -> Vec<f64> {
    let mut work = a.to_vec();
    // Start the augmented half as the identity matrix.
    let mut result: Vec<f64> = (0..n * n)
        .map(|idx| if idx / n == idx % n { 1.0 } else { 0.0 })
        .collect();
    for pivot_col in 0..n {
        // Partial pivoting: move the largest-magnitude entry at or below
        // the diagonal of this column onto the diagonal.
        let mut best = pivot_col;
        for row in (pivot_col + 1)..n {
            if work[row * n + pivot_col].abs() > work[best * n + pivot_col].abs() {
                best = row;
            }
        }
        for j in 0..n {
            work.swap(pivot_col * n + j, best * n + j);
            result.swap(pivot_col * n + j, best * n + j);
        }
        let pivot = work[pivot_col * n + pivot_col];
        if pivot.abs() < 1e-300 {
            continue;
        }
        // Normalize the pivot row on both halves.
        for j in 0..n {
            work[pivot_col * n + j] /= pivot;
            result[pivot_col * n + j] /= pivot;
        }
        // Eliminate the pivot column from every other row.
        for row in 0..n {
            if row == pivot_col {
                continue;
            }
            let factor = work[row * n + pivot_col];
            for j in 0..n {
                let wj = work[pivot_col * n + j];
                let rj = result[pivot_col * n + j];
                work[row * n + j] -= factor * wj;
                result[row * n + j] -= factor * rj;
            }
        }
    }
    result
}
602
603pub struct LevyProcess;
607
608impl LevyProcess {
609 pub fn alpha_stable(
614 alpha: f64,
615 beta: f64,
616 scale: f64,
617 loc: f64,
618 t: f64,
619 n_steps: usize,
620 seed: u64,
621 ) -> Vec<f64> {
622 assert!(alpha > 0.0 && alpha <= 2.0, "alpha must be in (0, 2]");
623 assert!((-1.0..=1.0).contains(&beta), "beta must be in [-1, 1]");
624 let dt = t / n_steps as f64;
625 let mut rng = Lcg::new(seed);
626 let mut path = vec![0.0f64; n_steps + 1];
627 for i in 1..=n_steps {
628 let z = Self::stable_sample(&mut rng, alpha, beta);
629 path[i] = path[i - 1] + loc * dt + scale * dt.powf(1.0 / alpha) * z;
630 }
631 path
632 }
633
634 fn stable_sample(rng: &mut Lcg, alpha: f64, beta: f64) -> f64 {
635 if (alpha - 2.0).abs() < 1e-10 {
636 return rng.normal();
637 }
638 let phi = PI * (rng.uniform() - 0.5);
639 let w = -rng.uniform().max(1e-300).ln();
640 if (alpha - 1.0).abs() < 1e-10 {
641 let b_phi = (PI / 2.0 + beta * phi).tan();
642 return (PI / 2.0) * (b_phi * phi - beta * (b_phi * phi.cos()).max(1e-300).ln());
643 }
644 let zeta = -beta * (PI * alpha / 2.0).tan();
645 let xi = (1.0 / alpha) * (beta * (PI * alpha / 2.0).tan()).atan();
646 let t1 = (1.0 + zeta * zeta).powf(1.0 / (2.0 * alpha));
647 let t2 = (alpha * (phi + xi)).sin() / phi.cos().powf(1.0 / alpha);
648 let t3 = ((phi - alpha * (phi + xi)).cos() / w).powf((1.0 - alpha) / alpha);
649 t1 * t2 * t3
650 }
651
652 pub fn variance_gamma(
656 sigma: f64,
657 nu: f64,
658 theta: f64,
659 t: f64,
660 n_steps: usize,
661 seed: u64,
662 ) -> Vec<f64> {
663 let dt = t / n_steps as f64;
664 let mut rng = Lcg::new(seed);
665 let mut path = vec![0.0f64; n_steps + 1];
666 for i in 1..=n_steps {
667 let dg = Self::gamma_sample(&mut rng, dt / nu, nu);
669 let dw = rng.normal() * dg.sqrt();
670 path[i] = path[i - 1] + theta * dg + sigma * dw;
671 }
672 path
673 }
674
675 fn gamma_sample(rng: &mut Lcg, shape: f64, scale: f64) -> f64 {
676 if shape < 1e-10 {
678 return 0.0;
679 }
680 let (k, s) = if shape < 1.0 {
681 let u = rng.uniform().max(1e-300);
682 (shape + 1.0, u.powf(1.0 / shape))
683 } else {
684 (shape, 1.0)
685 };
686 let d = k - 1.0 / 3.0;
687 let c = 1.0 / (9.0 * d).sqrt();
688 loop {
689 let x = rng.normal();
690 let v = (1.0 + c * x).powi(3);
691 if v <= 0.0 {
692 continue;
693 }
694 let u = rng.uniform().max(1e-300);
695 if u < 1.0 - 0.0331 * x.powi(4) {
696 return d * v * s * scale;
697 }
698 if u.ln() < 0.5 * x * x + d * (1.0 - v + v.ln()) {
699 return d * v * s * scale;
700 }
701 }
702 }
703
704 pub fn cgmy(
709 c: f64,
710 g: f64,
711 m_param: f64,
712 y: f64,
713 t: f64,
714 n_steps: usize,
715 seed: u64,
716 ) -> Vec<f64> {
717 assert!(
718 y < 2.0 && c > 0.0 && g > 0.0 && m_param > 0.0,
719 "Invalid CGMY parameters"
720 );
721 let dt = t / n_steps as f64;
722 let mut rng = Lcg::new(seed);
723 let mut path = vec![0.0f64; n_steps + 1];
724 let eps = 0.1f64; let sigma_small = (2.0 * c * eps.powf(2.0 - y) / (2.0 - y)).sqrt();
728 let lambda_large = c * (g.powf(-y) + m_param.powf(-y)) * (1.0 - y).max(0.1);
730 for i in 1..=n_steps {
731 let diffusive = sigma_small * dt.sqrt() * rng.normal();
732 let n_jumps = {
734 let mu = lambda_large * dt;
735 let mut k = 0usize;
736 let mut p = (-mu).exp();
737 let mut cum = p;
738 let u = rng.uniform();
739 while u > cum && k < 20 {
740 k += 1;
741 p *= mu / k as f64;
742 cum += p;
743 }
744 k
745 };
746 let mut jump_sum = 0.0;
747 for _ in 0..n_jumps {
748 let u = rng.uniform();
749 let jump = if u < 0.5 {
750 -rng.uniform().max(1e-300).ln() / m_param
752 } else {
753 rng.uniform().max(1e-300).ln() / g
755 };
756 jump_sum += jump;
757 }
758 path[i] = path[i - 1] + diffusive + jump_sum;
759 }
760 path
761 }
762
763 pub fn alpha_stable_characteristic_exponent(alpha: f64, beta: f64, u: f64) -> f64 {
767 let tan_term = (PI * alpha / 2.0).tan();
769 -u.abs().powf(alpha) * (1.0 + beta * u.signum() * tan_term)
770 }
771}
772
773pub struct StochasticDifferentialEquation;
777
778impl StochasticDifferentialEquation {
779 pub fn euler_maruyama<F, G>(
783 mu: F,
784 sigma: G,
785 x0: f64,
786 t0: f64,
787 t_end: f64,
788 n_steps: usize,
789 seed: u64,
790 ) -> (Vec<f64>, Vec<f64>)
791 where
792 F: Fn(f64, f64) -> f64,
793 G: Fn(f64, f64) -> f64,
794 {
795 let dt = (t_end - t0) / n_steps as f64;
796 let sqrt_dt = dt.sqrt();
797 let mut rng = Lcg::new(seed);
798 let mut t_path = vec![t0; n_steps + 1];
799 let mut x_path = vec![x0; n_steps + 1];
800 for i in 0..n_steps {
801 let t = t0 + i as f64 * dt;
802 let x = x_path[i];
803 let dw = sqrt_dt * rng.normal();
804 x_path[i + 1] = x + mu(x, t) * dt + sigma(x, t) * dw;
805 t_path[i + 1] = t + dt;
806 }
807 (t_path, x_path)
808 }
809
810 pub fn milstein<F, G, H>(
815 mu: F,
816 sigma: G,
817 sigma_prime: H,
818 x0: f64,
819 t0: f64,
820 t_end: f64,
821 n_steps: usize,
822 seed: u64,
823 ) -> (Vec<f64>, Vec<f64>)
824 where
825 F: Fn(f64, f64) -> f64,
826 G: Fn(f64, f64) -> f64,
827 H: Fn(f64, f64) -> f64,
828 {
829 let dt = (t_end - t0) / n_steps as f64;
830 let sqrt_dt = dt.sqrt();
831 let mut rng = Lcg::new(seed);
832 let mut t_path = vec![t0; n_steps + 1];
833 let mut x_path = vec![x0; n_steps + 1];
834 for i in 0..n_steps {
835 let t = t0 + i as f64 * dt;
836 let x = x_path[i];
837 let z = rng.normal();
838 let dw = sqrt_dt * z;
839 let sig = sigma(x, t);
840 let sig_p = sigma_prime(x, t);
841 x_path[i + 1] = x + mu(x, t) * dt + sig * dw + 0.5 * sig * sig_p * (dw * dw - dt);
842 t_path[i + 1] = t + dt;
843 }
844 (t_path, x_path)
845 }
846
847 pub fn runge_kutta_maruyama<F, G>(
851 mu: F,
852 sigma: G,
853 x0: f64,
854 t0: f64,
855 t_end: f64,
856 n_steps: usize,
857 seed: u64,
858 ) -> (Vec<f64>, Vec<f64>)
859 where
860 F: Fn(f64, f64) -> f64,
861 G: Fn(f64, f64) -> f64,
862 {
863 let dt = (t_end - t0) / n_steps as f64;
864 let sqrt_dt = dt.sqrt();
865 let mut rng = Lcg::new(seed);
866 let mut t_path = vec![t0; n_steps + 1];
867 let mut x_path = vec![x0; n_steps + 1];
868 for i in 0..n_steps {
869 let t = t0 + i as f64 * dt;
870 let x = x_path[i];
871 let dw = sqrt_dt * rng.normal();
872 let sig = sigma(x, t);
873 let x_hat = x + sig * sqrt_dt;
875 let sig_hat = sigma(x_hat, t);
876 x_path[i + 1] =
877 x + mu(x, t) * dt + sig * dw + 0.5 * (sig_hat - sig) * (dw * dw - dt) / sqrt_dt;
878 t_path[i + 1] = t + dt;
879 }
880 (t_path, x_path)
881 }
882
883 pub fn monte_carlo_paths<F, G>(
887 mu: F,
888 sigma: G,
889 x0: f64,
890 t0: f64,
891 t_end: f64,
892 n_steps: usize,
893 n_paths: usize,
894 seed: u64,
895 ) -> Vec<Vec<f64>>
896 where
897 F: Fn(f64, f64) -> f64 + Clone,
898 G: Fn(f64, f64) -> f64 + Clone,
899 {
900 (0..n_paths)
901 .map(|k| {
902 let (_, x) = Self::euler_maruyama(
903 mu.clone(),
904 sigma.clone(),
905 x0,
906 t0,
907 t_end,
908 n_steps,
909 seed.wrapping_add(k as u64),
910 );
911 x
912 })
913 .collect()
914 }
915
916 pub fn strong_error_estimate<F, G>(
920 mu: F,
921 sigma: G,
922 x0: f64,
923 t0: f64,
924 t_end: f64,
925 seed: u64,
926 n_steps_coarse: usize,
927 n_steps_fine: usize,
928 ) -> f64
929 where
930 F: Fn(f64, f64) -> f64 + Clone,
931 G: Fn(f64, f64) -> f64 + Clone,
932 {
933 let (_, x_coarse) = Self::euler_maruyama(
934 mu.clone(),
935 sigma.clone(),
936 x0,
937 t0,
938 t_end,
939 n_steps_coarse,
940 seed,
941 );
942 let (_, x_fine) = Self::euler_maruyama(mu, sigma, x0, t0, t_end, n_steps_fine, seed);
943 (*x_coarse.last().expect("coarse path is non-empty")
944 - *x_fine.last().expect("fine path is non-empty"))
945 .abs()
946 }
947
948 pub fn weak_order_estimate<F, G, H>(
952 mu: F,
953 sigma: G,
954 functional: H,
955 x0: f64,
956 t0: f64,
957 t_end: f64,
958 n_steps: usize,
959 n_paths: usize,
960 seed: u64,
961 ) -> f64
962 where
963 F: Fn(f64, f64) -> f64 + Clone,
964 G: Fn(f64, f64) -> f64 + Clone,
965 H: Fn(f64) -> f64,
966 {
967 let paths = Self::monte_carlo_paths(mu, sigma, x0, t0, t_end, n_steps, n_paths, seed);
968 let sum: f64 = paths
969 .iter()
970 .map(|p| functional(*p.last().expect("path is non-empty")))
971 .sum();
972 sum / n_paths as f64
973 }
974
975 pub fn stratonovich_to_ito<G, H>(
979 strat_mu: f64,
980 sigma_val: f64,
981 sigma_prime: f64,
982 _sigma: G,
983 _sigma_p: H,
984 ) -> f64
985 where
986 G: Fn(f64, f64) -> f64,
987 H: Fn(f64, f64) -> f64,
988 {
989 strat_mu - 0.5 * sigma_val * sigma_prime
990 }
991}
992
/// Arithmetic mean of `data`; NaN for an empty slice (0/0).
pub fn sample_mean(data: &[f64]) -> f64 {
    let total: f64 = data.iter().sum();
    total / data.len() as f64
}
999
/// Unbiased (n-1 denominator) sample variance of `data`.
///
/// Returns 0.0 when there are fewer than two elements: a single
/// observation has no spread, and the guard avoids the usize underflow
/// that `data.len() - 1` would hit on an empty slice.
pub fn sample_variance(data: &[f64]) -> f64 {
    if data.len() < 2 {
        return 0.0;
    }
    let m = data.iter().sum::<f64>() / data.len() as f64;
    data.iter().map(|x| (x - m).powi(2)).sum::<f64>() / (data.len() - 1) as f64
}
1005
/// Sample autocorrelation of `data` at `lag`, using the standard
/// normalized estimator
///   rho(k) = sum_i (x_i - m)(x_{i+k} - m) / sum_i (x_i - m)^2,
/// so that rho(0) == 1 exactly.  (Mixing a /(n-lag) covariance with an
/// /(n-1) variance would make rho(0) = (n-1)/n instead.)  Returns 0.0
/// when the lag is out of range or the series is numerically constant.
pub fn autocorrelation(data: &[f64], lag: usize) -> f64 {
    let n = data.len();
    if lag >= n {
        return 0.0;
    }
    let m = data.iter().sum::<f64>() / n as f64;
    let denom: f64 = data
        .iter()
        .map(|x| {
            let d = x - m;
            d * d
        })
        .sum();
    if denom < 1e-300 {
        return 0.0;
    }
    let num: f64 = (0..n - lag)
        .map(|i| (data[i] - m) * (data[i + lag] - m))
        .sum();
    num / denom
}
1023
/// Raw periodogram of `data`: power at the Fourier frequencies k/n for
/// k = 0..n/2, computed by a direct O(n^2) DFT of the mean-centered
/// series.
pub fn periodogram(data: &[f64]) -> Vec<f64> {
    let n = data.len();
    let mean = data.iter().sum::<f64>() / n as f64;
    let centered: Vec<f64> = data.iter().map(|x| x - mean).collect();
    let mut psd = Vec::with_capacity(n / 2);
    for k in 0..n / 2 {
        let mut re = 0.0;
        let mut im = 0.0;
        for (t, &c) in centered.iter().enumerate() {
            let angle = 2.0 * PI * k as f64 * t as f64 / n as f64;
            re += c * angle.cos();
            im += c * angle.sin();
        }
        psd.push((re * re + im * im) / n as f64);
    }
    psd
}
1041
#[cfg(test)]
mod tests {
    use super::*;

    // ---------- Brownian motion ----------

    #[test]
    fn test_bm_starts_at_zero() {
        let path = BrownianMotion::standard(1.0, 100, 42);
        assert_eq!(path[0], 0.0);
    }

    #[test]
    fn test_bm_length() {
        let n = 200;
        let path = BrownianMotion::standard(1.0, n, 1);
        assert_eq!(path.len(), n + 1);
    }

    #[test]
    fn test_bm_quadratic_variation() {
        // Realized QV on [0, 1] should approach t = 1 as the grid refines.
        let path = BrownianMotion::standard(1.0, 10000, 7);
        let qv = BrownianMotion::quadratic_variation(&path);
        assert!((qv - 1.0).abs() < 0.1, "QV = {qv}");
    }

    #[test]
    fn test_gbm_positive() {
        let path = BrownianMotion::geometric(100.0, 0.05, 0.2, 1.0, 100, 99);
        assert!(path.iter().all(|&v| v > 0.0), "GBM should be positive");
    }

    #[test]
    fn test_gbm_starts_at_s0() {
        let path = BrownianMotion::geometric(50.0, 0.1, 0.3, 1.0, 100, 5);
        assert!((path[0] - 50.0).abs() < 1e-10);
    }

    #[test]
    fn test_fbm_length() {
        let path = BrownianMotion::fractional(0.7, 50, 42);
        assert_eq!(path.len(), 51);
    }

    #[test]
    fn test_fbm_starts_at_zero() {
        let path = BrownianMotion::fractional(0.3, 100, 10);
        assert_eq!(path[0], 0.0);
    }

    #[test]
    fn test_bm_variance_approx() {
        // Increment variance should be close to dt = 1/1000.
        let path = BrownianMotion::standard(1.0, 1000, 123);
        let increments: Vec<f64> = path.windows(2).map(|w| w[1] - w[0]).collect();
        let v = BrownianMotion::variance(&increments);
        assert!((v - 0.001).abs() < 0.002, "variance = {v}");
    }

    // ---------- Poisson processes ----------

    #[test]
    fn test_poisson_sorted_times() {
        let times = PoissonProcess::homogeneous(5.0, 10.0, 42);
        for w in times.windows(2) {
            assert!(w[0] < w[1]);
        }
    }

    #[test]
    fn test_poisson_times_in_range() {
        let t = 5.0;
        let times = PoissonProcess::homogeneous(3.0, t, 7);
        assert!(times.iter().all(|&ti| ti < t));
    }

    #[test]
    fn test_poisson_expected_count() {
        let expected = PoissonProcess::expected_count(4.0, 2.5);
        assert!((expected - 10.0).abs() < 1e-10);
    }

    #[test]
    fn test_poisson_empirical_rate() {
        let t = 100.0;
        let lambda = 5.0;
        let times = PoissonProcess::homogeneous(lambda, t, 99);
        let emp = PoissonProcess::empirical_rate(&times, t);
        assert!((emp - lambda).abs() < 1.0, "empirical rate = {emp}");
    }

    #[test]
    fn test_poisson_inhomogeneous() {
        let times = PoissonProcess::inhomogeneous(|t| 1.0 + t, 11.0, 10.0, 42);
        assert!(times.iter().all(|&ti| ti < 10.0));
    }

    #[test]
    fn test_compound_poisson() {
        let (times, values) = PoissonProcess::compound(2.0, 5.0, |u| u, 42);
        assert_eq!(values.len(), times.len() + 1, "values has one more entry");
        assert_eq!(values[0], 0.0);
    }

    #[test]
    fn test_inter_arrival_cdf() {
        let cdf = PoissonProcess::inter_arrival_cdf(1.0, 0.0);
        assert_eq!(cdf, 0.0);
        let cdf_inf = PoissonProcess::inter_arrival_cdf(1.0, 100.0);
        assert!((cdf_inf - 1.0).abs() < 1e-30);
    }

    // ---------- Discrete-time Markov chains ----------

    /// Two-state chain with known stationary distribution (2/3, 1/3).
    fn simple_mc() -> MarkovChain {
        MarkovChain::new(vec![0.9, 0.1, 0.2, 0.8], 2)
    }

    #[test]
    fn test_mc_trajectory_length() {
        let mc = simple_mc();
        let traj = mc.simulate(0, 100, 42);
        assert_eq!(traj.len(), 101);
    }

    #[test]
    fn test_mc_states_valid() {
        let mc = simple_mc();
        let traj = mc.simulate(0, 200, 5);
        assert!(traj.iter().all(|&s| s < 2));
    }

    #[test]
    fn test_mc_stationary_sums_to_one() {
        let mc = simple_mc();
        let pi = mc.stationary_distribution(1e-10, 2000);
        let s: f64 = pi.iter().sum();
        assert!((s - 1.0).abs() < 1e-8, "sum = {s}");
    }

    #[test]
    fn test_mc_stationary_known() {
        let mc = simple_mc();
        let pi = mc.stationary_distribution(1e-10, 2000);
        assert!((pi[0] - 2.0 / 3.0).abs() < 1e-4, "pi[0] = {}", pi[0]);
        assert!((pi[1] - 1.0 / 3.0).abs() < 1e-4, "pi[1] = {}", pi[1]);
    }

    #[test]
    fn test_mc_n_step_matrix() {
        let mc = simple_mc();
        let p1 = mc.n_step_matrix(1);
        for (a, b) in p1.iter().zip(&mc.p) {
            assert!((a - b).abs() < 1e-10);
        }
    }

    #[test]
    fn test_mc_mixing_time() {
        let mc = simple_mc();
        let t = mc.mixing_time(0, 0.01);
        assert!(t > 0);
    }

    #[test]
    fn test_empirical_transition_matrix() {
        let mc = simple_mc();
        let traj = mc.simulate(0, 10000, 77);
        let emp = MarkovChain::empirical_transition_matrix(&traj, 2);
        assert!((emp[0] - 0.9).abs() < 0.05, "emp[0,0] = {}", emp[0]);
    }

    // ---------- Continuous-time Markov chains ----------

    /// Two-state CTMC with known stationary distribution (0.6, 0.4).
    fn simple_ctmc() -> ContinuousTimeMarkov {
        ContinuousTimeMarkov::new(vec![-2.0, 2.0, 3.0, -3.0], 2)
    }

    #[test]
    fn test_ctmc_simulate_runs() {
        let ctmc = simple_ctmc();
        let (times, states) = ctmc.simulate(0, 5.0, 42);
        assert_eq!(times.len(), states.len());
        assert!(times.iter().all(|&t| t < 5.0));
    }

    #[test]
    fn test_ctmc_stationary_sums_to_one() {
        let ctmc = simple_ctmc();
        let pi = ctmc.stationary_distribution(1e-10, 2000);
        let s: f64 = pi.iter().sum();
        assert!((s - 1.0).abs() < 1e-6, "sum = {s}");
    }

    #[test]
    fn test_ctmc_stationary_known() {
        let ctmc = simple_ctmc();
        let pi = ctmc.stationary_distribution(1e-10, 2000);
        assert!((pi[0] - 0.6).abs() < 0.05, "pi[0] = {}", pi[0]);
    }

    #[test]
    fn test_ctmc_kolmogorov_sums_to_one() {
        let ctmc = simple_ctmc();
        let p0 = vec![1.0, 0.0];
        let pt = ctmc.kolmogorov_forward(&p0, 1.0);
        let s: f64 = pt.iter().sum();
        assert!((s - 1.0).abs() < 1e-6, "sum = {s}");
    }

    #[test]
    fn test_ctmc_mean_sojourn() {
        let ctmc = simple_ctmc();
        assert!((ctmc.mean_sojourn_time(0) - 0.5).abs() < 1e-10);
        assert!((ctmc.mean_sojourn_time(1) - 1.0 / 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_ctmc_first_passage_finite() {
        let ctmc = simple_ctmc();
        let fpt = ctmc.mean_first_passage_time(0, 1);
        assert!(fpt.is_finite() && fpt > 0.0);
    }

    // ---------- Levy processes ----------

    #[test]
    fn test_alpha_stable_length() {
        let path = LevyProcess::alpha_stable(1.5, 0.0, 1.0, 0.0, 1.0, 100, 42);
        assert_eq!(path.len(), 101);
    }

    #[test]
    fn test_alpha_stable_starts_zero() {
        let path = LevyProcess::alpha_stable(1.8, 0.2, 0.5, 0.0, 1.0, 50, 7);
        assert_eq!(path[0], 0.0);
    }

    #[test]
    fn test_alpha_stable_finite() {
        let path = LevyProcess::alpha_stable(1.5, 0.0, 1.0, 0.0, 1.0, 50, 1);
        assert!(path.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_vg_length() {
        let path = LevyProcess::variance_gamma(0.2, 0.1, -0.1, 1.0, 100, 42);
        assert_eq!(path.len(), 101);
    }

    #[test]
    fn test_vg_starts_zero() {
        let path = LevyProcess::variance_gamma(0.2, 0.1, 0.0, 1.0, 50, 5);
        assert_eq!(path[0], 0.0);
    }

    #[test]
    fn test_cgmy_length() {
        let path = LevyProcess::cgmy(1.0, 5.0, 10.0, 0.5, 1.0, 100, 42);
        assert_eq!(path.len(), 101);
    }

    #[test]
    fn test_cgmy_finite() {
        let path = LevyProcess::cgmy(1.0, 5.0, 10.0, 0.5, 1.0, 50, 9);
        assert!(path.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_alpha_stable_char_exp() {
        // alpha = 2, beta = 0, u = 1: psi = -1 (Gaussian case).
        let psi = LevyProcess::alpha_stable_characteristic_exponent(2.0, 0.0, 1.0);
        assert!((psi + 1.0).abs() < 1e-10, "psi = {psi}");
    }

    // ---------- SDE schemes ----------

    #[test]
    fn test_em_gbm_terminal() {
        let mu_val = 0.05;
        let sigma_val = 0.2;
        let s0 = 100.0;
        let t = 1.0;
        let (_, x) = StochasticDifferentialEquation::euler_maruyama(
            move |x, _t| mu_val * x,
            move |x, _t| sigma_val * x,
            s0,
            0.0,
            t,
            1000,
            42,
        );
        assert!(*x.last().unwrap() > 0.0);
    }

    #[test]
    fn test_em_starts_at_x0() {
        let (_, x) = StochasticDifferentialEquation::euler_maruyama(
            |x, _| x,
            |_, _| 0.1,
            5.0,
            0.0,
            1.0,
            100,
            1,
        );
        assert!((x[0] - 5.0).abs() < 1e-10);
    }

    #[test]
    fn test_milstein_ou() {
        let theta = 2.0;
        let sig = 0.5;
        let (_, x) = StochasticDifferentialEquation::milstein(
            move |x, _| -theta * x,
            move |_, _| sig,
            |_, _| 0.0,
            1.0,
            0.0,
            2.0,
            500,
            42,
        );
        assert!(x.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_milstein_length() {
        let (t, x) = StochasticDifferentialEquation::milstein(
            |x, _| x,
            |_, _| 0.1,
            |_, _| 0.0,
            1.0,
            0.0,
            1.0,
            100,
            5,
        );
        assert_eq!(t.len(), 101);
        assert_eq!(x.len(), 101);
    }

    #[test]
    fn test_rkm_finite() {
        let (_, x) = StochasticDifferentialEquation::runge_kutta_maruyama(
            |x, _| -x,
            |_, _| 0.5,
            1.0,
            0.0,
            1.0,
            200,
            42,
        );
        assert!(x.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_monte_carlo_paths_count() {
        let paths = StochasticDifferentialEquation::monte_carlo_paths(
            |x, _| -x,
            |_, _| 0.2,
            1.0,
            0.0,
            1.0,
            50,
            10,
            42,
        );
        assert_eq!(paths.len(), 10);
        assert!(paths.iter().all(|p| p.len() == 51));
    }

    #[test]
    fn test_strong_error_finite() {
        let err = StochasticDifferentialEquation::strong_error_estimate(
            |x, _| -x,
            |_, _| 0.2,
            1.0,
            0.0,
            1.0,
            42,
            10,
            100,
        );
        assert!(err.is_finite());
    }

    #[test]
    fn test_weak_order_estimate() {
        // Deterministic ODE dx = x dt: E[X(1)] = e.
        let ev = StochasticDifferentialEquation::weak_order_estimate(
            |x, _| x,
            |_, _| 0.0,
            |x| x,
            1.0,
            0.0,
            1.0,
            100,
            50,
            42,
        );
        assert!((ev - std::f64::consts::E).abs() < 0.1, "E[X(T)] = {ev}");
    }

    #[test]
    fn test_stratonovich_to_ito() {
        let ito_mu = StochasticDifferentialEquation::stratonovich_to_ito(
            1.0,
            0.5,
            0.5,
            |_: f64, _: f64| 0.5f64,
            |_: f64, _: f64| 0.5f64,
        );
        assert!((ito_mu - (1.0 - 0.5 * 0.5 * 0.5)).abs() < 1e-10);
    }

    // ---------- Statistics helpers ----------

    #[test]
    fn test_sample_mean() {
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        assert!((sample_mean(&data) - 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_sample_variance() {
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        assert!((sample_variance(&data) - 2.5).abs() < 1e-10);
    }

    #[test]
    fn test_autocorrelation_lag0() {
        let data: Vec<f64> = (0..100).map(|i| i as f64).collect();
        let ac = autocorrelation(&data, 0);
        assert!((ac - 1.0).abs() < 0.02, "lag-0 autocorrelation = {ac}");
    }

    #[test]
    fn test_autocorrelation_white_noise() {
        // BM increments are i.i.d., so lag-1 autocorrelation should be small.
        let path = BrownianMotion::standard(1.0, 500, 88);
        let increments: Vec<f64> = path.windows(2).map(|w| w[1] - w[0]).collect();
        let ac1 = autocorrelation(&increments, 1).abs();
        assert!(ac1 < 0.15, "lag-1 autocorr = {ac1}");
    }

    #[test]
    fn test_periodogram_length() {
        let data: Vec<f64> = (0..64).map(|i| (i as f64).sin()).collect();
        let psd = periodogram(&data);
        assert_eq!(psd.len(), 32);
    }

    // ---------- RNG ----------

    #[test]
    fn test_lcg_uniform_range() {
        let mut rng = Lcg::new(42);
        for _ in 0..1000 {
            let u = rng.uniform();
            assert!((0.0..1.0).contains(&u), "uniform out of range: {u}");
        }
    }

    #[test]
    fn test_lcg_normal_finite() {
        let mut rng = Lcg::new(42);
        for _ in 0..100 {
            let z = rng.normal();
            assert!(z.is_finite());
        }
    }
}