// oxicuda_rand/monte_carlo.rs
1//! Monte Carlo simulation primitives.
2//!
3//! Provides CPU-side Monte Carlo integration, variance reduction techniques,
4//! MCMC samplers (Metropolis-Hastings and Hamiltonian Monte Carlo), and
5//! financial option pricing via geometric Brownian motion.
6//!
7//! All methods use an internal Xoshiro256**-style PRNG so they do not
8//! require a CUDA context.
9
10use crate::error::{RandError, RandResult};
11
12// ---------------------------------------------------------------------------
13// Internal PRNG -- Xoshiro256**
14// ---------------------------------------------------------------------------
15
/// Minimal Xoshiro256** PRNG for CPU-side Monte Carlo.
///
/// Seeded via SplitMix64 (see `new`), so any `u64` seed produces a usable
/// state in practice.
#[derive(Clone, Debug)]
struct Xoshiro256 {
    // 256 bits of generator state. The xoshiro family requires a not-all-zero
    // state; the SplitMix64 seeding makes an all-zero state effectively
    // impossible for any seed.
    s: [u64; 4],
}
21
22impl Xoshiro256 {
23    /// Seed the generator using SplitMix64 to fill the state.
24    fn new(seed: u64) -> Self {
25        let mut sm = seed;
26        let mut s = [0u64; 4];
27        for slot in &mut s {
28            sm = sm.wrapping_add(0x9e37_79b9_7f4a_7c15);
29            let mut z = sm;
30            z = (z ^ (z >> 30)).wrapping_mul(0xbf58_476d_1ce4_e5b9);
31            z = (z ^ (z >> 27)).wrapping_mul(0x94d0_49bb_1331_11eb);
32            z ^= z >> 31;
33            *slot = z;
34        }
35        Self { s }
36    }
37
38    #[inline]
39    fn next_u64(&mut self) -> u64 {
40        let result = (self.s[1].wrapping_mul(5)).rotate_left(7).wrapping_mul(9);
41        let t = self.s[1] << 17;
42        self.s[2] ^= self.s[0];
43        self.s[3] ^= self.s[1];
44        self.s[1] ^= self.s[2];
45        self.s[0] ^= self.s[3];
46        self.s[2] ^= t;
47        self.s[3] = self.s[3].rotate_left(45);
48        result
49    }
50
51    /// Returns a uniform f64 in [0, 1).
52    #[inline]
53    fn next_f64(&mut self) -> f64 {
54        (self.next_u64() >> 11) as f64 * (1.0 / (1u64 << 53) as f64)
55    }
56
57    /// Returns a standard normal variate via Box-Muller.
58    fn next_normal(&mut self) -> f64 {
59        loop {
60            let u1 = self.next_f64();
61            let u2 = self.next_f64();
62            if u1 > 1e-300 {
63                return (-2.0 * u1.ln()).sqrt() * (2.0 * core::f64::consts::PI * u2).cos();
64            }
65        }
66    }
67}
68
69// ---------------------------------------------------------------------------
70// Configuration & Results
71// ---------------------------------------------------------------------------
72
/// Configuration for Monte Carlo simulations.
///
/// Checked by the simulation entry points before use: `num_samples` must be
/// positive and `confidence_level` must lie in the open interval (0, 1).
#[derive(Clone, Debug)]
pub struct MonteCarloConfig {
    /// Number of samples to draw.
    pub num_samples: usize,
    /// PRNG seed.
    pub seed: u64,
    /// Confidence level for the confidence interval (default 0.95).
    pub confidence_level: f64,
    /// Enable antithetic variates when applicable.
    ///
    /// NOTE(review): not read by any routine in the visible portion of this
    /// file; antithetic sampling is exposed via `antithetic_integrate`
    /// instead -- confirm whether this flag is consumed elsewhere.
    pub use_antithetic: bool,
    /// Enable control variates when applicable.
    ///
    /// NOTE(review): likewise not read here; see `control_variate_integrate`.
    pub use_control_variate: bool,
}
87
88impl Default for MonteCarloConfig {
89    fn default() -> Self {
90        Self {
91            num_samples: 10_000,
92            seed: 42,
93            confidence_level: 0.95,
94            use_antithetic: false,
95            use_control_variate: false,
96        }
97    }
98}
99
100impl MonteCarloConfig {
101    /// Validate the configuration, returning an error on bad values.
102    fn validate(&self) -> RandResult<()> {
103        if self.num_samples == 0 {
104            return Err(RandError::InvalidSize(
105                "num_samples must be positive".to_string(),
106            ));
107        }
108        if self.confidence_level <= 0.0 || self.confidence_level >= 1.0 {
109            return Err(RandError::InvalidSize(
110                "confidence_level must be in (0, 1)".to_string(),
111            ));
112        }
113        Ok(())
114    }
115}
116
/// Result of a Monte Carlo estimation.
#[derive(Clone, Debug)]
pub struct MonteCarloResult {
    /// Point estimate (sample mean of the simulated values).
    pub estimate: f64,
    /// Standard error of the estimate, `sqrt(variance / num_samples)`.
    pub std_error: f64,
    /// Confidence interval `(lower, upper)`, built from the normal
    /// approximation `estimate ± z * std_error`.
    pub confidence_interval: (f64, f64),
    /// Number of simulated values the statistics were computed from (this
    /// can differ from the configured sample count, e.g. antithetic pairs).
    pub num_samples: usize,
    /// Sample variance (Bessel-corrected).
    pub variance: f64,
}
131
/// Result of an MCMC sampling run.
#[derive(Clone, Debug)]
pub struct McmcResult {
    /// Collected post-burn-in samples (each inner Vec is one sample in R^d).
    pub samples: Vec<Vec<f64>>,
    /// Fraction of post-burn-in proposals accepted.
    pub acceptance_rate: f64,
    /// Estimated effective sample size, computed from the autocorrelation
    /// of the first coordinate of the chain.
    pub effective_sample_size: f64,
}
142
/// Internal state for MCMC chains.
///
/// NOTE(review): not referenced by the samplers in the visible portion of
/// this file -- possibly consumed by another module or reserved for a
/// resumable-chain API. Confirm before removing.
#[derive(Clone, Debug)]
pub struct SamplerState {
    /// Current position in parameter space.
    pub position: Vec<f64>,
    /// Current log-density value at `position`.
    pub log_density: f64,
}
151
152// ---------------------------------------------------------------------------
153// Helper: z-quantile for confidence intervals
154// ---------------------------------------------------------------------------
155
/// Approximate inverse of the standard normal CDF using the rational
/// approximation of Abramowitz & Stegun 26.2.23.
///
/// Returns `NaN` for `p` outside the open interval (0, 1). For `p < 0.5`
/// the result is obtained from the upper half by symmetry.
fn normal_quantile(p: f64) -> f64 {
    if p <= 0.0 || p >= 1.0 {
        return f64::NAN;
    }
    // Map to the upper half [0.5, 1) and remember the sign flip.
    let (sign, q) = if p < 0.5 { (-1.0, 1.0 - p) } else { (1.0, p) };
    // t = sqrt(-2 ln(tail probability)).
    let t = (-2.0 * (1.0 - q).ln()).sqrt();
    // A&S 26.2.23 rational correction (|error| < 4.5e-4).
    let num = 2.515_517 + 0.802_853 * t + 0.010_328 * t * t;
    let den = 1.0 + 1.432_788 * t + 0.189_269 * t * t + 0.001_308 * t * t * t;
    sign * (t - num / den)
}
175
176/// Build a [`MonteCarloResult`] from a slice of values.
177fn build_result(values: &[f64], config: &MonteCarloConfig) -> MonteCarloResult {
178    let n = values.len() as f64;
179    let mean = values.iter().sum::<f64>() / n;
180    let var = values.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / (n - 1.0);
181    let se = (var / n).sqrt();
182    let z = normal_quantile(0.5 + config.confidence_level / 2.0);
183    MonteCarloResult {
184        estimate: mean,
185        std_error: se,
186        confidence_interval: (mean - z * se, mean + z * se),
187        num_samples: values.len(),
188        variance: var,
189    }
190}
191
192// ---------------------------------------------------------------------------
193// Core Integration
194// ---------------------------------------------------------------------------
195
196/// Simple Monte Carlo integration of `f` on \[0, 1\].
197///
198/// Estimates `\int_0^1 f(x) dx` by averaging `f(U_i)` over uniform samples.
199///
200/// # Errors
201///
202/// Returns an error if the configuration is invalid.
203pub fn mc_integrate<F>(f: F, config: &MonteCarloConfig) -> RandResult<MonteCarloResult>
204where
205    F: Fn(f64) -> f64,
206{
207    config.validate()?;
208    let mut rng = Xoshiro256::new(config.seed);
209    let values: Vec<f64> = (0..config.num_samples).map(|_| f(rng.next_f64())).collect();
210    Ok(build_result(&values, config))
211}
212
213/// Multi-dimensional Monte Carlo integration of `f` on \[0, 1\]^d.
214///
215/// # Errors
216///
217/// Returns an error if `dim` is zero or the configuration is invalid.
218pub fn mc_integrate_nd<F>(
219    f: F,
220    dim: usize,
221    config: &MonteCarloConfig,
222) -> RandResult<MonteCarloResult>
223where
224    F: Fn(&[f64]) -> f64,
225{
226    config.validate()?;
227    if dim == 0 {
228        return Err(RandError::InvalidSize("dim must be >= 1".to_string()));
229    }
230    let mut rng = Xoshiro256::new(config.seed);
231    let mut point = vec![0.0f64; dim];
232    let values: Vec<f64> = (0..config.num_samples)
233        .map(|_| {
234            for x in point.iter_mut() {
235                *x = rng.next_f64();
236            }
237            f(&point)
238        })
239        .collect();
240    Ok(build_result(&values, config))
241}
242
243/// Importance sampling Monte Carlo integration.
244///
245/// Estimates `\int f(x) dx` using a proposal distribution `g` (pdf) from
246/// which we can draw samples (`proposal_sample`). The estimator is
247/// `(1/N) \sum f(x_i)/g(x_i)`.
248///
249/// # Errors
250///
251/// Returns an error if the configuration is invalid.
252pub fn mc_integrate_importance<F, G, S>(
253    f: F,
254    proposal_pdf: G,
255    proposal_sample: S,
256    config: &MonteCarloConfig,
257) -> RandResult<MonteCarloResult>
258where
259    F: Fn(f64) -> f64,
260    G: Fn(f64) -> f64,
261    S: Fn(&mut dyn FnMut() -> f64) -> f64,
262{
263    config.validate()?;
264    let mut rng = Xoshiro256::new(config.seed);
265    let mut uniform = || rng.next_f64();
266    let values: Vec<f64> = (0..config.num_samples)
267        .map(|_| {
268            let x = proposal_sample(&mut uniform);
269            let g = proposal_pdf(x);
270            if g.abs() < 1e-300 { 0.0 } else { f(x) / g }
271        })
272        .collect();
273    Ok(build_result(&values, config))
274}
275
276// ---------------------------------------------------------------------------
277// Variance Reduction
278// ---------------------------------------------------------------------------
279
280/// Antithetic variates integration of `f` on \[0, 1\].
281///
282/// Pairs each uniform sample `u` with `1 - u` to reduce variance.
283///
284/// # Errors
285///
286/// Returns an error if the configuration is invalid.
287pub fn antithetic_integrate<F>(f: F, config: &MonteCarloConfig) -> RandResult<MonteCarloResult>
288where
289    F: Fn(f64) -> f64,
290{
291    config.validate()?;
292    let mut rng = Xoshiro256::new(config.seed);
293    // Each pair produces one averaged value.
294    let half_n = config.num_samples / 2;
295    let values: Vec<f64> = (0..half_n)
296        .map(|_| {
297            let u = rng.next_f64();
298            0.5 * (f(u) + f(1.0 - u))
299        })
300        .collect();
301    Ok(build_result(&values, config))
302}
303
304/// Control variate integration of `f` on \[0, 1\].
305///
306/// Uses a control function `c` with known mean `control_mean` to reduce
307/// variance via `f(x) - beta * (c(x) - control_mean)`.
308///
309/// # Errors
310///
311/// Returns an error if the configuration is invalid.
312pub fn control_variate_integrate<F, C>(
313    f: F,
314    control: C,
315    control_mean: f64,
316    config: &MonteCarloConfig,
317) -> RandResult<MonteCarloResult>
318where
319    F: Fn(f64) -> f64,
320    C: Fn(f64) -> f64,
321{
322    config.validate()?;
323    let mut rng = Xoshiro256::new(config.seed);
324    // First pass: collect raw f and c values to estimate optimal beta.
325    let mut f_vals = Vec::with_capacity(config.num_samples);
326    let mut c_vals = Vec::with_capacity(config.num_samples);
327    for _ in 0..config.num_samples {
328        let u = rng.next_f64();
329        f_vals.push(f(u));
330        c_vals.push(control(u));
331    }
332
333    let f_mean = f_vals.iter().sum::<f64>() / f_vals.len() as f64;
334    let c_mean_sample = c_vals.iter().sum::<f64>() / c_vals.len() as f64;
335
336    let mut cov = 0.0;
337    let mut var_c = 0.0;
338    for i in 0..f_vals.len() {
339        let df = f_vals[i] - f_mean;
340        let dc = c_vals[i] - c_mean_sample;
341        cov += df * dc;
342        var_c += dc * dc;
343    }
344
345    let beta = if var_c.abs() < 1e-300 {
346        0.0
347    } else {
348        cov / var_c
349    };
350
351    let adjusted: Vec<f64> = f_vals
352        .iter()
353        .zip(c_vals.iter())
354        .map(|(&fv, &cv)| fv - beta * (cv - control_mean))
355        .collect();
356
357    Ok(build_result(&adjusted, config))
358}
359
360/// Stratified sampling integration of `f` on \[0, 1\].
361///
362/// Divides \[0, 1\] into `num_strata` equal bins and samples within each.
363///
364/// # Errors
365///
366/// Returns an error if `num_strata` is zero or the configuration is invalid.
367pub fn stratified_integrate<F>(
368    f: F,
369    num_strata: usize,
370    config: &MonteCarloConfig,
371) -> RandResult<MonteCarloResult>
372where
373    F: Fn(f64) -> f64,
374{
375    config.validate()?;
376    if num_strata == 0 {
377        return Err(RandError::InvalidSize(
378            "num_strata must be >= 1".to_string(),
379        ));
380    }
381    let mut rng = Xoshiro256::new(config.seed);
382    let samples_per_stratum = config.num_samples / num_strata;
383    let samples_per_stratum = if samples_per_stratum == 0 {
384        1
385    } else {
386        samples_per_stratum
387    };
388
389    let mut values = Vec::with_capacity(num_strata * samples_per_stratum);
390    let stratum_width = 1.0 / num_strata as f64;
391
392    for i in 0..num_strata {
393        let lo = i as f64 * stratum_width;
394        for _ in 0..samples_per_stratum {
395            let u = lo + rng.next_f64() * stratum_width;
396            values.push(f(u));
397        }
398    }
399
400    Ok(build_result(&values, config))
401}
402
403// ---------------------------------------------------------------------------
404// MCMC -- Metropolis-Hastings
405// ---------------------------------------------------------------------------
406
/// Metropolis-Hastings sampler with symmetric Gaussian proposals.
#[derive(Clone, Debug)]
pub struct MetropolisHastings {
    /// Log of the unnormalized target density.
    log_target: fn(&[f64]) -> f64,
    /// Standard deviation of the isotropic Gaussian proposal.
    proposal_std: f64,
    /// Dimensionality of the parameter space.
    dim: usize,
}
414
415impl MetropolisHastings {
416    /// Create a new Metropolis-Hastings sampler.
417    ///
418    /// - `log_target`: log of the unnormalized target density.
419    /// - `proposal_std`: standard deviation of the isotropic Gaussian proposal.
420    /// - `dim`: dimensionality of the parameter space.
421    pub fn new(log_target: fn(&[f64]) -> f64, proposal_std: f64, dim: usize) -> Self {
422        Self {
423            log_target,
424            proposal_std,
425            dim,
426        }
427    }
428
429    /// Run the sampler, discarding `burn_in` initial samples.
430    ///
431    /// # Errors
432    ///
433    /// Returns an error if `num_samples` is zero.
434    pub fn sample(
435        &mut self,
436        num_samples: usize,
437        burn_in: usize,
438        seed: u64,
439    ) -> RandResult<McmcResult> {
440        if num_samples == 0 {
441            return Err(RandError::InvalidSize(
442                "num_samples must be positive".to_string(),
443            ));
444        }
445        let mut rng = Xoshiro256::new(seed);
446        let mut current = vec![0.0f64; self.dim];
447        let mut current_log_p = (self.log_target)(&current);
448        let mut accepted = 0u64;
449        let total = burn_in + num_samples;
450        let mut samples = Vec::with_capacity(num_samples);
451
452        for step in 0..total {
453            // Propose
454            let proposal: Vec<f64> = current
455                .iter()
456                .map(|&x| x + self.proposal_std * rng.next_normal())
457                .collect();
458            let proposal_log_p = (self.log_target)(&proposal);
459            let log_alpha = proposal_log_p - current_log_p;
460
461            if log_alpha >= 0.0 || rng.next_f64().ln() < log_alpha {
462                current = proposal;
463                current_log_p = proposal_log_p;
464                if step >= burn_in {
465                    accepted += 1;
466                }
467            } else if step >= burn_in {
468                // rejection after burn-in still counts toward denominator but
469                // not accepted count
470            }
471
472            if step >= burn_in {
473                samples.push(current.clone());
474            }
475        }
476
477        let ess = if self.dim > 0 {
478            let first_dim: Vec<f64> = samples.iter().map(|s| s[0]).collect();
479            effective_sample_size(&first_dim)
480        } else {
481            num_samples as f64
482        };
483
484        Ok(McmcResult {
485            samples,
486            acceptance_rate: accepted as f64 / num_samples as f64,
487            effective_sample_size: ess,
488        })
489    }
490}
491
492// ---------------------------------------------------------------------------
493// MCMC -- Hamiltonian Monte Carlo
494// ---------------------------------------------------------------------------
495
/// Hamiltonian Monte Carlo (HMC) sampler.
#[derive(Clone, Debug)]
pub struct HamiltonianMC {
    /// Log of the unnormalized target density.
    log_target: fn(&[f64]) -> f64,
    /// Gradient of `log_target` with respect to the position.
    grad_log_target: fn(&[f64]) -> Vec<f64>,
    /// Leapfrog integrator step size (epsilon).
    step_size: f64,
    /// Number of leapfrog steps per proposal.
    num_leapfrog: usize,
    /// Dimensionality of the parameter space.
    dim: usize,
}
505
impl HamiltonianMC {
    /// Create a new HMC sampler.
    ///
    /// - `log_target`: log of the unnormalized target density.
    /// - `grad_log_target`: gradient of `log_target`.
    /// - `step_size`: leapfrog integrator step size (epsilon).
    /// - `num_leapfrog`: number of leapfrog steps per proposal.
    /// - `dim`: dimensionality of the parameter space.
    pub fn new(
        log_target: fn(&[f64]) -> f64,
        grad_log_target: fn(&[f64]) -> Vec<f64>,
        step_size: f64,
        num_leapfrog: usize,
        dim: usize,
    ) -> Self {
        Self {
            log_target,
            grad_log_target,
            step_size,
            num_leapfrog,
            dim,
        }
    }

    /// Run the HMC sampler, discarding `burn_in` initial samples.
    ///
    /// Each iteration draws a fresh Gaussian momentum, simulates Hamiltonian
    /// dynamics with `num_leapfrog` leapfrog steps of size `step_size`, and
    /// accepts or rejects with a Metropolis test on the total energy change.
    /// The chain starts at the origin.
    ///
    /// NOTE(review): with `num_leapfrog == 0` the inner loop is skipped but
    /// one full position step is still taken -- confirm this is intended.
    ///
    /// # Errors
    ///
    /// Returns an error if `num_samples` is zero.
    pub fn sample(
        &mut self,
        num_samples: usize,
        burn_in: usize,
        seed: u64,
    ) -> RandResult<McmcResult> {
        if num_samples == 0 {
            return Err(RandError::InvalidSize(
                "num_samples must be positive".to_string(),
            ));
        }
        let mut rng = Xoshiro256::new(seed);
        let mut q = vec![0.0f64; self.dim];
        let total = burn_in + num_samples;
        let mut samples = Vec::with_capacity(num_samples);
        let mut accepted = 0u64;

        for step in 0..total {
            // Sample momentum p ~ N(0, I). Kinetic energy K = sum p_i^2 / 2;
            // potential energy U = -log_target(q).
            let p: Vec<f64> = (0..self.dim).map(|_| rng.next_normal()).collect();
            let current_k: f64 = p.iter().map(|pi| 0.5 * pi * pi).sum();
            let current_u = -(self.log_target)(&q);

            let mut q_new = q.clone();
            let mut p_new = p.clone();

            // Leapfrog integration: initial half-step momentum update...
            let grad = (self.grad_log_target)(&q_new);
            for (pi, gi) in p_new.iter_mut().zip(grad.iter()) {
                *pi += 0.5 * self.step_size * gi;
            }
            // ...then (num_leapfrog - 1) alternating full position / full
            // momentum steps...
            for _ in 0..self.num_leapfrog.saturating_sub(1) {
                for (qi, pi) in q_new.iter_mut().zip(p_new.iter()) {
                    *qi += self.step_size * pi;
                }
                let grad = (self.grad_log_target)(&q_new);
                for (pi, gi) in p_new.iter_mut().zip(grad.iter()) {
                    *pi += self.step_size * gi;
                }
            }
            // ...and a final full position step and half momentum step, so
            // momentum updates total num_leapfrog full steps.
            for (qi, pi) in q_new.iter_mut().zip(p_new.iter()) {
                *qi += self.step_size * pi;
            }
            let grad = (self.grad_log_target)(&q_new);
            for (pi, gi) in p_new.iter_mut().zip(grad.iter()) {
                *pi += 0.5 * self.step_size * gi;
            }
            // Negate momentum (for reversibility, not strictly necessary for
            // accept/reject since K is symmetric in p).
            for pi in &mut p_new {
                *pi = -*pi;
            }

            let proposed_u = -(self.log_target)(&q_new);
            let proposed_k: f64 = p_new.iter().map(|pi| 0.5 * pi * pi).sum();

            // Metropolis acceptance on the change in total energy H = U + K.
            let log_alpha = current_u + current_k - proposed_u - proposed_k;
            if log_alpha >= 0.0 || rng.next_f64().ln() < log_alpha {
                q = q_new;
                // Acceptance rate is measured over post-burn-in steps only.
                if step >= burn_in {
                    accepted += 1;
                }
            }

            if step >= burn_in {
                samples.push(q.clone());
            }
        }

        // ESS is estimated from the autocorrelation of the first coordinate.
        let ess = if self.dim > 0 {
            let first_dim: Vec<f64> = samples.iter().map(|s| s[0]).collect();
            effective_sample_size(&first_dim)
        } else {
            num_samples as f64
        };

        Ok(McmcResult {
            samples,
            acceptance_rate: accepted as f64 / num_samples as f64,
            effective_sample_size: ess,
        })
    }
}
618
619// ---------------------------------------------------------------------------
620// Financial Monte Carlo
621// ---------------------------------------------------------------------------
622
/// Parameters for Black-Scholes style option pricing.
///
/// Rates and volatility are annualized and `time_to_maturity` is expressed
/// in years, so all time units agree.
#[derive(Clone, Debug)]
pub struct BlackScholesParams {
    /// Current spot price.
    pub spot: f64,
    /// Strike price.
    pub strike: f64,
    /// Risk-free interest rate (annualized, continuously compounded --
    /// discounting uses `exp(-r * t)`).
    pub risk_free_rate: f64,
    /// Volatility (annualized).
    pub volatility: f64,
    /// Time to maturity in years.
    pub time_to_maturity: f64,
}
637
638/// Analytical Black-Scholes price for a European option.
639///
640/// Useful for validating Monte Carlo estimates against the closed-form
641/// solution.
642pub fn bs_analytical(params: &BlackScholesParams, is_call: bool) -> f64 {
643    let s = params.spot;
644    let k = params.strike;
645    let r = params.risk_free_rate;
646    let sigma = params.volatility;
647    let t = params.time_to_maturity;
648
649    let d1 = ((s / k).ln() + (r + 0.5 * sigma * sigma) * t) / (sigma * t.sqrt());
650    let d2 = d1 - sigma * t.sqrt();
651
652    let nd1 = normal_cdf(d1);
653    let nd2 = normal_cdf(d2);
654
655    if is_call {
656        s * nd1 - k * (-r * t).exp() * nd2
657    } else {
658        k * (-r * t).exp() * normal_cdf(-d2) - s * normal_cdf(-d1)
659    }
660}
661
/// Approximate standard normal CDF (Abramowitz & Stegun 26.2.17).
///
/// Accurate to approximately 1e-7. Negative arguments are handled through
/// the symmetry `Phi(-x) = 1 - Phi(x)`.
///
/// The recursion is guarded so that a `NaN` argument returns `NaN`;
/// previously `NaN` failed the `x >= 0` test and recursed on `-NaN`
/// forever, overflowing the stack.
pub fn normal_cdf(x: f64) -> f64 {
    // Only strictly negative finite values recurse; NaN falls through below
    // and propagates naturally through the arithmetic.
    if x < 0.0 {
        return 1.0 - normal_cdf(-x);
    }

    let b1 = 0.319_381_530;
    let b2 = -0.356_563_782;
    let b3 = 1.781_477_937;
    let b4 = -1.821_255_978;
    let b5 = 1.330_274_429;
    let pp = 0.231_641_9;

    let t = 1.0 / (1.0 + pp * x);
    // Horner evaluation of the degree-5 correction polynomial in t.
    let poly = ((((b5 * t + b4) * t + b3) * t + b2) * t + b1) * t;
    let pdf = (-0.5 * x * x).exp() / (2.0 * core::f64::consts::PI).sqrt();
    1.0 - pdf * poly
}
682
683/// Monte Carlo pricing of a European call or put option.
684///
685/// Simulates geometric Brownian motion terminal prices and averages the
686/// discounted payoff.
687///
688/// # Errors
689///
690/// Returns an error if the configuration is invalid.
691pub fn mc_european_option(
692    params: &BlackScholesParams,
693    is_call: bool,
694    config: &MonteCarloConfig,
695) -> RandResult<MonteCarloResult> {
696    config.validate()?;
697    let mut rng = Xoshiro256::new(config.seed);
698    let r = params.risk_free_rate;
699    let sigma = params.volatility;
700    let t = params.time_to_maturity;
701    let discount = (-r * t).exp();
702    let drift = (r - 0.5 * sigma * sigma) * t;
703    let vol_sqrt_t = sigma * t.sqrt();
704
705    let values: Vec<f64> = (0..config.num_samples)
706        .map(|_| {
707            let z = rng.next_normal();
708            let s_t = params.spot * (drift + vol_sqrt_t * z).exp();
709            let payoff = if is_call {
710                (s_t - params.strike).max(0.0)
711            } else {
712                (params.strike - s_t).max(0.0)
713            };
714            discount * payoff
715        })
716        .collect();
717
718    Ok(build_result(&values, config))
719}
720
721/// Monte Carlo pricing of an arithmetic Asian option.
722///
723/// The payoff depends on the arithmetic average of the price at
724/// `num_time_steps` equally spaced monitoring dates.
725///
726/// # Errors
727///
728/// Returns an error if the configuration is invalid or `num_time_steps` is zero.
729pub fn mc_asian_option(
730    params: &BlackScholesParams,
731    num_time_steps: usize,
732    is_call: bool,
733    config: &MonteCarloConfig,
734) -> RandResult<MonteCarloResult> {
735    config.validate()?;
736    if num_time_steps == 0 {
737        return Err(RandError::InvalidSize(
738            "num_time_steps must be positive".to_string(),
739        ));
740    }
741    let mut rng = Xoshiro256::new(config.seed);
742    let r = params.risk_free_rate;
743    let sigma = params.volatility;
744    let t = params.time_to_maturity;
745    let dt = t / num_time_steps as f64;
746    let drift = (r - 0.5 * sigma * sigma) * dt;
747    let vol_sqrt_dt = sigma * dt.sqrt();
748    let discount = (-r * t).exp();
749
750    let values: Vec<f64> = (0..config.num_samples)
751        .map(|_| {
752            let mut s = params.spot;
753            let mut sum = 0.0;
754            for _ in 0..num_time_steps {
755                let z = rng.next_normal();
756                s *= (drift + vol_sqrt_dt * z).exp();
757                sum += s;
758            }
759            let avg = sum / num_time_steps as f64;
760            let payoff = if is_call {
761                (avg - params.strike).max(0.0)
762            } else {
763                (params.strike - avg).max(0.0)
764            };
765            discount * payoff
766        })
767        .collect();
768
769    Ok(build_result(&values, config))
770}
771
772/// Monte Carlo pricing of a barrier option (up/down, knock-in/knock-out).
773///
774/// Uses discrete monitoring at each time step.
775///
776/// # Errors
777///
778/// Returns an error if the configuration is invalid.
779pub fn mc_barrier_option(
780    params: &BlackScholesParams,
781    barrier: f64,
782    is_up: bool,
783    is_knock_out: bool,
784    config: &MonteCarloConfig,
785) -> RandResult<MonteCarloResult> {
786    config.validate()?;
787    let mut rng = Xoshiro256::new(config.seed);
788    let r = params.risk_free_rate;
789    let sigma = params.volatility;
790    let t = params.time_to_maturity;
791    let num_steps = 252; // daily monitoring
792    let dt = t / num_steps as f64;
793    let drift = (r - 0.5 * sigma * sigma) * dt;
794    let vol_sqrt_dt = sigma * dt.sqrt();
795    let discount = (-r * t).exp();
796
797    let values: Vec<f64> = (0..config.num_samples)
798        .map(|_| {
799            let mut s = params.spot;
800            let mut hit_barrier = false;
801            for _ in 0..num_steps {
802                let z = rng.next_normal();
803                s *= (drift + vol_sqrt_dt * z).exp();
804                if (is_up && s >= barrier) || (!is_up && s <= barrier) {
805                    hit_barrier = true;
806                }
807            }
808            let payoff = (s - params.strike).max(0.0); // call payoff
809            let active = if is_knock_out {
810                !hit_barrier
811            } else {
812                hit_barrier
813            };
814            if active { discount * payoff } else { 0.0 }
815        })
816        .collect();
817
818    Ok(build_result(&values, config))
819}
820
821// ---------------------------------------------------------------------------
822// Convergence Analysis
823// ---------------------------------------------------------------------------
824
825/// Run Monte Carlo integration at multiple sample sizes to observe convergence.
826///
827/// Returns one [`MonteCarloResult`] per entry in `sample_sizes`.
828///
829/// # Errors
830///
831/// Returns an error if any sample size is zero.
832pub fn convergence_analysis<F>(
833    f: F,
834    sample_sizes: &[usize],
835    seed: u64,
836) -> RandResult<Vec<MonteCarloResult>>
837where
838    F: Fn(f64) -> f64,
839{
840    let mut results = Vec::with_capacity(sample_sizes.len());
841    for &n in sample_sizes {
842        let config = MonteCarloConfig {
843            num_samples: n,
844            seed,
845            confidence_level: 0.95,
846            use_antithetic: false,
847            use_control_variate: false,
848        };
849        results.push(mc_integrate(&f, &config)?);
850    }
851    Ok(results)
852}
853
/// Estimate the effective sample size from autocorrelation.
///
/// Uses the initial positive sequence estimator: lag autocorrelations are
/// summed until the first negative one, then `ESS = N / (1 + 2 * sum)`.
/// Chains shorter than two samples, or numerically constant chains, are
/// reported as fully independent (`ESS = N`).
pub fn effective_sample_size(samples: &[f64]) -> f64 {
    let n = samples.len();
    if n < 2 {
        return n as f64;
    }
    let nf = n as f64;
    let mean = samples.iter().sum::<f64>() / nf;
    let var: f64 = samples.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / nf;
    // A constant chain carries no autocorrelation information.
    if var < 1e-300 {
        return nf;
    }

    let mut sum_rho = 0.0;
    for lag in 1..n / 2 {
        // Biased (1/N) autocovariance at this lag.
        let autocov = samples[..n - lag]
            .iter()
            .zip(&samples[lag..])
            .map(|(&a, &b)| (a - mean) * (b - mean))
            .sum::<f64>()
            / nf;
        let rho = autocov / var;
        if rho < 0.0 {
            break;
        }
        sum_rho += rho;
    }

    nf / (1.0 + 2.0 * sum_rho)
}
887
/// Compute the Gelman-Rubin R-hat statistic for convergence of multiple
/// MCMC chains.
///
/// Values close to 1.0 indicate convergence; > 1.1 suggests further
/// sampling is needed.
///
/// Returns `NaN` if there are fewer than 2 chains, any chain has length
/// < 2, or the chains have unequal lengths (the between/within variance
/// decomposition assumes a common length `n`; previously a shorter chain
/// silently corrupted the within-chain variance).
pub fn gelman_rubin(chains: &[Vec<f64>]) -> f64 {
    if chains.len() < 2 {
        return f64::NAN;
    }
    let m = chains.len() as f64;
    let n = chains[0].len();
    // All chains must share the common length used by B, W and var_hat below.
    if n < 2 || chains.iter().any(|c| c.len() != n) {
        return f64::NAN;
    }

    // Chain means and overall mean.
    let chain_means: Vec<f64> = chains
        .iter()
        .map(|c| c.iter().sum::<f64>() / n as f64)
        .collect();
    let overall_mean = chain_means.iter().sum::<f64>() / m;

    // Between-chain variance B.
    let b = (n as f64 / (m - 1.0))
        * chain_means
            .iter()
            .map(|&cm| (cm - overall_mean).powi(2))
            .sum::<f64>();

    // Within-chain variance W (mean of per-chain sample variances).
    let w: f64 = chains
        .iter()
        .zip(chain_means.iter())
        .map(|(c, &cm)| c.iter().map(|&x| (x - cm).powi(2)).sum::<f64>() / (n as f64 - 1.0))
        .sum::<f64>()
        / m;

    // Pooled variance estimate and R-hat; degenerate (zero W) chains give NaN.
    let var_hat = (1.0 - 1.0 / n as f64) * w + b / n as f64;
    if w < 1e-300 {
        return f64::NAN;
    }
    (var_hat / w).sqrt()
}
934
935// ---------------------------------------------------------------------------
936// Tests
937// ---------------------------------------------------------------------------
938
#[cfg(test)]
mod tests {
    use super::*;

    /// Baseline config: fixed seed, 95% confidence, no variance reduction.
    fn default_config(n: usize) -> MonteCarloConfig {
        MonteCarloConfig {
            num_samples: n,
            seed: 12345,
            confidence_level: 0.95,
            use_antithetic: false,
            use_control_variate: false,
        }
    }

    // 1. The quarter-circle integral on [0,1] equals pi/4.
    #[test]
    fn test_mc_integrate_pi_quarter() {
        let expected = core::f64::consts::FRAC_PI_4;
        let cfg = default_config(200_000);
        let out = mc_integrate(|x| (1.0 - x * x).sqrt(), &cfg).expect("integration failed");
        assert!(
            (out.estimate - expected).abs() < 0.01,
            "estimate {} not close to pi/4 = {}",
            out.estimate,
            expected
        );
    }

    // 2. N-dimensional integration: indicator of the unit ball in 3D.
    //    V_3 = (4/3)*pi, and the [0,1]^3 octant holds (4/3)*pi/8 = pi/6 of it.
    #[test]
    fn test_mc_integrate_nd_sphere() {
        let cfg = default_config(500_000);
        let out = mc_integrate_nd(
            |x| {
                let r2: f64 = x.iter().map(|xi| xi * xi).sum();
                if r2 <= 1.0 { 1.0 } else { 0.0 }
            },
            3,
            &cfg,
        )
        .expect("nd integration failed");
        let expected = core::f64::consts::PI / 6.0;
        assert!(
            (out.estimate - expected).abs() < 0.01,
            "estimate {} not close to pi/6 = {}",
            out.estimate,
            expected
        );
    }

    // 3. Antithetic pairing should cut the estimator variance.
    #[test]
    fn test_antithetic_reduces_variance() {
        fn quarter_circle(x: f64) -> f64 {
            (1.0 - x * x).sqrt()
        }
        let cfg = default_config(100_000);
        let naive = mc_integrate(quarter_circle, &cfg).expect("naive failed");
        let anti = antithetic_integrate(quarter_circle, &cfg).expect("anti failed");
        assert!(
            anti.variance < naive.variance,
            "antithetic variance {} should be < naive variance {}",
            anti.variance,
            naive.variance
        );
    }

    // 4. Control variates: integrate x^2 using x (known mean 0.5) as control.
    #[test]
    fn test_control_variate() {
        let cfg = default_config(100_000);
        let out = control_variate_integrate(|x| x * x, |x| x, 0.5, &cfg).expect("cv failed");
        assert!(
            (out.estimate - 1.0 / 3.0).abs() < 0.005,
            "estimate {} not close to 1/3",
            out.estimate
        );
    }

    // 5. Stratified sampling over 100 strata reproduces pi/4.
    #[test]
    fn test_stratified_sampling() {
        let cfg = default_config(100_000);
        let out =
            stratified_integrate(|x| (1.0 - x * x).sqrt(), 100, &cfg).expect("strat failed");
        let expected = core::f64::consts::FRAC_PI_4;
        assert!(
            (out.estimate - expected).abs() < 0.005,
            "estimate {} not close to pi/4",
            out.estimate
        );
    }

    // 6. Metropolis-Hastings targeting a standard 1D Gaussian.
    #[test]
    fn test_metropolis_hastings_gaussian() {
        fn log_normal(x: &[f64]) -> f64 {
            -0.5 * x[0] * x[0]
        }
        let mut sampler = MetropolisHastings::new(log_normal, 1.0, 1);
        let out = sampler.sample(50_000, 5_000, 42).expect("mh failed");
        // The posterior mean of the target is 0.
        let count = out.samples.len() as f64;
        let mean = out.samples.iter().map(|s| s[0]).sum::<f64>() / count;
        assert!(mean.abs() < 0.1, "MH mean {} should be near 0", mean);
        assert!(
            out.acceptance_rate > 0.1 && out.acceptance_rate < 0.95,
            "acceptance rate {} out of range",
            out.acceptance_rate
        );
    }

    // 7. HMC on an isotropic 2D Gaussian.
    #[test]
    fn test_hmc_2d_gaussian() {
        fn log_target(x: &[f64]) -> f64 {
            -0.5 * (x[0] * x[0] + x[1] * x[1])
        }
        fn grad_log_target(x: &[f64]) -> Vec<f64> {
            vec![-x[0], -x[1]]
        }
        let mut sampler = HamiltonianMC::new(log_target, grad_log_target, 0.1, 20, 2);
        let out = sampler.sample(10_000, 2_000, 99).expect("hmc failed");
        let count = out.samples.len() as f64;
        let mean_x = out.samples.iter().map(|s| s[0]).sum::<f64>() / count;
        let mean_y = out.samples.iter().map(|s| s[1]).sum::<f64>() / count;
        assert!(mean_x.abs() < 0.1, "HMC mean_x {} should be near 0", mean_x);
        assert!(mean_y.abs() < 0.1, "HMC mean_y {} should be near 0", mean_y);
        assert!(
            out.acceptance_rate > 0.5,
            "HMC acceptance {} too low",
            out.acceptance_rate
        );
    }

    // 8. MC European call price should match the Black-Scholes closed form.
    #[test]
    fn test_european_call_bs() {
        let params = BlackScholesParams {
            spot: 100.0,
            strike: 100.0,
            risk_free_rate: 0.05,
            volatility: 0.2,
            time_to_maturity: 1.0,
        };
        let cfg = default_config(1_000_000);
        let mc = mc_european_option(&params, true, &cfg).expect("eu call failed");
        let analytical = bs_analytical(&params, true);
        assert!(
            (mc.estimate - analytical).abs() < 0.5,
            "MC price {} not close to BS price {}",
            mc.estimate,
            analytical
        );
    }

    // 9. Asian option: prices positive and not above the European call.
    #[test]
    fn test_asian_option() {
        let params = BlackScholesParams {
            spot: 100.0,
            strike: 100.0,
            risk_free_rate: 0.05,
            volatility: 0.2,
            time_to_maturity: 1.0,
        };
        let cfg = default_config(50_000);
        let asian = mc_asian_option(&params, 50, true, &cfg).expect("asian failed");
        assert!(asian.estimate > 0.0, "Asian call price should be positive");
        // Averaging dampens volatility, so the Asian call is the cheaper one.
        let eu = mc_european_option(&params, true, &cfg).expect("eu failed");
        assert!(
            asian.estimate < eu.estimate + 2.0,
            "Asian call {} should be <= European call {}",
            asian.estimate,
            eu.estimate
        );
    }

    // 10. Up-and-out barrier call is bounded above by the vanilla call.
    #[test]
    fn test_barrier_option() {
        let params = BlackScholesParams {
            spot: 100.0,
            strike: 100.0,
            risk_free_rate: 0.05,
            volatility: 0.2,
            time_to_maturity: 1.0,
        };
        let cfg = default_config(100_000);
        let barrier =
            mc_barrier_option(&params, 130.0, true, true, &cfg).expect("barrier failed");
        // Knocking out paths can only remove payoff relative to the vanilla.
        let eu = mc_european_option(&params, true, &cfg).expect("eu failed");
        assert!(
            barrier.estimate < eu.estimate + 0.5,
            "Barrier {} should be <= European {}",
            barrier.estimate,
            eu.estimate
        );
        assert!(barrier.estimate >= 0.0, "Price should be non-negative");
    }

    // 11. Convergence study: the standard error shrinks as N grows.
    #[test]
    fn test_convergence_analysis() {
        let sizes = [1_000, 10_000, 100_000];
        let runs = convergence_analysis(|x| x * x, &sizes, 42).expect("conv failed");
        assert_eq!(runs.len(), 3);
        assert!(
            runs[2].std_error < runs[0].std_error,
            "SE at 100k ({}) should be < SE at 1k ({})",
            runs[2].std_error,
            runs[0].std_error
        );
    }

    // 12. Effective sample size: ~N for iid draws, far below N for AR(1).
    #[test]
    fn test_effective_sample_size() {
        let mut rng = Xoshiro256::new(42);

        // Independent uniforms -> ESS close to the sample count.
        let iid: Vec<f64> = (0..10_000).map(|_| rng.next_f64()).collect();
        let ess = effective_sample_size(&iid);
        assert!(
            ess > 5_000.0,
            "ESS {} should be high for uncorrelated samples",
            ess
        );

        // AR(1) with coefficient 0.999 -> heavy autocorrelation.
        let mut state = 0.0f64;
        let mut ar1 = Vec::with_capacity(10_000);
        for _ in 0..10_000 {
            state = 0.999 * state + 0.001 * rng.next_normal();
            ar1.push(state);
        }
        let ess_corr = effective_sample_size(&ar1);
        assert!(
            ess_corr < 1_000.0,
            "ESS {} should be low for correlated samples",
            ess_corr
        );
    }

    // 13. R-hat should be near 1 for independent chains from one target.
    #[test]
    fn test_gelman_rubin() {
        let chains: Vec<Vec<f64>> = (1u64..=3)
            .map(|seed| {
                let mut rng = Xoshiro256::new(seed);
                (0..5_000).map(|_| rng.next_normal()).collect()
            })
            .collect();
        let rhat = gelman_rubin(&chains);
        assert!(
            (rhat - 1.0).abs() < 0.05,
            "R-hat {} should be close to 1.0",
            rhat
        );
    }

    // 14. Importance sampling with the trivial uniform(0,1) proposal.
    #[test]
    fn test_importance_sampling() {
        let cfg = default_config(100_000);
        let out = mc_integrate_importance(
            |x| x * x,
            |_x| 1.0,    // proposal density: uniform on [0,1]
            |rng| rng(), // draw directly from the proposal
            &cfg,
        )
        .expect("importance sampling failed");
        assert!(
            (out.estimate - 1.0 / 3.0).abs() < 0.01,
            "estimate {} not close to 1/3",
            out.estimate
        );
    }

    // 15. Invalid configurations are rejected up front.
    #[test]
    fn test_config_validation() {
        let zero_samples = MonteCarloConfig {
            num_samples: 0,
            ..MonteCarloConfig::default()
        };
        assert!(mc_integrate(|x| x, &zero_samples).is_err());

        // Confidence level must lie strictly inside (0, 1).
        for bad_level in [1.5, 0.0] {
            let cfg = MonteCarloConfig {
                confidence_level: bad_level,
                ..MonteCarloConfig::default()
            };
            assert!(mc_integrate(|x| x, &cfg).is_err());
        }
    }
}