//! Advanced numerical methods for high-performance scientific computing
//!
//! This module implements algorithms for variance reduction, adaptive sampling,
//! and multi-level Monte Carlo methods used in scientific computing
//! and computational finance applications.
//!
//! # Key Algorithms
//!
//! - **Multi-level Monte Carlo (MLMC)**: Reduces the cost of reaching a target
//!   accuracy compared with standard Monte Carlo
//! - **Adaptive sampling**: Dynamically adjusts sampling based on variance estimates
//! - **Importance sampling**: Focuses sampling on high-importance regions
//! - **Sequential Monte Carlo**: Particle filtering with systematic resampling
//! - **Metropolis-Hastings**: MCMC with adaptive acceptance rates
//!
//! # Examples
//!
//! ```rust
//! use scirs2_core::random::advanced_numerical::*;
//! use scirs2_core::random::core::Random;
//! use rand_distr::Uniform;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Define a computation function that returns the required format for MLMC
//! let compute_option_price = |level: usize, samples: usize| -> Result<Vec<f64>, String> {
//!     // Generate samples for this level
//!     let mut rng = scirs2_core::random::seeded_rng(42 + level as u64);
//!     let mut results = Vec::with_capacity(samples);
//!     for _ in 0..samples {
//!         // Simple Black-Scholes approximation based on level
//!         let dt = 1.0 / (2.0_f64.powi(level as i32));
//!         let random_val = rng.sample(Uniform::new(0.0, 1.0).expect("valid uniform range"));
//!         let price = 100.0 * (1.0 + 0.05 * dt * random_val);
//!         results.push((price - 100.0).max(0.0)); // Call option payoff
//!     }
//!     Ok(results)
//! };
//!
//! // Define an expensive computation function
//! let expensive_computation = |rng: &mut Random<rand::rngs::StdRng>| -> f64 {
//!     // Monte Carlo integration of a complex function
//!     let samples: Vec<f64> = (0..100)
//!         .map(|_| rng.sample(Uniform::new(0.0, 1.0).expect("valid uniform range")))
//!         .collect();
//!     samples.iter().map(|&x| (x * std::f64::consts::PI).sin().powi(2)).sum::<f64>()
//!         / samples.len() as f64
//! };
//!
//! // Multi-level Monte Carlo for option pricing
//! let mlmc = MultiLevelMonteCarlo::new(3, 100); // Small values for doc test
//! let _estimate = mlmc.estimate(compute_option_price)?;
//!
//! // Adaptive sampling with variance tracking
//! let mut adaptive = AdaptiveSampler::new(0.1, 1000); // Relaxed tolerance for doc test
//! let _result = adaptive.sample_until_convergence(expensive_computation)?;
//! # Ok(())
//! # }
//! ```

use crate::random::{
    core::{seeded_rng, Random},
    distributions::MultivariateNormal,
};
use ::ndarray::Array2;
use rand::Rng;
use rand_distr::{Distribution, Normal, Uniform};
use std::collections::VecDeque;

/// Multi-level Monte Carlo estimator for variance reduction
///
/// MLMC uses a telescoping sum across multiple levels of approximation to achieve
/// better convergence rates than standard Monte Carlo methods. This is particularly
/// useful for stochastic differential equations and option pricing.
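///
/// The estimator is based on the telescoping identity
/// `E[P_L] = E[P_0] + sum_{l=1..L} E[P_l - P_{l-1}]`; when successive levels are
/// well coupled, each correction term has small variance, so most of the sampling
/// budget can be spent on the cheap coarse levels.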
#[derive(Debug, Clone)]
pub struct MultiLevelMonteCarlo {
    max_levels: usize,
    base_samples: usize,
    variance_tolerance: f64,
    convergence_factor: f64,
}

impl MultiLevelMonteCarlo {
    /// Create a new MLMC estimator
    pub fn new(max_levels: usize, base_samples: usize) -> Self {
        Self {
            max_levels,
            base_samples,
            variance_tolerance: 1e-6,
            convergence_factor: 2.0,
        }
    }

    /// Set variance tolerance for adaptive level selection
    pub fn with_tolerance(mut self, tolerance: f64) -> Self {
        self.variance_tolerance = tolerance;
        self
    }

    /// Set convergence factor for sample size scaling
    pub fn with_convergence_factor(mut self, factor: f64) -> Self {
        self.convergence_factor = factor;
        self
    }

    /// Estimate using multi-level Monte Carlo
    pub fn estimate<F>(&self, mut level_function: F) -> Result<MLMCResult, String>
    where
        F: FnMut(usize, usize) -> Result<Vec<f64>, String>,
    {
        let mut estimates = Vec::new();
        let mut variances = Vec::new();
        let mut total_samples = 0;

        for level in 0..self.max_levels {
            // Adaptive sample size calculation
            let level_samples = if level == 0 {
                self.base_samples
            } else {
                (self.base_samples as f64 * self.convergence_factor.powi(level as i32)) as usize
            };

            // Compute level estimate
            let samples = level_function(level, level_samples)?;
            if samples.len() < 2 {
                return Err(format!("level {} returned fewer than 2 samples", level));
            }
            let mean = samples.iter().sum::<f64>() / samples.len() as f64;
            let variance = samples.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
                / (samples.len() - 1) as f64;

            estimates.push(mean);
            variances.push(variance);
            total_samples += level_samples;

            // Check convergence criteria
            if variance < self.variance_tolerance && level > 2 {
                break;
            }
        }

        // Telescoping sum. Note: because `level_function` returns per-level values
        // (not coupled differences), this sum collapses to the finest-level estimate;
        // a full MLMC would sample the differences P_l - P_{l-1} directly.
        let mut mlmc_estimate = estimates[0];
        for i in 1..estimates.len() {
            mlmc_estimate += estimates[i] - estimates[i - 1];
        }

        Ok(MLMCResult {
            estimate: mlmc_estimate,
            variance: variances.iter().sum::<f64>() / variances.len() as f64,
            levels_used: estimates.len(),
            total_samples,
            level_estimates: estimates,
            level_variances: variances,
        })
    }

    /// Parallel MLMC estimation
    ///
    /// Note: the current implementation evaluates the levels sequentially; the task
    /// list is structured so the levels can later be dispatched to a thread pool.
    pub fn estimate_parallel<F>(&self, level_function: F) -> Result<MLMCResult, String>
    where
        F: Fn(usize, usize) -> Result<Vec<f64>, String> + Send + Sync,
    {
        // Create level tasks
        let level_tasks: Vec<_> = (0..self.max_levels)
            .map(|level| {
                let level_samples = if level == 0 {
                    self.base_samples
                } else {
                    (self.base_samples as f64 * self.convergence_factor.powi(level as i32)) as usize
                };
                (level, level_samples)
            })
            .collect();

        // Execute levels (sequentially for now; see note above)
        let mut level_results = Vec::new();
        for &(level, samples) in &level_tasks {
            let result = level_function(level, samples)?;
            level_results.push(result);
        }

        // Process results
        let mut estimates = Vec::new();
        let mut variances = Vec::new();
        let mut total_samples = 0;

        for samples in &level_results {
            if samples.len() < 2 {
                return Err("a level returned fewer than 2 samples".to_string());
            }
            let mean = samples.iter().sum::<f64>() / samples.len() as f64;
            let variance = samples.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
                / (samples.len() - 1) as f64;

            estimates.push(mean);
            variances.push(variance);
            total_samples += samples.len();
        }

        // Telescoping sum (collapses to the finest level; see `estimate`)
        let mut mlmc_estimate = estimates[0];
        for i in 1..estimates.len() {
            mlmc_estimate += estimates[i] - estimates[i - 1];
        }

        Ok(MLMCResult {
            estimate: mlmc_estimate,
            variance: variances.iter().sum::<f64>() / variances.len() as f64,
            levels_used: estimates.len(),
            total_samples,
            level_estimates: estimates,
            level_variances: variances,
        })
    }
}

/// Result from Multi-level Monte Carlo estimation
#[derive(Debug, Clone)]
pub struct MLMCResult {
    pub estimate: f64,
    pub variance: f64,
    pub levels_used: usize,
    pub total_samples: usize,
    pub level_estimates: Vec<f64>,
    pub level_variances: Vec<f64>,
}

impl MLMCResult {
    /// Calculate confidence interval
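    ///
    /// Uses the normal approximation: the interval is
    /// `estimate ± z * sqrt(variance / total_samples)`, where `z` is the
    /// standard-normal quantile for the requested confidence level.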
    pub fn confidence_interval(&self, confidence: f64) -> (f64, f64) {
        let z_score = match confidence {
            0.90 => 1.645,
            0.95 => 1.96,
            0.99 => 2.576,
            _ => 1.96, // Default to 95%
        };

        let std_error = (self.variance / self.total_samples as f64).sqrt();
        let margin = z_score * std_error;

        (self.estimate - margin, self.estimate + margin)
    }

    /// Calculate relative error
    pub fn relative_error(&self) -> f64 {
        if self.estimate.abs() > 1e-10 {
            (self.variance / self.total_samples as f64).sqrt() / self.estimate.abs()
        } else {
            f64::INFINITY
        }
    }
}

/// Adaptive sampling with dynamic variance tracking
///
/// This sampler automatically adjusts sample sizes based on running variance estimates
/// to achieve desired accuracy with minimal computational cost.
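///
/// Sampling stops once the relative standard error
/// `sqrt(variance / n) / |estimate|` drops below the target tolerance, or once
/// `max_samples` have been drawn.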
#[derive(Debug)]
pub struct AdaptiveSampler {
    target_tolerance: f64,
    max_samples: usize,
    min_batch_size: usize,
    max_batch_size: usize,
    variance_window: usize,
    running_estimates: VecDeque<f64>,
    running_variances: VecDeque<f64>,
}

impl AdaptiveSampler {
    /// Create a new adaptive sampler
    pub fn new(target_tolerance: f64, max_samples: usize) -> Self {
        Self {
            target_tolerance,
            max_samples,
            min_batch_size: 100,
            max_batch_size: 10000,
            variance_window: 20,
            running_estimates: VecDeque::new(),
            running_variances: VecDeque::new(),
        }
    }

    /// Configure batch size limits
    pub fn with_batch_limits(mut self, min_batch: usize, max_batch: usize) -> Self {
        self.min_batch_size = min_batch;
        self.max_batch_size = max_batch;
        self
    }

    /// Sample until convergence or max samples reached
    pub fn sample_until_convergence<F>(&mut self, mut sampler: F) -> Result<AdaptiveResult, String>
    where
        F: FnMut(&mut Random<rand::rngs::StdRng>) -> f64,
    {
        let mut rng = seeded_rng(42);
        let mut total_samples = 0;
        let mut current_estimate = 0.0;
        let mut current_variance = f64::INFINITY;
        let mut batch_size = self.min_batch_size;

        while total_samples < self.max_samples {
            // Sample current batch
            let mut batch_samples = Vec::with_capacity(batch_size);
            for _ in 0..batch_size {
                batch_samples.push(sampler(&mut rng));
            }

            // Update running statistics
            let batch_mean = batch_samples.iter().sum::<f64>() / batch_samples.len() as f64;
            let batch_variance = batch_samples
                .iter()
                .map(|x| (x - batch_mean).powi(2))
                .sum::<f64>()
                / (batch_samples.len() - 1) as f64;

            self.running_estimates.push_back(batch_mean);
            self.running_variances.push_back(batch_variance);

            // Maintain window size
            if self.running_estimates.len() > self.variance_window {
                self.running_estimates.pop_front();
                self.running_variances.pop_front();
            }

            // Update global estimates
            let total_weight = self.running_estimates.len() as f64;
            current_estimate = self.running_estimates.iter().sum::<f64>() / total_weight;
            current_variance = self.running_variances.iter().sum::<f64>() / total_weight;

            total_samples += batch_size;

            // Check convergence
            let std_error = (current_variance / total_samples as f64).sqrt();
            let relative_error = if current_estimate.abs() > 1e-10 {
                std_error / current_estimate.abs()
            } else {
                std_error
            };

            if relative_error < self.target_tolerance {
                break;
            }

            // Adaptive batch size adjustment
            batch_size = self.adapt_batch_size(current_variance, total_samples, relative_error);
        }

        Ok(AdaptiveResult {
            estimate: current_estimate,
            variance: current_variance,
            samples_used: total_samples,
            converged: self.check_convergence(current_estimate, current_variance, total_samples),
            final_batch_size: batch_size,
        })
    }

    /// Adapt batch size based on current variance and convergence rate
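    ///
    /// Heuristic: scale the minimum batch size by `sqrt(variance / tolerance)` and
    /// by `relative_error / tolerance`, each clamped to `[0.1, 10]`, then clamp the
    /// result to the configured batch limits.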
    fn adapt_batch_size(&self, variance: f64, _samples_so_far: usize, relative_error: f64) -> usize {
        // Increase batch size if variance is high or we're far from target
        let variance_factor = (variance / self.target_tolerance).sqrt().clamp(0.1, 10.0);
        let error_factor = (relative_error / self.target_tolerance).clamp(0.1, 10.0);

        let suggested_size = (self.min_batch_size as f64 * variance_factor * error_factor) as usize;
        suggested_size.clamp(self.min_batch_size, self.max_batch_size)
    }

    /// Check if sampling has converged
    fn check_convergence(&self, estimate: f64, variance: f64, total_samples: usize) -> bool {
        let std_error = (variance / total_samples as f64).sqrt();
        let relative_error = if estimate.abs() > 1e-10 {
            std_error / estimate.abs()
        } else {
            std_error
        };

        relative_error < self.target_tolerance
    }
}

/// Result from adaptive sampling
#[derive(Debug, Clone)]
pub struct AdaptiveResult {
    pub estimate: f64,
    pub variance: f64,
    pub samples_used: usize,
    pub converged: bool,
    pub final_batch_size: usize,
}

impl AdaptiveResult {
    /// Calculate confidence interval
    pub fn confidence_interval(&self, confidence: f64) -> (f64, f64) {
        let z_score = match confidence {
            0.90 => 1.645,
            0.95 => 1.96,
            0.99 => 2.576,
            _ => 1.96,
        };

        let std_error = (self.variance / self.samples_used as f64).sqrt();
        let margin = z_score * std_error;

        (self.estimate - margin, self.estimate + margin)
    }
}

/// Importance sampling for focusing on high-importance regions
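///
/// Uses the self-normalized estimator `Î = Σ wᵢ f(xᵢ) / Σ wᵢ` with weights
/// `wᵢ = p(xᵢ) / q(xᵢ)`, where `p` is the target density and `q` the proposal
/// density (both may be unnormalized).
///
/// A minimal sketch, assuming a standard-normal target and a shifted-normal
/// proposal (mirrors the unit test below):
///
/// ```rust,no_run
/// use scirs2_core::random::advanced_numerical::ImportanceSampler;
/// use rand_distr::Normal;
///
/// let sampler = ImportanceSampler::new(
///     |rng| rng.sample(Normal::new(1.0, 1.0).expect("valid normal parameters")),
///     |x| (-0.5 * x * x).exp(),             // target: N(0,1), unnormalized
///     |x| (-0.5 * (x - 1.0).powi(2)).exp(), // proposal: N(1,1), unnormalized
/// );
/// let result = sampler.estimate(|x| x, 1_000).expect("nonzero weight sum");
/// assert!(result.effective_sample_size > 0.0);
/// ```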
pub struct ImportanceSampler {
    proposal_distribution: Box<dyn Fn(&mut Random<rand::rngs::StdRng>) -> f64 + Send + Sync>,
    target_density: Box<dyn Fn(f64) -> f64 + Send + Sync>,
    proposal_density: Box<dyn Fn(f64) -> f64 + Send + Sync>,
}

impl ImportanceSampler {
    /// Create importance sampler with custom proposal distribution
    pub fn new<P, T, Q>(proposal: P, target_density: T, proposal_density: Q) -> Self
    where
        P: Fn(&mut Random<rand::rngs::StdRng>) -> f64 + Send + Sync + 'static,
        T: Fn(f64) -> f64 + Send + Sync + 'static,
        Q: Fn(f64) -> f64 + Send + Sync + 'static,
    {
        Self {
            proposal_distribution: Box::new(proposal),
            target_density: Box::new(target_density),
            proposal_density: Box::new(proposal_density),
        }
    }

    /// Estimate integral using importance sampling
    pub fn estimate<F>(&self, function: F, num_samples: usize) -> Result<ImportanceResult, String>
    where
        F: Fn(f64) -> f64,
    {
        if num_samples < 2 {
            return Err("importance sampling requires at least 2 samples".to_string());
        }

        let mut rng = seeded_rng(42);
        let mut weighted_sum = 0.0;
        let mut weight_sum = 0.0;
        // Store (weight, f(x)) pairs so the variance pass does not need to re-sample
        let mut weighted_values = Vec::with_capacity(num_samples);

        for _ in 0..num_samples {
            // Sample from proposal distribution
            let x = (self.proposal_distribution)(&mut rng);

            // Calculate importance weight w(x) = p(x) / q(x)
            let target_val = (self.target_density)(x);
            let proposal_val = (self.proposal_density)(x);

            if proposal_val > 1e-10 {
                let weight = target_val / proposal_val;
                let function_val = function(x);

                weighted_sum += weight * function_val;
                weight_sum += weight;
                weighted_values.push((weight, function_val));
            }
        }

        let estimate = if weight_sum > 1e-10 {
            weighted_sum / weight_sum
        } else {
            return Err("Zero weight sum in importance sampling".to_string());
        };

        // Effective sample size: (Σw)² / Σw²
        let weight_sum_sq = weighted_values.iter().map(|(w, _)| w * w).sum::<f64>();
        let effective_sample_size = weight_sum * weight_sum / weight_sum_sq;

        // Weighted variance of f around the self-normalized estimate
        let variance_sum: f64 = weighted_values
            .iter()
            .map(|(w, f)| w * (f - estimate).powi(2))
            .sum();
        let variance = variance_sum / (weight_sum * (num_samples - 1) as f64);

        Ok(ImportanceResult {
            estimate,
            variance,
            effective_sample_size,
            total_samples: num_samples,
        })
    }
}

/// Result from importance sampling
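///
/// `effective_sample_size` is `(Σ wᵢ)² / Σ wᵢ²`; values much smaller than
/// `total_samples` indicate a poor match between proposal and target.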
#[derive(Debug, Clone)]
pub struct ImportanceResult {
    pub estimate: f64,
    pub variance: f64,
    pub effective_sample_size: f64,
    pub total_samples: usize,
}

/// Sequential Monte Carlo (Particle Filter) implementation
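///
/// Particles carry normalized weights; after each `update`, the effective sample
/// size `ESS = 1 / Σ wᵢ²` is checked and systematic resampling is triggered when
/// it falls below `resampling_threshold * num_particles`.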
#[derive(Debug)]
pub struct SequentialMonteCarlo {
    num_particles: usize,
    resampling_threshold: f64,
    particles: Vec<Particle>,
}

#[derive(Debug, Clone)]
struct Particle {
    state: Vec<f64>,
    weight: f64,
    log_weight: f64,
}

impl SequentialMonteCarlo {
    /// Create new SMC sampler
    pub fn new(num_particles: usize) -> Self {
        Self {
            num_particles,
            resampling_threshold: 0.5,
            particles: Vec::with_capacity(num_particles),
        }
    }

    /// Initialize particles
    pub fn initialize<F>(&mut self, mut initializer: F) -> Result<(), String>
    where
        F: FnMut(&mut Random<rand::rngs::StdRng>) -> Vec<f64>,
    {
        let mut rng = seeded_rng(42);
        self.particles.clear();

        for _ in 0..self.num_particles {
            let state = initializer(&mut rng);
            self.particles.push(Particle {
                state,
                weight: 1.0 / self.num_particles as f64,
                log_weight: -(self.num_particles as f64).ln(),
            });
        }

        Ok(())
    }

    /// Prediction step
    pub fn predict<F>(&mut self, mut transition: F) -> Result<(), String>
    where
        F: FnMut(&Vec<f64>, &mut Random<rand::rngs::StdRng>) -> Vec<f64>,
    {
        let mut rng = seeded_rng(42);

        for particle in &mut self.particles {
            particle.state = transition(&particle.state, &mut rng);
        }

        Ok(())
    }

    /// Update step with observations
    pub fn update<F>(&mut self, observation: &[f64], mut likelihood: F) -> Result<(), String>
    where
        F: FnMut(&Vec<f64>, &[f64]) -> f64,
    {
        // Update weights based on likelihood
        let mut max_log_weight = f64::NEG_INFINITY;

        for particle in &mut self.particles {
            let likelihood_val = likelihood(&particle.state, observation);
            // Floor the likelihood so a zero value cannot poison the log-weights
            particle.log_weight += likelihood_val.max(1e-300).ln();
            max_log_weight = max_log_weight.max(particle.log_weight);
        }

        // Normalize weights (log-sum-exp trick)
        let mut weight_sum = 0.0;
        for particle in &mut self.particles {
            particle.weight = (particle.log_weight - max_log_weight).exp();
            weight_sum += particle.weight;
        }

        for particle in &mut self.particles {
            particle.weight /= weight_sum;
        }

        // Check if resampling is needed
        let effective_sample_size = self.effective_sample_size();
        if effective_sample_size < self.resampling_threshold * self.num_particles as f64 {
            self.resample()?;
        }

        Ok(())
    }

    /// Calculate effective sample size
    fn effective_sample_size(&self) -> f64 {
        let weight_sum_sq: f64 = self.particles.iter().map(|p| p.weight.powi(2)).sum();
        1.0 / weight_sum_sq
    }

    /// Systematic resampling
    fn resample(&mut self) -> Result<(), String> {
        let mut rng = seeded_rng(42);
        let u0 = rng
            .sample(Uniform::new(0.0, 1.0 / self.num_particles as f64).expect("valid uniform range"));

        let mut new_particles = Vec::with_capacity(self.num_particles);
        let mut cumulative_weight = 0.0;
        let mut i = 0;

        for j in 0..self.num_particles {
            let uj = u0 + j as f64 / self.num_particles as f64;

            while cumulative_weight < uj && i < self.particles.len() {
                cumulative_weight += self.particles[i].weight;
                i += 1;
            }

            // `i` points one past the selected particle; saturate so the
            // (measure-zero) case u0 == 0.0 still selects the first particle
            let mut new_particle = self.particles[i.saturating_sub(1)].clone();
            new_particle.weight = 1.0 / self.num_particles as f64;
            new_particle.log_weight = -(self.num_particles as f64).ln();
            new_particles.push(new_particle);
        }

        self.particles = new_particles;
        Ok(())
    }

    /// Get current state estimate (weighted mean)
    pub fn state_estimate(&self) -> Vec<f64> {
        if self.particles.is_empty() {
            return Vec::new();
        }

        let state_dim = self.particles[0].state.len();
        let mut estimate = vec![0.0; state_dim];

        for particle in &self.particles {
            for (i, &val) in particle.state.iter().enumerate() {
                estimate[i] += particle.weight * val;
            }
        }

        estimate
    }

    /// Get covariance matrix of current state
    pub fn state_covariance(&self) -> Array2<f64> {
        let estimate = self.state_estimate();
        let state_dim = estimate.len();
        let mut covariance = Array2::zeros((state_dim, state_dim));

        for particle in &self.particles {
            for i in 0..state_dim {
                for j in 0..state_dim {
                    let diff_i = particle.state[i] - estimate[i];
                    let diff_j = particle.state[j] - estimate[j];
                    covariance[[i, j]] += particle.weight * diff_i * diff_j;
                }
            }
        }

        covariance
    }
}

/// Adaptive Metropolis-Hastings sampler with automatic tuning
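///
/// The proposal covariance is rescaled toward a target acceptance rate; common
/// heuristics are roughly 0.44 for one-dimensional targets and 0.234 in high
/// dimensions (Roberts & Rosenthal optimal-scaling results).
///
/// A minimal sketch (mirrors the unit test below), sampling a standard 2-D normal:
///
/// ```rust,no_run
/// use scirs2_core::random::advanced_numerical::AdaptiveMetropolisHastings;
///
/// let mut amh = AdaptiveMetropolisHastings::new(2, 0.44);
/// let samples = amh
///     .sample(|s| -0.5 * (s[0].powi(2) + s[1].powi(2)), vec![0.0, 0.0], 1_000)
///     .expect("sampling failed");
/// assert_eq!(samples.len(), 1_000);
/// ```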
#[derive(Debug)]
pub struct AdaptiveMetropolisHastings {
    target_acceptance_rate: f64,
    adaptation_rate: f64,
    proposal_covariance: Array2<f64>,
    accepted_samples: usize,
    total_proposals: usize,
    state_history: VecDeque<Vec<f64>>,
    adaptation_window: usize,
}

impl AdaptiveMetropolisHastings {
    /// Create new adaptive MH sampler
    pub fn new(dimension: usize, target_acceptance: f64) -> Self {
        let mut proposal_cov = Array2::eye(dimension);
        proposal_cov *= 0.1; // Small initial proposal variance

        Self {
            target_acceptance_rate: target_acceptance,
            adaptation_rate: 0.01,
            proposal_covariance: proposal_cov,
            accepted_samples: 0,
            total_proposals: 0,
            state_history: VecDeque::new(),
            adaptation_window: 100,
        }
    }

    /// Sample from target distribution
    pub fn sample<F>(
        &mut self,
        log_density: F,
        initial_state: Vec<f64>,
        num_samples: usize,
    ) -> Result<Vec<Vec<f64>>, String>
    where
        F: Fn(&[f64]) -> f64,
    {
        let mut rng = seeded_rng(42);
        let mut current_state = initial_state;
        let mut current_log_density = log_density(&current_state);
        let mut samples = Vec::with_capacity(num_samples);

        // Create multivariate normal for proposals
        let mut mvn = MultivariateNormal::new(
            vec![0.0; current_state.len()],
            self.array_to_vec2d(&self.proposal_covariance),
        )
        .map_err(|e| format!("Failed to create MVN: {}", e))?;

        for i in 0..num_samples {
            // Generate proposal
            let proposal_delta = mvn.sample(&mut rng);
            let proposal_state: Vec<f64> = current_state
                .iter()
                .zip(proposal_delta.iter())
                .map(|(&curr, &delta)| curr + delta)
                .collect();

            // Evaluate proposal
            let proposal_log_density = log_density(&proposal_state);

            // Acceptance probability
            let log_alpha = proposal_log_density - current_log_density;
            let accept = if log_alpha >= 0.0 {
                true
            } else {
                let u: f64 = rng.sample(Uniform::new(0.0, 1.0).expect("valid uniform range"));
                u.ln() < log_alpha
            };

            self.total_proposals += 1;

            if accept {
                current_state = proposal_state;
                current_log_density = proposal_log_density;
                self.accepted_samples += 1;
            }

            samples.push(current_state.clone());
            self.state_history.push_back(current_state.clone());

            // Adapt proposal covariance and rebuild the proposal distribution so
            // the adaptation actually takes effect on subsequent proposals
            if i > 0 && i % 50 == 0 {
                self.adapt_proposal_covariance();
                mvn = MultivariateNormal::new(
                    vec![0.0; current_state.len()],
                    self.array_to_vec2d(&self.proposal_covariance),
                )
                .map_err(|e| format!("Failed to update MVN: {}", e))?;
            }

            // Maintain history window
            if self.state_history.len() > self.adaptation_window {
                self.state_history.pop_front();
            }
        }

        Ok(samples)
    }

    /// Adapt proposal covariance based on recent samples
    fn adapt_proposal_covariance(&mut self) {
        if self.state_history.len() < 10 {
            return;
        }

        // Calculate current acceptance rate
        let acceptance_rate = self.accepted_samples as f64 / self.total_proposals as f64;

        // Adapt based on acceptance rate
        let scale_factor = if acceptance_rate > self.target_acceptance_rate {
            1.0 + self.adaptation_rate
        } else {
            1.0 - self.adaptation_rate
        };

        // Scale proposal covariance
        self.proposal_covariance *= scale_factor.powi(2);

        // Update covariance based on sample history
        if self.state_history.len() > 20 {
            let sample_cov = self.calculate_sample_covariance();
            let learning_rate = 0.05;

            for i in 0..self.proposal_covariance.nrows() {
                for j in 0..self.proposal_covariance.ncols() {
                    self.proposal_covariance[[i, j]] = (1.0 - learning_rate)
                        * self.proposal_covariance[[i, j]]
                        + learning_rate * sample_cov[[i, j]];
                }
            }
        }
    }

    /// Calculate sample covariance from history
    fn calculate_sample_covariance(&self) -> Array2<f64> {
        let n = self.state_history.len();
        let dim = self.state_history[0].len();

        // Calculate mean
        let mut mean = vec![0.0; dim];
        for state in &self.state_history {
            for (i, &val) in state.iter().enumerate() {
                mean[i] += val;
            }
        }
        for val in &mut mean {
            *val /= n as f64;
        }

        // Calculate covariance
        let mut cov = Array2::zeros((dim, dim));
        for state in &self.state_history {
            for i in 0..dim {
                for j in 0..dim {
                    let diff_i = state[i] - mean[i];
                    let diff_j = state[j] - mean[j];
                    cov[[i, j]] += diff_i * diff_j;
                }
            }
        }

        cov /= (n - 1) as f64;
        cov
    }

    /// Convert Array2 to Vec<Vec<f64>> for MultivariateNormal
    fn array_to_vec2d(&self, array: &Array2<f64>) -> Vec<Vec<f64>> {
        array.rows().into_iter().map(|row| row.to_vec()).collect()
    }

    /// Get current acceptance rate
    pub fn acceptance_rate(&self) -> f64 {
        if self.total_proposals > 0 {
            self.accepted_samples as f64 / self.total_proposals as f64
        } else {
            0.0
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    #[test]
    fn test_mlmc_basic() {
        let mlmc = MultiLevelMonteCarlo::new(3, 100);

        let result = mlmc
            .estimate(|level, samples| {
                // Simple test function: E[X] = 0.5 at all levels
                let mut rng = seeded_rng(42 + level as u64);
                let mut vals = Vec::with_capacity(samples);
                for _ in 0..samples {
                    vals.push(rng.sample(Uniform::new(0.0, 1.0).expect("valid uniform range")));
                }
                Ok(vals)
            })
            .expect("MLMC estimation failed");

        assert_relative_eq!(result.estimate, 0.5, epsilon = 0.1);
        assert!(result.levels_used > 0);
        assert!(result.total_samples > 0);
    }

    #[test]
    fn test_adaptive_sampler() {
        let mut sampler = AdaptiveSampler::new(0.05, 10000);

        let result = sampler
            .sample_until_convergence(|rng| {
                // Sample from standard normal
                rng.sample(Normal::new(0.0, 1.0).expect("valid normal parameters"))
            })
            .expect("adaptive sampling failed");

        assert_relative_eq!(result.estimate, 0.0, epsilon = 0.1);
        assert!(result.samples_used > 0);
    }

    #[test]
    fn test_importance_sampling() {
        let sampler = ImportanceSampler::new(
            |rng| rng.sample(Normal::new(1.0, 1.0).expect("valid normal parameters")), // Proposal: N(1,1)
            |x| (-0.5 * x * x).exp(), // Target: N(0,1) density (unnormalized)
            |x| (-0.5 * (x - 1.0).powi(2)).exp(), // Proposal density (unnormalized)
        );

        let result = sampler.estimate(|x| x, 1000).expect("importance sampling failed");

        // Should estimate E[X] under N(0,1), which is 0
        assert_relative_eq!(result.estimate, 0.0, epsilon = 0.3);
        assert!(result.effective_sample_size > 0.0);
    }

    #[test]
    fn test_sequential_monte_carlo() {
        let mut smc = SequentialMonteCarlo::new(100);

        // Initialize with standard normal
        smc.initialize(|rng| vec![rng.sample(Normal::new(0.0, 1.0).expect("valid normal parameters"))])
            .expect("initialization failed");

        // Predict step (random walk)
        smc.predict(|state, rng| {
            let noise = rng.sample(Normal::new(0.0, 0.1).expect("valid normal parameters"));
            vec![state[0] + noise]
        })
        .expect("predict step failed");

        // Update with observation
        let observation = vec![0.5];
        smc.update(&observation, |state, obs| {
            // Gaussian likelihood
            let diff = state[0] - obs[0];
            (-0.5 * diff * diff).exp()
        })
        .expect("update step failed");

        let estimate = smc.state_estimate();
        assert_eq!(estimate.len(), 1);
        // Should be pulled towards observation
        assert!(estimate[0].abs() < 2.0);
    }

    #[test]
    #[ignore] // Flaky statistical test - can fail due to random variance
    fn test_adaptive_metropolis_hastings() {
        let mut amh = AdaptiveMetropolisHastings::new(2, 0.44);

        let samples = amh
            .sample(
                |state| {
                    // Standard 2D normal log-density
                    -0.5 * (state[0].powi(2) + state[1].powi(2))
                },
                vec![0.0, 0.0],
                1000,
            )
            .expect("MH sampling failed");

        assert_eq!(samples.len(), 1000);
        assert!(amh.acceptance_rate() > 0.1);
        assert!(amh.acceptance_rate() < 0.9);

        // Check that samples are roughly centered at origin
        let mean_x: f64 = samples.iter().map(|s| s[0]).sum::<f64>() / samples.len() as f64;
        let mean_y: f64 = samples.iter().map(|s| s[1]).sum::<f64>() / samples.len() as f64;

        assert_relative_eq!(mean_x, 0.0, epsilon = 0.2);
        assert_relative_eq!(mean_y, 0.0, epsilon = 0.2);
    }
}