
scirs2_core/random/variance_reduction.rs

//! Variance reduction techniques for Monte Carlo methods
//!
//! This module provides advanced variance reduction techniques that are essential
//! for efficient Monte Carlo simulations in scientific computing applications.

use crate::random::core::Random;
use rand::seq::SliceRandom;
use rand::Rng;
use rand_distr::{Distribution, Uniform};
use std::collections::HashMap;

/// Antithetic variate sampling for variance reduction
///
/// Antithetic variates reduce variance by using negatively correlated samples.
/// For uniform random variables U, the antithetic variate is 1-U.
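///
/// Averaging a function over an antithetic pair, (f(U) + f(1 - U)) / 2, has variance
/// (Var[f(U)] + Cov[f(U), f(1 - U)]) / 2, so it beats averaging two independent draws
/// whenever the covariance is negative (for example, when f is monotone).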
#[derive(Debug)]
pub struct AntitheticSampling<R: Rng> {
    rng: Random<R>,
    #[allow(dead_code)]
    stored_samples: HashMap<usize, Vec<f64>>,
}

impl<R: Rng> AntitheticSampling<R> {
    /// Create a new antithetic sampling generator
    pub fn new(rng: Random<R>) -> Self {
        Self {
            rng,
            stored_samples: HashMap::new(),
        }
    }

    /// Generate antithetic pairs of samples
    ///
    /// Returns (original_samples, antithetic_samples) where `antithetic[i]` = 1 - `original[i]`
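    ///
    /// # Example
    ///
    /// An illustrative sketch, assuming `seeded_rng` (from `crate::random::core`, as in
    /// the tests below) is in scope:
    ///
    /// ```ignore
    /// let mut sampler = AntitheticSampling::new(seeded_rng(42));
    /// let (original, antithetic) = sampler.generate_antithetic_pairs(100);
    /// // Every pair sums to 1.0, so the two halves are negatively correlated.
    /// assert!(original
    ///     .iter()
    ///     .zip(antithetic.iter())
    ///     .all(|(o, a)| (o + a - 1.0).abs() < 1e-12));
    /// ```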
    pub fn generate_antithetic_pairs(&mut self, count: usize) -> (Vec<f64>, Vec<f64>) {
        let original: Vec<f64> = (0..count)
            .map(|_| {
                self.rng
                    .sample(Uniform::new(0.0, 1.0).expect("Operation failed"))
            })
            .collect();

        let antithetic: Vec<f64> = original.iter().map(|&x| 1.0 - x).collect();

        (original, antithetic)
    }

    /// Generate stratified samples for variance reduction
    ///
    /// Divides the unit interval into strata and samples uniformly within each stratum.
    /// This reduces variance compared to pure random sampling.
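    ///
    /// # Example
    ///
    /// A sketch, assuming `seeded_rng` from `crate::random::core`:
    ///
    /// ```ignore
    /// let mut sampler = AntitheticSampling::new(seeded_rng(7));
    /// // 4 strata x 25 samples: exactly 25 values land in each quarter of [0, 1).
    /// let samples = sampler.stratified_samples(4, 25);
    /// assert_eq!(samples.len(), 100);
    /// assert_eq!(samples.iter().filter(|&&x| x < 0.25).count(), 25);
    /// ```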
    pub fn stratified_samples(&mut self, strata: usize, samples_per_stratum: usize) -> Vec<f64> {
        let mut all_samples = Vec::new();

        for i in 0..strata {
            let stratum_start = i as f64 / strata as f64;
            let stratum_end = (i + 1) as f64 / strata as f64;

            for _ in 0..samples_per_stratum {
                let uniform_in_stratum = self
                    .rng
                    .sample(Uniform::new(0.0, 1.0).expect("Operation failed"));
                let sample = stratum_start + uniform_in_stratum * (stratum_end - stratum_start);
                all_samples.push(sample);
            }
        }

        all_samples.shuffle(&mut self.rng.rng);
        all_samples
    }

    /// Generate Latin Hypercube samples
    ///
    /// A variance reduction technique that ensures samples are well-distributed
    /// across all dimensions of the parameter space.
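    ///
    /// # Example
    ///
    /// A sketch, assuming `seeded_rng` from `crate::random::core`:
    ///
    /// ```ignore
    /// let mut sampler = AntitheticSampling::new(seeded_rng(11));
    /// // 8 points in 2 dimensions; each dimension gets exactly one point per 1/8 bin.
    /// let points = sampler.latin_hypercube_samples(2, 8);
    /// assert_eq!(points.len(), 8);
    /// assert!(points.iter().all(|p| p.len() == 2));
    /// ```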
    pub fn latin_hypercube_samples(
        &mut self,
        dimensions: usize,
        sample_count: usize,
    ) -> Vec<Vec<f64>> {
        let mut samples = vec![vec![0.0; dimensions]; sample_count];

        for dim in 0..dimensions {
            // Create stratified samples for this dimension
            let mut strata: Vec<f64> = (0..sample_count)
                .map(|i| {
                    (i as f64
                        + self
                            .rng
                            .sample(Uniform::new(0.0, 1.0).expect("Operation failed")))
                        / sample_count as f64
                })
                .collect();

            // Shuffle the strata for this dimension
            strata.shuffle(&mut self.rng.rng);

            // Assign to samples
            for (i, &value) in strata.iter().enumerate() {
                samples[i][dim] = value;
            }
        }

        samples
    }
}

impl AntitheticSampling<rand::rngs::ThreadRng> {
    /// Create antithetic sampling with default RNG
    pub fn with_default_rng() -> Self {
        Self::new(Random::default())
    }
}

/// Control variate method for variance reduction
///
/// Control variates use a correlated random variable with known expectation
/// to reduce the variance of the estimator.
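///
/// With control coefficient c, the corrected estimator is Y - c * (X - E[X]).
/// At the optimal coefficient its variance is (1 - ρ²) * Var(Y), where ρ is the
/// correlation between Y and X, so a strongly correlated control variate gives a
/// large reduction.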
#[derive(Debug)]
pub struct ControlVariate {
    control_mean: f64,
    optimal_coefficient: Option<f64>,
}

impl ControlVariate {
    /// Create a new control variate method
    ///
    /// # Parameters
    /// * `control_mean` - The known expectation of the control variate
    pub fn new(control_mean: f64) -> Self {
        Self {
            control_mean,
            optimal_coefficient: None,
        }
    }

    /// Convenience constructor for a control variate with the given mean
    pub fn mean(mean: f64) -> Self {
        Self::new(mean)
    }

    /// Estimate the optimal control coefficient
    ///
    /// Uses the sample covariance and variance to estimate the optimal coefficient
    /// that minimizes the variance of the control variate estimator.
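    ///
    /// The estimate is c* = Cov(Y, X) / Var(X), computed from the paired samples
    /// (the 1/n factors cancel). If the control samples have zero variance, the
    /// coefficient is left unchanged, and `apply_correction` falls back to returning
    /// the target samples when no coefficient has been set.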
    pub fn estimate_coefficient(&mut self, target_samples: &[f64], control_samples: &[f64]) {
        let n = target_samples.len() as f64;

        let target_mean = target_samples.iter().sum::<f64>() / n;
        let control_sample_mean = control_samples.iter().sum::<f64>() / n;

        let numerator: f64 = target_samples
            .iter()
            .zip(control_samples.iter())
            .map(|(&y, &x)| (y - target_mean) * (x - control_sample_mean))
            .sum();

        let denominator: f64 = control_samples
            .iter()
            .map(|&x| (x - control_sample_mean).powi(2))
            .sum();

        if denominator > 0.0 {
            self.optimal_coefficient = Some(numerator / denominator);
        }
    }

    /// Apply control variate correction
    ///
    /// Applies the control variate correction using the formula:
    /// Y_corrected = Y - c * (X - μ_X)
    /// where c is the optimal coefficient, X is the control variate, and μ_X is its mean.
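    ///
    /// # Example
    ///
    /// An illustrative sketch: estimate the mean of Y using X, whose mean is known
    /// to be 0.5, as the control variate.
    ///
    /// ```ignore
    /// let mut cv = ControlVariate::new(0.5);
    /// let target = vec![0.1, 0.3, 0.7, 0.9];
    /// let control = vec![0.2, 0.4, 0.6, 0.8];
    /// cv.estimate_coefficient(&target, &control);
    /// let corrected = cv.apply_correction(&target, &control);
    /// // The corrected samples give an unbiased mean estimate with lower variance
    /// // when Y and X are correlated.
    /// assert_eq!(corrected.len(), target.len());
    /// ```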
    pub fn apply_correction(&self, target_samples: &[f64], control_samples: &[f64]) -> Vec<f64> {
        if let Some(c) = self.optimal_coefficient {
            target_samples
                .iter()
                .zip(control_samples.iter())
                .map(|(&y, &x)| y - c * (x - self.control_mean))
                .collect()
        } else {
            target_samples.to_vec()
        }
    }

    /// Get the current optimal coefficient
    pub fn coefficient(&self) -> Option<f64> {
        self.optimal_coefficient
    }

    /// Get the control mean
    pub fn control_mean(&self) -> f64 {
        self.control_mean
    }
}

/// Common ratio for variance reduction
///
/// Uses the known ratio between two correlated estimators to reduce variance.
#[derive(Debug)]
pub struct CommonRatio {
    known_ratio: f64,
}

impl CommonRatio {
    /// Create a new common ratio variance reduction method
    pub fn new(known_ratio: f64) -> Self {
        Self { known_ratio }
    }

    /// Apply common ratio correction
    ///
    /// Uses the formula: Y_corrected = Y * (known_ratio / sample_ratio)
    /// where sample_ratio is estimated from the sample data.
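    ///
    /// # Example
    ///
    /// A sketch in which the true ratio between the two quantities is known to be 2.0:
    ///
    /// ```ignore
    /// let ratio = CommonRatio::new(2.0);
    /// let numerator = vec![1.0, 2.0, 3.0, 4.0];
    /// let denominator = vec![0.6, 1.2, 1.8, 2.4]; // sample ratio ≈ 1.67
    /// let corrected = ratio.apply_correction(&numerator, &denominator);
    /// // Each value is rescaled by known_ratio / sample_ratio = 2.0 / 1.67 ≈ 1.2.
    /// assert_eq!(corrected.len(), numerator.len());
    /// ```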
    pub fn apply_correction(
        &self,
        numerator_samples: &[f64],
        denominator_samples: &[f64],
    ) -> Vec<f64> {
        let n = numerator_samples.len();
        if n != denominator_samples.len() {
            return numerator_samples.to_vec();
        }

        let numerator_sum: f64 = numerator_samples.iter().sum();
        let denominator_sum: f64 = denominator_samples.iter().sum();

        if denominator_sum != 0.0 {
            let sample_ratio = numerator_sum / denominator_sum;
            let correction_factor = self.known_ratio / sample_ratio;
            numerator_samples
                .iter()
                .map(|&x| x * correction_factor)
                .collect()
        } else {
            numerator_samples.to_vec()
        }
    }
}

/// Importance splitting for rare event simulation
///
/// A variance reduction technique for estimating probabilities of rare events
/// by splitting the simulation into multiple levels.
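///
/// The returned estimate is the product of the per-level success fractions; after each
/// level, the simulation budget for the next level is set to
/// `successes * splitting_factor`, so more effort is spent as the event becomes rarer.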
#[derive(Debug)]
pub struct ImportanceSplitting<R: Rng> {
    rng: Random<R>,
    levels: Vec<f64>,
    splitting_factor: usize,
}

impl<R: Rng> ImportanceSplitting<R> {
    /// Create a new importance splitting method
    pub fn new(rng: Random<R>, levels: Vec<f64>, splitting_factor: usize) -> Self {
        Self {
            rng,
            levels,
            splitting_factor,
        }
    }

    /// Estimate a rare-event probability using importance splitting
    ///
    /// Returns an estimate of the probability of reaching the final level.
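    ///
    /// # Example
    ///
    /// A sketch, assuming `seeded_rng` from `crate::random::core`; the simulation
    /// closure simply draws a uniform value, so the final level 0.95 is the rare event.
    ///
    /// ```ignore
    /// use rand_distr::Uniform;
    ///
    /// let levels = vec![0.5, 0.8, 0.95];
    /// let mut splitting = ImportanceSplitting::new(seeded_rng(1), levels, 2);
    /// let prob = splitting.estimate_probability(1_000, |rng| {
    ///     rng.sample(Uniform::new(0.0, 1.0).expect("Operation failed"))
    /// });
    /// assert!((0.0..=1.0).contains(&prob));
    /// ```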
    pub fn estimate_probability<F>(&mut self, initial_samples: usize, simulation_fn: F) -> f64
    where
        F: Fn(&mut Random<R>) -> f64,
    {
        let mut current_samples = initial_samples;
        let mut probability = 1.0;

        for &level in &self.levels {
            let mut successes = 0;
            let mut new_samples = Vec::new();

            // Run simulations at current level
            for _ in 0..current_samples {
                let result = simulation_fn(&mut self.rng);
                if result >= level {
                    successes += 1;
                    new_samples.push(result);
                }
            }

            if successes == 0 {
                return 0.0; // No samples reached this level
            }

            let level_probability = successes as f64 / current_samples as f64;
            probability *= level_probability;

            // Split successful samples for next level
            current_samples = successes * self.splitting_factor;
        }

        probability
    }
}

impl ImportanceSplitting<rand::rngs::ThreadRng> {
    /// Create importance splitting with default RNG
    pub fn with_default_rng(levels: Vec<f64>, splitting_factor: usize) -> Self {
        Self::new(Random::default(), levels, splitting_factor)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::random::core::seeded_rng;
    use approx::assert_abs_diff_eq;

    #[test]
    fn test_antithetic_sampling() {
        let mut antithetic = AntitheticSampling::new(seeded_rng(42));
        let (original, antithetic_vals) = antithetic.generate_antithetic_pairs(10);

        assert_eq!(original.len(), 10);
        assert_eq!(antithetic_vals.len(), 10);

        for (o, a) in original.iter().zip(antithetic_vals.iter()) {
            assert_abs_diff_eq!(o + a, 1.0, epsilon = 1e-10);
        }
    }

    #[test]
    fn test_stratified_sampling() {
        let mut antithetic = AntitheticSampling::new(seeded_rng(123));
        let samples = antithetic.stratified_samples(5, 10);

        assert_eq!(samples.len(), 50); // 5 strata * 10 samples per stratum
        assert!(samples.iter().all(|&x| (0.0..1.0).contains(&x)));
    }

    #[test]
    fn test_latin_hypercube_sampling() {
        let mut antithetic = AntitheticSampling::new(seeded_rng(456));
        let samples = antithetic.latin_hypercube_samples(3, 10);

        assert_eq!(samples.len(), 10); // 10 samples
        assert!(samples.iter().all(|sample| sample.len() == 3)); // 3 dimensions
        assert!(samples.iter().flatten().all(|&x| (0.0..1.0).contains(&x)));
    }

    #[test]
    fn test_control_variate() {
        let mut control = ControlVariate::new(0.5);

        let target = vec![0.1, 0.3, 0.7, 0.9];
        let control_samples = vec![0.2, 0.4, 0.6, 0.8];

        control.estimate_coefficient(&target, &control_samples);
        let corrected = control.apply_correction(&target, &control_samples);

        assert_eq!(corrected.len(), target.len());
        assert!(control.coefficient().is_some());
    }

    #[test]
    fn test_common_ratio() {
        let ratio = CommonRatio::new(2.0);
        let numerator = vec![1.0, 2.0, 3.0, 4.0];
        let denominator = vec![0.6, 1.2, 1.8, 2.4]; // Should give ratio ≈ 1.67

        let corrected = ratio.apply_correction(&numerator, &denominator);
        assert_eq!(corrected.len(), numerator.len());
    }

    #[test]
    fn test_importance_splitting() {
        let levels = vec![0.5, 0.8, 0.95];
        let mut splitting = ImportanceSplitting::new(seeded_rng(789), levels, 2);

        let prob = splitting.estimate_probability(100, |rng| {
            rng.sample(Uniform::new(0.0, 1.0).expect("Operation failed"))
        });

        assert!((0.0..=1.0).contains(&prob));
    }
}