// ringkernel_montecarlo/variance/control.rs
//! Control variates for variance reduction.
//!
//! Control variates reduce variance by using a correlated variable with
//! known expectation. If Y is correlated with X and E[Y] is known, then:
//!
//! X* = X - c(Y - E[Y])
//!
//! is an unbiased estimator of E[X] with potentially lower variance.

use crate::rng::GpuRng;

/// Estimate using control variates.
///
/// Given samples of X and Y where E[Y] = mu_y is known,
/// computes the control variate estimator:
///
/// `X* = mean(X) - c * (mean(Y) - mu_y)`
///
/// where c is the optimal coefficient `cov(X,Y) / var(Y)`.
///
/// # Arguments
///
/// * `x_samples` - Samples of the quantity to estimate
/// * `y_samples` - Samples of the control variate (same length as x_samples)
/// * `mu_y` - Known mean of Y
///
/// # Returns
///
/// Tuple of (estimate, optimal_c, variance_reduction_factor), where the
/// factor is `1 - R^2`: the fraction of Var(X) that remains after applying
/// the control variate with the optimal coefficient.
///
/// # Panics
///
/// Panics if the slices differ in length or hold fewer than 2 samples.
pub fn control_variate_estimate(
    x_samples: &[f32],
    y_samples: &[f32],
    mu_y: f32,
) -> (f32, f32, f32) {
    let n = x_samples.len();
    assert_eq!(n, y_samples.len(), "Sample arrays must have same length");
    assert!(n > 1, "Need at least 2 samples");

    let n_f = n as f32;

    // Sample means of X and Y.
    let mean_x: f32 = x_samples.iter().sum::<f32>() / n_f;
    let mean_y: f32 = y_samples.iter().sum::<f32>() / n_f;

    // Accumulate centered second moments in one pass. Iterating with `zip`
    // instead of indexing avoids a bounds check per access and lets the
    // loop vectorize.
    let mut var_x = 0.0;
    let mut var_y = 0.0;
    let mut cov_xy = 0.0;
    for (&x, &y) in x_samples.iter().zip(y_samples) {
        let dx = x - mean_x;
        let dy = y - mean_y;
        var_x += dx * dx;
        var_y += dy * dy;
        cov_xy += dx * dy;
    }

    // Unbiased (n - 1) normalization. The common factor cancels in the
    // ratios below, but keeps the intermediates interpretable.
    var_x /= n_f - 1.0;
    var_y /= n_f - 1.0;
    cov_xy /= n_f - 1.0;

    // Optimal coefficient c* = cov(X,Y) / var(Y); fall back to 0 (plain
    // sample mean) when Y is numerically constant.
    let c = if var_y > 1e-10 { cov_xy / var_y } else { 0.0 };

    // Control variate estimate: X* = mean(X) - c * (mean(Y) - mu_y).
    let estimate = mean_x - c * (mean_y - mu_y);

    // R² of the regression of X on Y; guard degenerate variances.
    let r_squared = if var_x > 1e-10 && var_y > 1e-10 {
        (cov_xy * cov_xy) / (var_x * var_y)
    } else {
        0.0
    };

    // With the optimal c: Var(X*) = Var(X) * (1 - R²).
    let variance_reduction = 1.0 - r_squared;

    (estimate, c, variance_reduction)
}
80
/// Configuration for a control-variates Monte Carlo estimator.
///
/// Holds the sample budget used by [`ControlVariates::estimate`], which
/// draws `n_samples` uniforms and delegates to `control_variate_estimate`.
#[derive(Debug, Clone)]
pub struct ControlVariates {
    /// Number of samples to generate.
    pub n_samples: usize,
}
87
88impl ControlVariates {
89    /// Create new control variates configuration.
90    pub fn new(n_samples: usize) -> Self {
91        Self { n_samples }
92    }
93
94    /// Estimate E[f(U)] using control variate g(U) with known mean mu_g.
95    ///
96    /// # Arguments
97    ///
98    /// * `state` - RNG state
99    /// * `f` - Function to estimate expectation of
100    /// * `g` - Control variate function
101    /// * `mu_g` - Known mean E[g(U)]
102    ///
103    /// # Returns
104    ///
105    /// Tuple of (estimate, optimal_c, variance_reduction)
106    pub fn estimate<R: GpuRng, F, G>(
107        &self,
108        state: &mut R::State,
109        f: F,
110        g: G,
111        mu_g: f32,
112    ) -> (f32, f32, f32)
113    where
114        F: Fn(f32) -> f32,
115        G: Fn(f32) -> f32,
116    {
117        let mut x_samples = Vec::with_capacity(self.n_samples);
118        let mut y_samples = Vec::with_capacity(self.n_samples);
119
120        for _ in 0..self.n_samples {
121            let u = R::next_uniform(state);
122            x_samples.push(f(u));
123            y_samples.push(g(u));
124        }
125
126        control_variate_estimate(&x_samples, &y_samples, mu_g)
127    }
128}
129
130impl Default for ControlVariates {
131    fn default() -> Self {
132        Self::new(1000)
133    }
134}
135
#[cfg(test)]
mod tests {
    use super::*;
    use crate::rng::PhiloxRng;

    #[test]
    fn test_control_variate_perfect_correlation() {
        // With Y identical to X, the optimal coefficient is exactly 1 and
        // the estimator collapses onto the supplied mean mu_y.
        let x = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let y = x.clone();
        let mu_y = 3.0; // E[Y] = 3

        let (estimate, c, _) = control_variate_estimate(&x, &y, mu_y);

        assert!(
            (c - 1.0).abs() < 1e-6,
            "c should be 1 for perfect correlation"
        );
        assert!(
            (estimate - mu_y).abs() < 1e-6,
            "Estimate should equal mu_y when Y = X"
        );
    }

    #[test]
    fn test_control_variate_reduces_variance() {
        // Target: E[e^U] for U ~ Uniform(0,1), using U itself as the
        // control variate (E[U] = 0.5 is known analytically).
        let n = 5000;

        // Plain Monte Carlo baseline over the same RNG stream.
        let mut state = PhiloxRng::seed(42, 0);
        let mut naive_sum = 0.0f32;
        for _ in 0..n {
            naive_sum += PhiloxRng::next_uniform(&mut state).exp();
        }
        let naive_mean = naive_sum / n as f32;

        // Same seed, now with the control variate applied.
        let mut state = PhiloxRng::seed(42, 0);
        let (cv_estimate, _c, var_reduction) = ControlVariates::new(n)
            .estimate::<PhiloxRng, _, _>(&mut state, |u| u.exp(), |u| u, 0.5);

        // Analytic answer: E[e^U] = e - 1 ≈ 1.718.
        let true_value = std::f32::consts::E - 1.0;

        assert!(
            (naive_mean - true_value).abs() < 0.1,
            "Naive {} far from {}",
            naive_mean,
            true_value
        );
        assert!(
            (cv_estimate - true_value).abs() < 0.1,
            "CV {} far from {}",
            cv_estimate,
            true_value
        );
        assert!(
            var_reduction < 1.0,
            "Control variate should reduce variance"
        );
    }
}