// proof_engine/stochastic/monte_carlo.rs

1//! Monte Carlo simulation framework.
2//!
3//! Generic simulation runner, histogram, importance sampling, pi estimation,
4//! and numerical integration.
5
6use super::brownian::Rng;
7use glam::Vec2;
8
9// ---------------------------------------------------------------------------
10// Histogram
11// ---------------------------------------------------------------------------
12
/// Histogram with uniform bins.
pub struct Histogram {
    /// Per-bin sample counts; `bins.len() == bin_count`.
    pub bins: Vec<u32>,
    /// Lower bound of the binned range.
    pub min: f64,
    /// Upper bound of the binned range.
    pub max: f64,
    /// Number of bins (always >= 1).
    pub bin_count: usize,
}

impl Histogram {
    /// Create from samples with the given number of bins.
    ///
    /// Bounds are taken from the sample min/max. Empty input or a zero
    /// `bin_count` yields a single empty bin over [0, 1] — degenerate, but
    /// never panics.
    pub fn from_samples(samples: &[f64], bin_count: usize) -> Self {
        if samples.is_empty() || bin_count == 0 {
            return Self {
                bins: vec![0; bin_count.max(1)],
                min: 0.0,
                max: 1.0,
                bin_count: bin_count.max(1),
            };
        }

        // f64::min/f64::max ignore NaN operands, so NaN samples cannot poison
        // the bounds (they fall into bin 0 via the saturating cast below).
        let min = samples.iter().cloned().fold(f64::INFINITY, f64::min);
        let max = samples.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
        // Floor the range so identical samples don't divide by zero.
        let range = (max - min).max(1e-15);
        let mut bins = vec![0u32; bin_count];

        for &s in samples {
            let idx = ((s - min) / range * bin_count as f64) as usize;
            // s == max maps to index bin_count; clamp into the last bin.
            let idx = idx.min(bin_count - 1);
            bins[idx] += 1;
        }

        Self { bins, min, max, bin_count }
    }

    /// Create with explicit bounds; samples outside `[min, max]` are ignored.
    ///
    /// BUG FIX: a zero `bin_count` previously underflowed (`bin_count - 1`)
    /// and panicked as soon as any sample fell inside the bounds. It is now
    /// promoted to a single bin, matching `from_samples`.
    pub fn from_samples_bounded(samples: &[f64], bin_count: usize, min: f64, max: f64) -> Self {
        let bin_count = bin_count.max(1);
        let range = (max - min).max(1e-15);
        let mut bins = vec![0u32; bin_count];
        for &s in samples {
            // NaN fails both comparisons and is silently dropped.
            if s >= min && s <= max {
                let idx = ((s - min) / range * bin_count as f64) as usize;
                let idx = idx.min(bin_count - 1);
                bins[idx] += 1;
            }
        }
        Self { bins, min, max, bin_count }
    }

    /// Bin width.
    pub fn bin_width(&self) -> f64 {
        (self.max - self.min) / self.bin_count as f64
    }

    /// Center of bin i.
    pub fn bin_center(&self, i: usize) -> f64 {
        self.min + (i as f64 + 0.5) * self.bin_width()
    }

    /// Normalized density (total area = 1) for bin i.
    ///
    /// NOTE: recomputes the total count on every call (O(bins)); fine for
    /// display-sized histograms.
    pub fn density(&self, i: usize) -> f64 {
        let total: u32 = self.bins.iter().sum();
        if total == 0 {
            return 0.0;
        }
        self.bins[i] as f64 / (total as f64 * self.bin_width())
    }

    /// Maximum bin count (0 for an empty histogram).
    pub fn max_count(&self) -> u32 {
        self.bins.iter().cloned().max().unwrap_or(0)
    }
}
85
86// ---------------------------------------------------------------------------
87// MonteCarloResult
88// ---------------------------------------------------------------------------
89
90/// Result of a Monte Carlo simulation.
91pub struct MonteCarloResult<T> {
92    pub mean: f64,
93    pub variance: f64,
94    pub std_dev: f64,
95    pub confidence_interval: (f64, f64),
96    pub histogram: Histogram,
97    pub samples: Vec<T>,
98}
99
100impl<T> MonteCarloResult<T> {
101    /// Standard error of the mean.
102    pub fn standard_error(&self) -> f64 {
103        if self.samples.is_empty() {
104            return 0.0;
105        }
106        self.std_dev / (self.samples.len() as f64).sqrt()
107    }
108}
109
110// ---------------------------------------------------------------------------
111// MonteCarloSim
112// ---------------------------------------------------------------------------
113
/// Generic Monte Carlo simulation framework.
///
/// Stateless namespace type: `T` is the per-trial sample type collected by
/// `run`; no instances are ever constructed.
pub struct MonteCarloSim<T> {
    // PhantomData carries the generic parameter without storing any data.
    _phantom: std::marker::PhantomData<T>,
}
118
119impl<T: Clone> MonteCarloSim<T> {
120    /// Run `trials` independent simulations, extracting a f64 value from each.
121    /// Returns full statistics.
122    pub fn run<F>(trials: usize, mut trial_fn: F) -> MonteCarloResult<T>
123    where
124        F: FnMut(usize) -> (T, f64),
125    {
126        let mut samples = Vec::with_capacity(trials);
127        let mut values = Vec::with_capacity(trials);
128
129        for i in 0..trials {
130            let (sample, value) = trial_fn(i);
131            samples.push(sample);
132            values.push(value);
133        }
134
135        let n = trials as f64;
136        let mean = values.iter().sum::<f64>() / n;
137        let variance = values.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / n;
138        let std_dev = variance.sqrt();
139
140        // 95% confidence interval using z = 1.96
141        let se = std_dev / n.sqrt();
142        let confidence_interval = (mean - 1.96 * se, mean + 1.96 * se);
143
144        let histogram = Histogram::from_samples(&values, 50);
145
146        MonteCarloResult {
147            mean,
148            variance,
149            std_dev,
150            confidence_interval,
151            histogram,
152            samples,
153        }
154    }
155}
156
157// ---------------------------------------------------------------------------
158// Convenience functions
159// ---------------------------------------------------------------------------
160
161/// Estimate pi using the classic Monte Carlo method:
162/// sample uniform points in [0,1]^2, count fraction inside unit circle.
163pub fn estimate_pi(trials: usize, rng: &mut Rng) -> f64 {
164    let mut inside = 0usize;
165    for _ in 0..trials {
166        let x = rng.uniform();
167        let y = rng.uniform();
168        if x * x + y * y <= 1.0 {
169            inside += 1;
170        }
171    }
172    4.0 * inside as f64 / trials as f64
173}
174
175/// Monte Carlo integration of f over [a, b].
176/// Estimate = (b - a) * mean(f(x)) for x uniform in [a, b].
177pub fn integrate(f: &dyn Fn(f64) -> f64, a: f64, b: f64, trials: usize, rng: &mut Rng) -> f64 {
178    let range = b - a;
179    let sum: f64 = (0..trials)
180        .map(|_| {
181            let x = a + rng.uniform() * range;
182            f(x)
183        })
184        .sum();
185    range * sum / trials as f64
186}
187
188/// Monte Carlo integration in 2D: integral of f over [a1,b1] x [a2,b2].
189pub fn integrate_2d(
190    f: &dyn Fn(f64, f64) -> f64,
191    a1: f64, b1: f64,
192    a2: f64, b2: f64,
193    trials: usize,
194    rng: &mut Rng,
195) -> f64 {
196    let area = (b1 - a1) * (b2 - a2);
197    let sum: f64 = (0..trials)
198        .map(|_| {
199            let x = a1 + rng.uniform() * (b1 - a1);
200            let y = a2 + rng.uniform() * (b2 - a2);
201            f(x, y)
202        })
203        .sum();
204    area * sum / trials as f64
205}
206
207/// Importance sampling: estimate E_target[h(x)] using proposal distribution.
208///
209/// * `target_density` - target PDF (unnormalized is fine if ratio is used)
210/// * `proposal_sample` - function that samples from proposal distribution
211/// * `proposal_density` - proposal PDF
212/// * `h` - function to compute expectation of
213/// * `trials` - number of samples
214pub fn importance_sampling(
215    target_density: &dyn Fn(f64) -> f64,
216    proposal_sample: &dyn Fn(&mut Rng) -> f64,
217    proposal_density: &dyn Fn(f64) -> f64,
218    h: &dyn Fn(f64) -> f64,
219    trials: usize,
220    rng: &mut Rng,
221) -> f64 {
222    let mut weighted_sum = 0.0;
223    let mut weight_sum = 0.0;
224
225    for _ in 0..trials {
226        let x = proposal_sample(rng);
227        let w = target_density(x) / proposal_density(x).max(1e-15);
228        weighted_sum += w * h(x);
229        weight_sum += w;
230    }
231
232    if weight_sum.abs() < 1e-15 {
233        0.0
234    } else {
235        weighted_sum / weight_sum
236    }
237}
238
/// Compute the running mean of a series for convergence analysis:
/// element i is the mean of `values[0..=i]`.
pub fn running_mean(values: &[f64]) -> Vec<f64> {
    let mut acc = 0.0;
    values
        .iter()
        .enumerate()
        .map(|(i, &v)| {
            acc += v;
            acc / (i + 1) as f64
        })
        .collect()
}
249
250// ---------------------------------------------------------------------------
251// MonteCarloRenderer
252// ---------------------------------------------------------------------------
253
/// Render histogram and convergence plot as glyphs.
pub struct MonteCarloRenderer {
    /// Glyph used for histogram bar cells.
    pub bar_character: char,
    /// RGBA color of histogram bars.
    pub bar_color: [f32; 4],
    /// Glyph used for convergence-plot points.
    pub convergence_character: char,
    /// RGBA color of convergence-plot points.
    pub convergence_color: [f32; 4],
    /// Horizontal spacing between histogram columns.
    pub x_scale: f32,
    /// Vertical spacing between stacked bar glyphs.
    pub y_scale: f32,
}
263
impl MonteCarloRenderer {
    /// Default appearance: green bars, orange convergence dots.
    pub fn new() -> Self {
        Self {
            bar_character: 'โ–ˆ',
            bar_color: [0.3, 0.8, 0.5, 0.9],
            convergence_character: 'ยท',
            convergence_color: [1.0, 0.6, 0.2, 1.0],
            x_scale: 0.5,
            y_scale: 0.1,
        }
    }

    /// Render a histogram as columns of stacked glyphs, one column per bin.
    /// Column height is proportional to the bin count (tallest bin = 20 cells).
    pub fn render_histogram(&self, hist: &Histogram) -> Vec<(Vec2, char, [f32; 4])> {
        let peak = hist.max_count().max(1) as f32;
        let mut glyphs = Vec::new();

        for (bin, &count) in hist.bins.iter().enumerate() {
            let col_x = bin as f32 * self.x_scale;
            let rows = (count as f32 / peak * 20.0) as usize;
            glyphs.extend((0..rows).map(|row| {
                (
                    Vec2::new(col_x, row as f32 * self.y_scale),
                    self.bar_character,
                    self.bar_color,
                )
            }));
        }
        glyphs
    }

    /// Render a convergence plot (running mean vs. trial number), followed by
    /// a dashed horizontal line at the target value.
    pub fn render_convergence(
        &self,
        running_means: &[f64],
        target: f64,
    ) -> Vec<(Vec2, char, [f32; 4])> {
        let n = running_means.len();
        // Subsample so at most ~500 points are drawn.
        let stride = if n > 500 { n / 500 } else { 1 };

        let mut glyphs: Vec<(Vec2, char, [f32; 4])> = (0..n)
            .step_by(stride)
            .map(|i| {
                (
                    Vec2::new(i as f32 * 0.01, running_means[i] as f32),
                    self.convergence_character,
                    self.convergence_color,
                )
            })
            .collect();

        // Dashed target line drawn at half the point density.
        let target_color = [1.0, 0.2, 0.2, 0.5];
        for i in (0..n).step_by(stride.max(1) * 2) {
            glyphs.push((Vec2::new(i as f32 * 0.01, target as f32), 'โ”€', target_color));
        }

        glyphs
    }
}
321
impl Default for MonteCarloRenderer {
    // Delegates to `new()` so the default values live in one place.
    fn default() -> Self {
        Self::new()
    }
}
327
328// ---------------------------------------------------------------------------
329// Tests
330// ---------------------------------------------------------------------------
331
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_estimate_pi() {
        // Fixed seed keeps this deterministic; 100k points comfortably lands
        // within the 0.05 tolerance.
        let mut rng = Rng::new(42);
        let pi = estimate_pi(100_000, &mut rng);
        assert!(
            (pi - std::f64::consts::PI).abs() < 0.05,
            "pi estimate {} should be close to {}",
            pi,
            std::f64::consts::PI
        );
    }

    #[test]
    fn test_integrate_constant() {
        let mut rng = Rng::new(42);
        // integral of 5 from 0 to 2 = 10 (exact for a constant integrand, up
        // to floating-point accumulation).
        let result = integrate(&|_x| 5.0, 0.0, 2.0, 50_000, &mut rng);
        assert!(
            (result - 10.0).abs() < 0.1,
            "integral of 5 over [0,2] should be 10, got {}",
            result
        );
    }

    #[test]
    fn test_integrate_linear() {
        let mut rng = Rng::new(42);
        // integral of x from 0 to 1 = 0.5
        let result = integrate(&|x| x, 0.0, 1.0, 100_000, &mut rng);
        assert!(
            (result - 0.5).abs() < 0.01,
            "integral of x over [0,1] should be 0.5, got {}",
            result
        );
    }

    #[test]
    fn test_integrate_quadratic() {
        let mut rng = Rng::new(42);
        // integral of x^2 from 0 to 1 = 1/3
        let result = integrate(&|x| x * x, 0.0, 1.0, 100_000, &mut rng);
        assert!(
            (result - 1.0 / 3.0).abs() < 0.01,
            "integral of x^2 over [0,1] should be 1/3, got {}",
            result
        );
    }

    #[test]
    fn test_integrate_2d() {
        let mut rng = Rng::new(42);
        // integral of 1 over [0,1]x[0,1] = 1 (checks the area scaling).
        let result = integrate_2d(&|_x, _y| 1.0, 0.0, 1.0, 0.0, 1.0, 50_000, &mut rng);
        assert!((result - 1.0).abs() < 0.05);
    }

    #[test]
    fn test_monte_carlo_sim() {
        let mut rng = Rng::new(42);
        let result = MonteCarloSim::<f64>::run(10_000, |_i| {
            let x = rng.uniform();
            (x, x)
        });
        // Mean of U[0,1] = 0.5
        assert!(
            (result.mean - 0.5).abs() < 0.02,
            "mean should be ~0.5, got {}",
            result.mean
        );
        // Variance of U[0,1] = 1/12 โ‰ˆ 0.0833
        assert!(
            (result.variance - 1.0 / 12.0).abs() < 0.01,
            "variance should be ~1/12, got {}",
            result.variance
        );
    }

    #[test]
    fn test_histogram() {
        // 1000 evenly spaced samples over [0, 1): expect ~100 per bin.
        let samples: Vec<f64> = (0..1000).map(|i| i as f64 / 1000.0).collect();
        let hist = Histogram::from_samples(&samples, 10);
        assert_eq!(hist.bins.len(), 10);
        // Each bin should have ~100 samples
        for &count in &hist.bins {
            assert!(count >= 80 && count <= 120, "bin count {} out of range", count);
        }
    }

    #[test]
    fn test_histogram_density_integrates_to_one() {
        // density() is normalized so sum(density * bin_width) ~= 1.
        let mut rng = Rng::new(42);
        let samples: Vec<f64> = (0..10_000).map(|_| rng.normal()).collect();
        let hist = Histogram::from_samples(&samples, 50);
        let total: f64 = (0..hist.bin_count)
            .map(|i| hist.density(i) * hist.bin_width())
            .sum();
        assert!(
            (total - 1.0).abs() < 0.05,
            "histogram density should integrate to ~1, got {}",
            total
        );
    }

    #[test]
    fn test_running_mean() {
        // Cumulative means of 1,3,5,7 are exactly 1,2,3,4.
        let values = vec![1.0, 3.0, 5.0, 7.0];
        let means = running_mean(&values);
        assert!((means[0] - 1.0).abs() < 1e-10);
        assert!((means[1] - 2.0).abs() < 1e-10);
        assert!((means[2] - 3.0).abs() < 1e-10);
        assert!((means[3] - 4.0).abs() < 1e-10);
    }

    #[test]
    fn test_importance_sampling() {
        let mut rng = Rng::new(42);
        // Estimate E[h(x)] where x ~ N(0,1), using a wider proposal N(0, 4)
        // (sd = 2). Both densities are unnormalized; the self-normalized
        // estimator absorbs the missing constants.
        let result = importance_sampling(
            &|x| (-0.5 * x * x).exp(), // target: standard normal (unnormalized)
            &|rng: &mut Rng| rng.normal() * 2.0, // proposal sampler: N(0, 4)
            &|x| (-x * x / 8.0).exp(), // proposal density (unnormalized)
            &|x| x * x, // h(x) = x^2, so E[x^2] = 1
            50_000,
            &mut rng,
        );
        assert!(
            (result - 1.0).abs() < 0.2,
            "IS estimate of E[X^2] should be ~1, got {}",
            result
        );
    }

    #[test]
    fn test_confidence_interval() {
        let mut rng = Rng::new(42);
        let result = MonteCarloSim::<f64>::run(10_000, |_| {
            let x = rng.normal();
            (x, x)
        });
        // 95% CI should contain 0
        assert!(result.confidence_interval.0 < 0.0);
        assert!(result.confidence_interval.1 > 0.0);
    }

    #[test]
    fn test_renderer_histogram() {
        let hist = Histogram::from_samples(&[1.0, 2.0, 3.0, 2.0, 2.0], 3);
        let renderer = MonteCarloRenderer::new();
        let glyphs = renderer.render_histogram(&hist);
        assert!(!glyphs.is_empty());
    }

    #[test]
    fn test_pi_convergence() {
        let mut rng = Rng::new(42);
        // Each trial contributes 4 (inside the quarter circle) or 0, so the
        // running mean converges to pi.
        let values: Vec<f64> = (0..10_000)
            .map(|_| {
                let x = rng.uniform();
                let y = rng.uniform();
                if x * x + y * y <= 1.0 { 4.0 } else { 0.0 }
            })
            .collect();
        let means = running_mean(&values);
        // Later estimates should be closer to pi than earlier ones
        // (with slack, since convergence is only in expectation).
        let early_error = (means[100] - std::f64::consts::PI).abs();
        let late_error = (means[9999] - std::f64::consts::PI).abs();
        assert!(
            late_error <= early_error + 0.5,
            "convergence: late error {} should generally be <= early error {}",
            late_error,
            early_error
        );
    }
}
509}