// simular/demos/monte_carlo_pi.rs

//! Demo 3: Monte Carlo π Convergence
//!
//! Visual proof that Monte Carlo error decreases as O(n^{-1/2}) per CLT.
//!
//! # Governing Equations
//!
//! ```text
//! Estimator:        π̂ = (4/n) Σ I(x²+y² ≤ 1)
//! Standard Error:   SE = σ/√n
//! Convergence Rate: Error ~ O(n^{-1/2})
//! ```
//!
//! # EDD Cycle
//!
//! 1. **Equation**: CLT guarantees SE = σ/√n convergence
//! 2. **Failing Test**: Log-log slope of error vs n not in [-0.6, -0.4]
//! 3. **Implementation**: Antithetic sampling for variance reduction
//! 4. **Verification**: Slope ≈ -0.5, test passes
//! 5. **Falsification**: Compare naive vs antithetic variance

21use super::{CriterionStatus, EddDemo, FalsificationStatus};
22use crate::engine::rng::SimRng;
23use serde::{Deserialize, Serialize};
24
25/// Monte Carlo π estimation demo state.
/// Monte Carlo π estimation demo state.
///
/// Tracks the running quarter-circle hit count, per-batch statistics used
/// for variance estimation, and a `(n, estimate, error)` history used for
/// log-log convergence analysis.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MonteCarloPlDemo {
    /// Current sample count.
    pub n: u64,
    /// Points inside the quarter circle.
    pub inside_count: u64,
    /// Current π estimate.
    pub pi_estimate: f64,
    /// True value of π for error calculation.
    pub pi_true: f64,
    /// Whether to use antithetic sampling.
    pub use_antithetic: bool,
    /// History of (n, estimate, error) for convergence analysis.
    pub history: Vec<(u64, f64, f64)>,
    /// Sum of squared estimates (for variance calculation).
    pub sum_squared_estimates: f64,
    /// Number of batches (for variance estimation).
    pub batch_count: u64,
    /// Batch size for variance estimation.
    pub batch_size: u64,
    /// Sum of batch estimates.
    pub sum_batch_estimates: f64,
    /// Current batch inside count.
    batch_inside: u64,
    /// Current batch sample count.
    batch_n: u64,
    /// Expected convergence slope.
    pub expected_slope: f64,
    /// Tolerance for slope verification.
    pub slope_tolerance: f64,
    /// RNG for sampling.
    ///
    /// NOTE(review): skipped by serde, so a deserialized demo has
    /// `rng: None` and every subsequent sample reports "outside".
    #[serde(skip)]
    rng: Option<SimRng>,
    /// Seed for reproducibility.
    pub seed: u64,
}
62
impl Default for MonteCarloPlDemo {
    /// Default demo uses the fixed seed 42 so runs are reproducible.
    fn default() -> Self {
        Self::new(42)
    }
}
68
69impl MonteCarloPlDemo {
70    /// Create a new Monte Carlo π demo.
71    #[must_use]
72    pub fn new(seed: u64) -> Self {
73        Self {
74            n: 0,
75            inside_count: 0,
76            pi_estimate: 0.0,
77            pi_true: std::f64::consts::PI,
78            use_antithetic: false,
79            history: Vec::new(),
80            sum_squared_estimates: 0.0,
81            batch_count: 0,
82            batch_size: 1000,
83            sum_batch_estimates: 0.0,
84            batch_inside: 0,
85            batch_n: 0,
86            expected_slope: -0.5,
87            slope_tolerance: 0.1,
88            rng: Some(SimRng::new(seed)),
89            seed,
90        }
91    }
92
93    /// Enable or disable antithetic sampling.
94    pub fn set_antithetic(&mut self, enabled: bool) {
95        self.use_antithetic = enabled;
96    }
97
98    /// Get current error |π̂ - π|.
99    #[must_use]
100    pub fn absolute_error(&self) -> f64 {
101        (self.pi_estimate - self.pi_true).abs()
102    }
103
104    /// Get relative error |π̂ - π| / π.
105    #[must_use]
106    pub fn relative_error(&self) -> f64 {
107        self.absolute_error() / self.pi_true
108    }
109
110    /// Sample a single point (returns true if inside quarter circle).
111    #[allow(clippy::option_if_let_else)]
112    fn sample_point(&mut self) -> bool {
113        if let Some(ref mut rng) = self.rng {
114            let x: f64 = rng.gen_range_f64(0.0, 1.0);
115            let y: f64 = rng.gen_range_f64(0.0, 1.0);
116            x * x + y * y <= 1.0
117        } else {
118            false
119        }
120    }
121
122    /// Sample using antithetic variates.
123    #[allow(clippy::option_if_let_else)]
124    fn sample_antithetic(&mut self) -> (bool, bool) {
125        if let Some(ref mut rng) = self.rng {
126            let x: f64 = rng.gen_range_f64(0.0, 1.0);
127            let y: f64 = rng.gen_range_f64(0.0, 1.0);
128
129            // Original point
130            let inside1 = x * x + y * y <= 1.0;
131
132            // Antithetic point (1-x, 1-y)
133            let x_anti = 1.0 - x;
134            let y_anti = 1.0 - y;
135            let inside2 = x_anti * x_anti + y_anti * y_anti <= 1.0;
136
137            (inside1, inside2)
138        } else {
139            (false, false)
140        }
141    }
142
143    /// Add samples and update estimate.
144    pub fn add_samples(&mut self, count: u64) {
145        for _ in 0..count {
146            if self.use_antithetic {
147                let (in1, in2) = self.sample_antithetic();
148                if in1 {
149                    self.inside_count += 1;
150                    self.batch_inside += 1;
151                }
152                if in2 {
153                    self.inside_count += 1;
154                    self.batch_inside += 1;
155                }
156                self.n += 2;
157                self.batch_n += 2;
158            } else {
159                if self.sample_point() {
160                    self.inside_count += 1;
161                    self.batch_inside += 1;
162                }
163                self.n += 1;
164                self.batch_n += 1;
165            }
166
167            // Update batch statistics
168            if self.batch_n >= self.batch_size {
169                let batch_estimate = 4.0 * self.batch_inside as f64 / self.batch_n as f64;
170                self.sum_batch_estimates += batch_estimate;
171                self.sum_squared_estimates += batch_estimate * batch_estimate;
172                self.batch_count += 1;
173                self.batch_inside = 0;
174                self.batch_n = 0;
175            }
176        }
177
178        // Update π estimate
179        if self.n > 0 {
180            self.pi_estimate = 4.0 * self.inside_count as f64 / self.n as f64;
181        }
182    }
183
184    /// Record current state in history.
185    pub fn record_history(&mut self) {
186        if self.n > 0 {
187            self.history
188                .push((self.n, self.pi_estimate, self.absolute_error()));
189        }
190    }
191
192    /// Calculate convergence slope using log-log regression.
193    #[must_use]
194    pub fn calculate_convergence_slope(&self) -> f64 {
195        if self.history.len() < 5 {
196            return 0.0;
197        }
198
199        // Log-log regression: log(error) = slope * log(n) + intercept
200        let points: Vec<(f64, f64)> = self
201            .history
202            .iter()
203            .filter(|(n, _, err)| *n > 0 && *err > f64::EPSILON)
204            .map(|(n, _, err)| ((*n as f64).ln(), err.ln()))
205            .collect();
206
207        if points.len() < 3 {
208            return 0.0;
209        }
210
211        let n = points.len() as f64;
212        let sum_x: f64 = points.iter().map(|(x, _)| x).sum();
213        let sum_y: f64 = points.iter().map(|(_, y)| y).sum();
214        let sum_xy: f64 = points.iter().map(|(x, y)| x * y).sum();
215        let sum_x2: f64 = points.iter().map(|(x, _)| x * x).sum();
216
217        let denominator = n * sum_x2 - sum_x * sum_x;
218        if denominator.abs() < f64::EPSILON {
219            return 0.0;
220        }
221
222        (n * sum_xy - sum_x * sum_y) / denominator
223    }
224
225    /// Estimate variance of the estimator.
226    #[must_use]
227    pub fn estimate_variance(&self) -> f64 {
228        if self.batch_count < 2 {
229            return 0.0;
230        }
231
232        let mean = self.sum_batch_estimates / self.batch_count as f64;
233        let mean_sq = self.sum_squared_estimates / self.batch_count as f64;
234
235        // Variance = E[X²] - E[X]²
236        (mean_sq - mean * mean).max(0.0)
237    }
238
239    /// Get standard error estimate.
240    #[must_use]
241    pub fn standard_error(&self) -> f64 {
242        self.estimate_variance().sqrt()
243    }
244
245    /// Run sampling until n samples.
246    pub fn run_to_n(&mut self, target_n: u64) {
247        // Sample at logarithmic intervals for good convergence plot
248        let checkpoints = [
249            100, 200, 500, 1000, 2000, 5000, 10_000, 20_000, 50_000, 100_000, 200_000, 500_000,
250            1_000_000,
251        ];
252
253        for &checkpoint in &checkpoints {
254            if checkpoint > target_n {
255                break;
256            }
257            if checkpoint > self.n {
258                self.add_samples(checkpoint - self.n);
259                self.record_history();
260            }
261        }
262
263        // Fill to target
264        if self.n < target_n {
265            self.add_samples(target_n - self.n);
266            self.record_history();
267        }
268    }
269}
270
271impl EddDemo for MonteCarloPlDemo {
272    fn name(&self) -> &'static str {
273        "Monte Carlo π Convergence"
274    }
275
276    fn emc_ref(&self) -> &'static str {
277        "statistical/monte_carlo_integration"
278    }
279
280    fn step(&mut self, _dt: f64) {
281        // Each step adds a batch of samples
282        self.add_samples(self.batch_size);
283        self.record_history();
284    }
285
286    fn verify_equation(&self) -> bool {
287        let slope = self.calculate_convergence_slope();
288        (slope - self.expected_slope).abs() <= self.slope_tolerance
289    }
290
291    fn get_falsification_status(&self) -> FalsificationStatus {
292        let slope = self.calculate_convergence_slope();
293        let slope_passed = (slope - self.expected_slope).abs() <= self.slope_tolerance;
294
295        let error = self.absolute_error();
296        let expected_error = 1.0 / (self.n as f64).sqrt(); // Theoretical O(n^{-1/2})
297        let error_reasonable = error < expected_error * 10.0; // Within 10x theoretical
298
299        FalsificationStatus {
300            verified: slope_passed && error_reasonable,
301            criteria: vec![
302                CriterionStatus {
303                    id: "MC-SLOPE".to_string(),
304                    name: "Convergence rate".to_string(),
305                    passed: slope_passed,
306                    value: slope,
307                    threshold: self.expected_slope,
308                },
309                CriterionStatus {
310                    id: "MC-ERROR".to_string(),
311                    name: "Absolute error".to_string(),
312                    passed: error_reasonable,
313                    value: error,
314                    threshold: expected_error * 10.0,
315                },
316            ],
317            message: if slope_passed {
318                format!(
319                    "CLT verified: slope={slope:.3} ≈ -0.5, π̂={:.6}, n={}",
320                    self.pi_estimate, self.n
321                )
322            } else {
323                format!(
324                    "Convergence slope {slope:.3} deviates from expected -0.5 (tolerance ±{:.2})",
325                    self.slope_tolerance
326                )
327            },
328        }
329    }
330
331    fn reset(&mut self) {
332        *self = Self::new(self.seed);
333    }
334}
335
336// =============================================================================
337// WASM Bindings
338// =============================================================================
339
#[cfg(feature = "wasm")]
mod wasm {
    //! Thin `wasm_bindgen` wrappers exposing [`MonteCarloPlDemo`] to
    //! JavaScript. Each method delegates directly to the inner demo.

    use super::{EddDemo, MonteCarloPlDemo};
    use wasm_bindgen::prelude::*;

    /// WASM-exported handle owning a [`MonteCarloPlDemo`].
    #[wasm_bindgen]
    pub struct WasmMonteCarloPi {
        inner: MonteCarloPlDemo,
    }

    #[wasm_bindgen]
    impl WasmMonteCarloPi {
        /// Construct a demo with the given RNG seed.
        #[wasm_bindgen(constructor)]
        pub fn new(seed: u64) -> Self {
            Self {
                inner: MonteCarloPlDemo::new(seed),
            }
        }

        /// Draw `count` additional samples.
        pub fn add_samples(&mut self, count: u64) {
            self.inner.add_samples(count);
        }

        /// Total number of samples drawn so far.
        pub fn get_n(&self) -> u64 {
            self.inner.n
        }

        /// Current π estimate.
        pub fn get_estimate(&self) -> f64 {
            self.inner.pi_estimate
        }

        /// Current absolute error |π̂ - π|.
        pub fn get_error(&self) -> f64 {
            self.inner.absolute_error()
        }

        /// Number of points that landed inside the quarter circle.
        pub fn get_inside_count(&self) -> u64 {
            self.inner.inside_count
        }

        /// Whether the measured convergence slope matches expectation.
        pub fn verify_equation(&self) -> bool {
            self.inner.verify_equation()
        }

        /// Toggle antithetic sampling.
        pub fn set_antithetic(&mut self, enabled: bool) {
            self.inner.set_antithetic(enabled);
        }

        /// Reset the demo to its initial state (same seed).
        pub fn reset(&mut self) {
            self.inner.reset();
        }

        /// Falsification status serialized as JSON; empty string if
        /// serialization fails.
        pub fn get_status_json(&self) -> String {
            serde_json::to_string(&self.inner.get_falsification_status()).unwrap_or_default()
        }

        /// Current log-log convergence slope.
        pub fn get_convergence_slope(&self) -> f64 {
            self.inner.calculate_convergence_slope()
        }
    }
}
400
401// =============================================================================
402// Tests - Following EDD Methodology
403// =============================================================================
404
#[cfg(test)]
mod tests {
    use super::*;

    // =========================================================================
    // Phase 1: Equation - Define what we're testing
    // =========================================================================

    #[test]
    fn test_equation_pi_estimation() {
        // π̂ = 4 × (inside / n)
        let mut demo = MonteCarloPlDemo::new(42);
        demo.inside_count = 785; // ~78.5% inside quarter circle
        demo.n = 1000;
        demo.pi_estimate = 4.0 * demo.inside_count as f64 / demo.n as f64;

        // π ≈ 3.14, so 78.5% inside is about right
        assert!(
            (demo.pi_estimate - 3.14).abs() < 0.1,
            "Estimate: {}",
            demo.pi_estimate
        );
    }

    #[test]
    fn test_equation_convergence_rate() {
        // Error should scale as O(n^{-1/2})
        // If we double n, error should decrease by factor of √2 ≈ 1.41

        let mut demo = MonteCarloPlDemo::new(42);
        demo.run_to_n(10000);
        let error_10k = demo.absolute_error();

        demo.run_to_n(40000);
        let error_40k = demo.absolute_error();

        // With 4x samples, error should be ~2x smaller (√4 = 2)
        // But this is stochastic, so we just check it decreased
        // NOTE(review): no assertion here — the error ratio is stochastic,
        // so the values are only printed for manual inspection.
        println!("Error at 10k: {error_10k:.6}, at 40k: {error_40k:.6}");
    }

    // =========================================================================
    // Phase 2: Failing Test - Bad estimator would fail
    // =========================================================================

    #[test]
    fn test_failing_wrong_scaling() {
        // If we had wrong formula, convergence would be different
        let mut demo = MonteCarloPlDemo::new(42);
        demo.expected_slope = -1.0; // Wrong expectation!
        demo.slope_tolerance = 0.05; // Tight tolerance

        demo.run_to_n(100000);

        // This should NOT verify with wrong expectation
        let slope = demo.calculate_convergence_slope();
        assert!(
            (slope - (-1.0)).abs() > 0.05,
            "Slope {slope} shouldn't match -1.0"
        );
    }

    #[test]
    fn test_failing_insufficient_samples() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.add_samples(10); // Way too few samples

        // Can't calculate meaningful slope with few samples
        let slope = demo.calculate_convergence_slope();
        assert!(
            slope.abs() < f64::EPSILON || demo.history.len() < 3,
            "Insufficient samples should give unreliable slope"
        );
    }

    // =========================================================================
    // Phase 3: Implementation - CLT convergence verified
    // =========================================================================

    #[test]
    fn test_verification_convergence_slope() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.run_to_n(1000000);

        let slope = demo.calculate_convergence_slope();
        assert!(
            (slope - (-0.5)).abs() < 0.15,
            "Slope should be ≈ -0.5, got {slope}"
        );
    }

    #[test]
    fn test_verification_estimate_accuracy() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.run_to_n(1000000);

        let error = demo.relative_error();
        assert!(
            error < 0.001,
            "Relative error should be < 0.1% with 1M samples, got {:.4}%",
            error * 100.0
        );
    }

    // =========================================================================
    // Phase 4: Verification - Antithetic sampling
    // =========================================================================

    #[test]
    fn test_verification_antithetic_reduces_variance() {
        // Run with and without antithetic sampling
        let mut naive = MonteCarloPlDemo::new(42);
        naive.set_antithetic(false);
        naive.run_to_n(100000);
        let var_naive = naive.estimate_variance();

        let mut anti = MonteCarloPlDemo::new(42);
        anti.set_antithetic(true);
        anti.run_to_n(100000);
        let var_anti = anti.estimate_variance();

        // Antithetic should have lower or similar variance
        // Note: This isn't always guaranteed due to randomness
        // NOTE(review): no assertion — variance reduction is expected but
        // not guaranteed per-run, so the result is printed, not enforced.
        println!("Naive variance: {var_naive:.6}, Antithetic variance: {var_anti:.6}");
    }

    // =========================================================================
    // Phase 5: Falsification - Document edge cases
    // =========================================================================

    #[test]
    fn test_falsification_seed_matters() {
        // Different seeds give different results
        let mut demo1 = MonteCarloPlDemo::new(1);
        let mut demo2 = MonteCarloPlDemo::new(2);

        demo1.run_to_n(10000);
        demo2.run_to_n(10000);

        // Estimates should differ (but both be close to π)
        assert!(
            (demo1.pi_estimate - demo2.pi_estimate).abs() > 1e-6,
            "Different seeds should give different estimates"
        );
    }

    #[test]
    fn test_falsification_status_structure() {
        let demo = MonteCarloPlDemo::new(42);
        let status = demo.get_falsification_status();

        assert_eq!(status.criteria.len(), 2);
        assert_eq!(status.criteria[0].id, "MC-SLOPE");
        assert_eq!(status.criteria[1].id, "MC-ERROR");
    }

    // =========================================================================
    // Integration tests
    // =========================================================================

    #[test]
    fn test_demo_trait_implementation() {
        let mut demo = MonteCarloPlDemo::new(42);

        assert_eq!(demo.name(), "Monte Carlo π Convergence");
        assert_eq!(demo.emc_ref(), "statistical/monte_carlo_integration");

        demo.step(0.0);
        assert!(demo.n > 0);

        demo.reset();
        assert_eq!(demo.n, 0);
    }

    #[test]
    fn test_reproducibility() {
        // Same seed + same sample count must give bit-identical results.
        let mut demo1 = MonteCarloPlDemo::new(42);
        let mut demo2 = MonteCarloPlDemo::new(42);

        demo1.run_to_n(10000);
        demo2.run_to_n(10000);

        assert_eq!(demo1.inside_count, demo2.inside_count);
        assert_eq!(demo1.pi_estimate, demo2.pi_estimate);
    }

    // =========================================================================
    // Additional coverage tests
    // =========================================================================

    #[test]
    fn test_default() {
        let demo = MonteCarloPlDemo::default();
        assert_eq!(demo.seed, 42);
        assert_eq!(demo.n, 0);
    }

    #[test]
    fn test_clone() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.add_samples(100);
        let cloned = demo.clone();
        assert_eq!(demo.n, cloned.n);
        assert_eq!(demo.inside_count, cloned.inside_count);
    }

    #[test]
    fn test_debug() {
        let demo = MonteCarloPlDemo::new(42);
        let debug_str = format!("{demo:?}");
        assert!(debug_str.contains("MonteCarloPlDemo"));
    }

    #[test]
    fn test_serialization() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.add_samples(100);
        let json = serde_json::to_string(&demo).expect("serialize");
        assert!(json.contains("inside_count"));

        let restored: MonteCarloPlDemo = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(restored.n, demo.n);
    }

    #[test]
    fn test_absolute_error() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.pi_estimate = 3.0;
        let error = demo.absolute_error();
        assert!((error - (std::f64::consts::PI - 3.0).abs()).abs() < 1e-10);
    }

    #[test]
    fn test_relative_error() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.pi_estimate = 3.0;
        let error = demo.relative_error();
        let expected = (std::f64::consts::PI - 3.0).abs() / std::f64::consts::PI;
        assert!((error - expected).abs() < 1e-10);
    }

    #[test]
    fn test_estimate_variance_zero_samples() {
        let demo = MonteCarloPlDemo::new(42);
        let variance = demo.estimate_variance();
        assert!(variance.abs() < 1e-10);
    }

    #[test]
    fn test_estimate_variance_with_samples() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.run_to_n(10000);
        let variance = demo.estimate_variance();
        assert!(variance >= 0.0, "Variance should be non-negative");
    }

    #[test]
    fn test_calculate_convergence_slope_insufficient_history() {
        let mut demo = MonteCarloPlDemo::new(42);
        // History is (n, estimate, error)
        demo.history = vec![(100, 3.1, 0.04)];
        let slope = demo.calculate_convergence_slope();
        assert!(slope.abs() < 1e-10);
    }

    #[test]
    fn test_calculate_convergence_slope_zero_variance() {
        let mut demo = MonteCarloPlDemo::new(42);
        // All same n values (zero variance in x) - history is (n, estimate, error)
        demo.history = vec![(100, 3.1, 0.04), (100, 3.14, 0.001), (100, 3.15, 0.009)];
        let slope = demo.calculate_convergence_slope();
        assert!(slope.abs() < 1e-10);
    }

    #[test]
    fn test_add_samples_zero() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.add_samples(0);
        assert_eq!(demo.n, 0);
    }

    #[test]
    fn test_run_to_n_already_at_target() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.run_to_n(1000);
        let n_before = demo.n;
        demo.run_to_n(500); // Less than current
        assert_eq!(demo.n, n_before);
    }

    #[test]
    fn test_set_antithetic() {
        let mut demo = MonteCarloPlDemo::new(42);
        assert!(!demo.use_antithetic); // Default is false

        demo.set_antithetic(true);
        assert!(demo.use_antithetic);

        demo.set_antithetic(false);
        assert!(!demo.use_antithetic);
    }

    #[test]
    fn test_step_increments() {
        let mut demo = MonteCarloPlDemo::new(42);
        assert_eq!(demo.n, 0);
        demo.step(0.0);
        assert!(demo.n > 0);
    }

    #[test]
    fn test_reset_clears_state() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.run_to_n(1000);
        assert!(demo.n > 0);
        assert!(demo.inside_count > 0);

        demo.reset();
        assert_eq!(demo.n, 0);
        assert_eq!(demo.inside_count, 0);
        assert!(demo.history.is_empty());
    }

    #[test]
    fn test_verify_equation_initial() {
        let demo = MonteCarloPlDemo::new(42);
        // No samples yet, should fail
        assert!(!demo.verify_equation());
    }

    #[test]
    fn test_verify_equation_sufficient_samples() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.run_to_n(1000000);
        // With many samples, should verify
        let verified = demo.verify_equation();
        // This is probabilistic, so just check it runs
        // NOTE(review): no assertion — outcome is seed-dependent.
        println!("Verified with 1M samples: {verified}");
    }

    #[test]
    fn test_falsification_status_initial() {
        let demo = MonteCarloPlDemo::new(42);
        let status = demo.get_falsification_status();
        // Initial state should not be verified
        assert!(!status.verified);
    }

    #[test]
    fn test_antithetic_sampling() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.set_antithetic(true);
        demo.run_to_n(10000);

        // Should have run with antithetic variates
        assert!(demo.pi_estimate > 0.0);
        assert!(demo.inside_count > 0);
    }

    #[test]
    fn test_history_recording() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.run_to_n(10000);

        // History should be recorded at intervals
        assert!(!demo.history.is_empty());
    }

    #[test]
    fn test_batch_size_default() {
        let demo = MonteCarloPlDemo::new(42);
        assert!(demo.batch_size > 0);
    }

    #[test]
    fn test_standard_error() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.run_to_n(10000);
        let se = demo.standard_error();
        assert!(se >= 0.0, "Standard error should be non-negative");
    }

    #[test]
    fn test_record_history_empty_n() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.record_history(); // n is 0, should do nothing
        assert!(demo.history.is_empty());
    }

    #[test]
    fn test_batch_statistics() {
        let mut demo = MonteCarloPlDemo::new(42);
        demo.run_to_n(5000);
        // Should have recorded at least a few batches
        assert!(demo.batch_count > 0);
        assert!(demo.sum_batch_estimates > 0.0);
    }

    #[test]
    fn test_convergence_slope_filters_zero_error() {
        let mut demo = MonteCarloPlDemo::new(42);
        // Add history with zero error points (should be filtered)
        demo.history = vec![
            (100, 3.14159, 0.0), // Zero error - filtered
            (200, 3.14, 0.001),
            (400, 3.141, 0.0005),
            (800, 3.1415, 0.00009),
            (1600, 3.14158, 0.00001),
        ];
        let slope = demo.calculate_convergence_slope();
        // Should still calculate slope from non-zero entries
        assert!(slope < 0.0, "Slope should be negative (error decreasing)");
    }

    #[test]
    fn test_slope_tolerance_accessor() {
        let demo = MonteCarloPlDemo::new(42);
        assert!((demo.slope_tolerance - 0.1).abs() < 1e-10);
    }

    #[test]
    fn test_expected_slope_accessor() {
        let demo = MonteCarloPlDemo::new(42);
        assert!((demo.expected_slope - (-0.5)).abs() < 1e-10);
    }

    #[test]
    fn test_pi_true_constant() {
        let demo = MonteCarloPlDemo::new(42);
        assert!((demo.pi_true - std::f64::consts::PI).abs() < 1e-10);
    }
}