// oxiphysics_core/sampling/functions_2.rs
//! Auto-generated module
//!
//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)

#![allow(clippy::needless_range_loop)]
#[allow(unused_imports)]
use super::functions::*;
use super::functions::{min_pairwise_dist, rejection_sample_1d};
use super::types::{LatinHypercube, Lcg, SobolSequence};
/// Extra unit tests for the sampling module: RNG basics, stratified and
/// Latin-hypercube designs, quasi-random (Sobol/Halton) sequences, MCMC
/// samplers, resampling helpers, and Monte-Carlo integration utilities.
/// These are statistical smoke tests: most assertions use loose tolerances
/// on fixed seeds rather than exact expected values.
#[cfg(test)]
mod additional_sampling_tests {
    use super::*;

    use crate::sampling::GibbsSampler;
    use crate::sampling::HaltonSequence;
    use crate::sampling::ImportanceSampler;
    use crate::sampling::LatinHypercube;
    use crate::sampling::Lcg;
    use crate::sampling::MetropolisHastings;
    use crate::sampling::Sobol;

    // --- Lcg: linear congruential generator basics ---

    #[test]
    fn test_lcg_next_u64_non_zero_seed() {
        let mut rng = Lcg::new(1);
        let v = rng.next_u64();
        assert_ne!(v, 0, "LCG should generate non-zero for seed=1");
    }
    #[test]
    fn test_lcg_different_seeds_different_outputs() {
        let mut r1 = Lcg::new(1);
        let mut r2 = Lcg::new(2);
        let v1 = r1.next_u64();
        let v2 = r2.next_u64();
        assert_ne!(v1, v2, "Different seeds should produce different values");
    }
    #[test]
    fn test_lcg_range_stays_in_bounds() {
        let mut rng = Lcg::new(42);
        for _ in 0..1_000 {
            let v = rng.next_f64_range(-5.0, 5.0);
            assert!((-5.0..5.0).contains(&v), "value {} out of range [-5,5)", v);
        }
    }
    #[test]
    fn test_lcg_normal_pair_independent() {
        // Sign-balance check: both halves of each marginal should be hit often.
        let mut rng = Lcg::new(123);
        let (mut pos0, mut neg0, mut pos1, mut neg1) = (0, 0, 0, 0);
        for _ in 0..200 {
            let (z0, z1) = rng.next_normal_pair();
            if z0 > 0.0 {
                pos0 += 1;
            } else {
                neg0 += 1;
            }
            if z1 > 0.0 {
                pos1 += 1;
            } else {
                neg1 += 1;
            }
        }
        assert!(pos0 > 50 && neg0 > 50, "z0 should span both halves");
        assert!(pos1 > 50 && neg1 > 50, "z1 should span both halves");
    }
    #[test]
    fn test_lcg_normal_std_near_one() {
        let mut rng = Lcg::new(999);
        let n = 5_000;
        let samples: Vec<f64> = (0..n).map(|_| rng.next_normal()).collect();
        let mean = samples.iter().sum::<f64>() / n as f64;
        let var = samples.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / n as f64;
        let std = var.sqrt();
        assert!(
            (std - 1.0).abs() < 0.1,
            "std of normal samples should be ~1: std={}",
            std
        );
    }

    // --- Stratified and Latin-hypercube sampling ---

    #[test]
    fn test_stratified_1d_in_range() {
        let mut rng = Lcg::new(11);
        let samples = stratified_sample_1d(50, &mut rng);
        for &s in &samples {
            assert!((0.0..1.0).contains(&s), "value {} out of [0,1)", s);
        }
    }
    #[test]
    fn test_stratified_1d_sum_approx_half() {
        let mut rng = Lcg::new(77);
        let n = 100;
        let samples = stratified_sample_1d(n, &mut rng);
        let mean = samples.iter().sum::<f64>() / n as f64;
        assert!((mean - 0.5).abs() < 0.1, "mean should be ~0.5: {}", mean);
    }
    #[test]
    fn test_lhs_all_values_in_unit_interval() {
        let mut rng = Lcg::new(202);
        let lhs = LatinHypercube::new(20, 5);
        let samples = lhs.sample(&mut rng);
        for s in &samples {
            for &v in s {
                assert!((0.0..1.0).contains(&v), "LHS value {} out of [0,1)", v);
            }
        }
    }
    #[test]
    fn test_lhs_large_n_strata_covered() {
        // LHS guarantee: each of the n strata holds exactly one sample per dim.
        let mut rng = Lcg::new(303);
        let n = 32;
        let lhs = LatinHypercube::new(n, 4);
        let samples = lhs.sample(&mut rng);
        let inv_n = 1.0 / n as f64;
        for d in 0..4 {
            let mut occupied = vec![false; n];
            for s in &samples {
                let stratum = ((s[d] / inv_n) as usize).min(n - 1);
                occupied[stratum] = true;
            }
            let all_covered = occupied.iter().all(|&x| x);
            assert!(all_covered, "dim {d} not all strata covered");
        }
    }

    // --- Sobol and Halton low-discrepancy sequences ---

    #[test]
    fn test_sobol_1d_first_value_zero() {
        let mut sobol = Sobol::new_1d();
        let first = sobol.next_1d();
        assert!(first.abs() < 1e-12, "Sobol(0) should be 0.0: {}", first);
    }
    #[test]
    fn test_sobol_1d_no_duplicates() {
        let mut sobol = Sobol::new_1d();
        let vals: Vec<f64> = (0..16).map(|_| sobol.next_1d()).collect();
        let mut sorted = vals.clone();
        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
        for w in sorted.windows(2) {
            assert!((w[1] - w[0]).abs() > 1e-15, "duplicate Sobol values");
        }
    }
    #[test]
    fn test_sobol_multidim_2d_count() {
        let sobol = SobolSequence::new(2);
        let pts = sobol.sample(32);
        assert_eq!(pts.len(), 32);
        for p in &pts {
            assert_eq!(p.len(), 2);
        }
    }
    #[test]
    fn test_sobol_multidim_values_in_range() {
        let sobol = SobolSequence::new(3);
        let pts = sobol.sample(128);
        for p in &pts {
            for &v in p {
                assert!((0.0..1.0).contains(&v), "Sobol value {} out of [0,1)", v);
            }
        }
    }
    #[test]
    fn test_halton_base3_first_values() {
        // Radical inverse in base 3: 1/3, 2/3, 1/9, ...
        let samples = HaltonSequence::sample(3, 3);
        assert!(
            (samples[0] - 1.0 / 3.0).abs() < 1e-12,
            "h3[0]={}",
            samples[0]
        );
        assert!(
            (samples[1] - 2.0 / 3.0).abs() < 1e-12,
            "h3[1]={}",
            samples[1]
        );
        assert!(
            (samples[2] - 1.0 / 9.0).abs() < 1e-12,
            "h3[2]={}",
            samples[2]
        );
    }
    #[test]
    fn test_halton_multivariate_shape() {
        let pts = halton_multivariate(20, 3);
        assert_eq!(pts.len(), 20);
        for p in &pts {
            assert_eq!(p.len(), 3);
            for &v in p {
                assert!(
                    (0.0..1.0).contains(&v),
                    "Halton multi value {} out of [0,1)",
                    v
                );
            }
        }
    }
    #[test]
    fn test_halton_sequence_free_fn_count() {
        let pts = halton_sequence(50, 5);
        assert_eq!(pts.len(), 50);
    }
    #[test]
    fn test_halton_1d_low_discrepancy_coverage() {
        let n = 8;
        let pts = HaltonSequence::sample(n, 2);
        for &v in &pts {
            assert!((0.0..1.0).contains(&v), "value {} out of [0,1)", v);
        }
        let mut sorted = pts.clone();
        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
        for w in sorted.windows(2) {
            assert!(
                (w[1] - w[0]).abs() > 1e-12,
                "duplicate values: {} and {}",
                w[0],
                w[1]
            );
        }
        let mean = pts.iter().sum::<f64>() / n as f64;
        assert!((mean - 0.5).abs() < 0.15, "mean should be ~0.5: {}", mean);
    }

    // --- Markov-chain Monte Carlo (Metropolis-Hastings, Gibbs) ---

    #[test]
    fn test_mh_acceptance_rate_reasonable() {
        // NOTE: visibility on a function-local item has no effect; artifact of
        // code generation.
        pub(super) fn log_target(x: f64) -> f64 {
            -0.5 * x * x
        }
        let mut mcmc = MetropolisHastings::new(0.0, 0.5, 42);
        let rate = mcmc.estimate_acceptance_rate(1000, log_target);
        assert!(rate > 0.3 && rate <= 1.0, "acceptance rate={}", rate);
    }
    #[test]
    fn test_mh_sample_length() {
        pub(super) fn log_target(x: f64) -> f64 {
            -x * x
        }
        let mut mcmc = MetropolisHastings::new(0.0, 1.0, 7);
        let samples = mcmc.sample(300, log_target);
        assert_eq!(samples.len(), 300);
    }
    #[test]
    fn test_mh_bimodal_target() {
        // Mixture of two Gaussians centered at ±3; only checks chain length.
        pub(super) fn log_target(x: f64) -> f64 {
            let p1 = (-0.5 * (x - 3.0).powi(2)).exp();
            let p2 = (-0.5 * (x + 3.0).powi(2)).exp();
            (p1 + p2).ln()
        }
        let mut mcmc = MetropolisHastings::new(3.0, 2.0, 55);
        let samples = mcmc.sample(2000, log_target);
        assert_eq!(samples.len(), 2000);
    }
    #[test]
    fn test_gibbs_output_varies() {
        let mut gibbs = GibbsSampler::new(2, 13);
        let samples = gibbs.sample(
            100,
            &[
                Box::new(|_, _: &[f64]| 0.0_f64),
                Box::new(|_, _: &[f64]| 0.0_f64),
            ],
            &[1.0, 1.0],
        );
        let all_same = samples.iter().all(|s| (s[0] - samples[0][0]).abs() < 1e-12);
        assert!(!all_same, "Gibbs output should vary across samples");
    }
    #[test]
    fn test_gibbs_3d_shape() {
        let mut gibbs = GibbsSampler::new(3, 99);
        let samples = gibbs.sample(
            50,
            &[
                Box::new(|_, _: &[f64]| 1.0_f64),
                Box::new(|_, _: &[f64]| -1.0_f64),
                Box::new(|_, _: &[f64]| 0.0_f64),
            ],
            &[0.5, 0.5, 0.5],
        );
        assert_eq!(samples.len(), 50);
        for s in &samples {
            assert_eq!(s.len(), 3);
        }
    }

    // --- Importance sampling, blue noise, particle resampling ---

    #[test]
    fn test_importance_sampler_envelope_respected() {
        pub(super) fn linear_pdf(x: f64) -> f64 {
            2.0 * x
        }
        let sampler = ImportanceSampler::new(linear_pdf, 2.0);
        let mut rng = Lcg::new(707);
        let samples = sampler.sample(200, &mut rng);
        let mean = samples.iter().sum::<f64>() / samples.len() as f64;
        assert!(
            mean > 0.5,
            "mean should be > 0.5 for linear pdf: mean={}",
            mean
        );
    }
    #[test]
    fn test_importance_sampler_all_in_unit_interval() {
        pub(super) fn pdf(x: f64) -> f64 {
            x * x * 3.0
        }
        let sampler = ImportanceSampler::new(pdf, 3.0);
        let mut rng = Lcg::new(808);
        let samples = sampler.sample(100, &mut rng);
        for &s in &samples {
            assert!((0.0..1.0).contains(&s), "sample {} out of [0,1)", s);
        }
    }
    #[test]
    fn test_blue_noise_empty_for_impossible_radius() {
        // Radius 0.9 in the unit square admits at most a handful of points.
        let mut rng = Lcg::new(303);
        let samples = blue_noise_2d(100, 0.9, &mut rng);
        assert!(
            samples.len() < 5,
            "very large radius should yield few samples: {}",
            samples.len()
        );
    }
    #[test]
    fn test_blue_noise_in_unit_square() {
        let mut rng = Lcg::new(123);
        let r = 0.1;
        let samples = blue_noise_2d(30, r, &mut rng);
        for s in &samples {
            assert!(s[0] >= 0.0 && s[0] < 1.0, "x={} out of [0,1)", s[0]);
            assert!(s[1] >= 0.0 && s[1] < 1.0, "y={} out of [0,1)", s[1]);
        }
    }
    #[test]
    fn test_systematic_resample_uniform_weights() {
        let particles: Vec<f64> = (0..5).map(|i| i as f64).collect();
        let weights = vec![1.0; 5];
        let mut rng = Lcg::new(42);
        let resampled = systematic_resample(&particles, &weights, 10, &mut rng);
        assert_eq!(resampled.len(), 10);
        for &v in &resampled {
            assert!(
                (0.0..=4.0).contains(&v),
                "resampled value {} out of range",
                v
            );
        }
    }
    #[test]
    fn test_systematic_resample_empty() {
        let particles: Vec<f64> = vec![];
        let weights: Vec<f64> = vec![];
        let mut rng = Lcg::new(99);
        let resampled = systematic_resample(&particles, &weights, 5, &mut rng);
        assert_eq!(resampled.len(), 0);
    }

    // --- Quasi-Monte-Carlo integration and bootstrap ---

    #[test]
    fn test_halton_multivariate_dim1_matches_1d() {
        let pts = halton_multivariate(10, 1);
        let seq = HaltonSequence::sample(10, 2);
        for (p, s) in pts.iter().zip(seq.iter()) {
            assert!((p[0] - s).abs() < 1e-12, "dim0 mismatch: {} vs {}", p[0], s);
        }
    }
    #[test]
    fn test_qmc_halton_sine_integral() {
        // ∫₀¹ sin(πx) dx = 2/π, so the estimate times π should be ~2.
        let pi = std::f64::consts::PI;
        let est = qmc_integrate_halton(|x| (x * pi).sin(), 0.0, 1.0, 1024);
        let est_scaled = est * pi;
        assert!(
            (est_scaled - 2.0).abs() < 0.05,
            "Halton sin integral={}",
            est_scaled
        );
    }
    #[test]
    fn test_qmc_sobol_cubic_integral() {
        // ∫₀¹ x³ dx = 1/4.
        let est = qmc_integrate_sobol(|x| x * x * x, 0.0, 1.0, 512);
        assert!((est - 0.25).abs() < 0.01, "Sobol cubic integral={}", est);
    }
    #[test]
    fn test_bootstrap_resample_single_element() {
        let data = vec![42.0];
        let mut rng = Lcg::new(1);
        let resamples = bootstrap_resample(&data, 5, &mut rng);
        assert_eq!(resamples.len(), 5);
        for r in &resamples {
            assert_eq!(r.len(), 1);
            assert_eq!(r[0], 42.0);
        }
    }
    #[test]
    fn test_bootstrap_mean_mean_correct() {
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let mut rng = Lcg::new(31415);
        let (mean, ci_lo, ci_hi) = bootstrap_mean_ci(&data, 200, &mut rng);
        assert!((mean - 3.0).abs() < 1e-10, "mean={}", mean);
        assert!(ci_lo < mean && ci_hi > mean, "CI should contain mean");
    }

    // --- rand-based LHS, effective sample size, discrete importance sampling ---

    #[test]
    fn test_lhs_rand_strata_coverage() {
        let mut rng = rand::rng();
        let n = 16;
        let samples = latin_hypercube_sample(n, 2, &mut rng);
        let inv_n = 1.0 / n as f64;
        for d in 0..2 {
            let mut occupied = vec![false; n];
            for s in &samples {
                let stratum = ((s[d] / inv_n) as usize).min(n - 1);
                occupied[stratum] = true;
            }
            for (k, &hit) in occupied.iter().enumerate() {
                assert!(hit, "dim {d} stratum {k} not covered in rand LHS");
            }
        }
    }
    #[test]
    fn test_lhs_rand_all_in_unit() {
        let mut rng = rand::rng();
        let samples = latin_hypercube_sample(10, 3, &mut rng);
        for s in &samples {
            for &v in s {
                assert!((0.0..1.0).contains(&v), "rand LHS value {} out of [0,1)", v);
            }
        }
    }
    #[test]
    fn test_ess_uniform_weights() {
        let n = 10;
        let weights = vec![1.0; n];
        let ess = effective_sample_size(&weights);
        assert!(
            (ess - n as f64).abs() < 0.01,
            "uniform weights → ESS=N: ess={}",
            ess
        );
    }
    #[test]
    fn test_ess_concentrated_weight() {
        let mut weights = vec![0.001; 100];
        weights[0] = 100.0;
        let ess = effective_sample_size(&weights);
        assert!(ess < 5.0, "concentrated weight → low ESS: ess={}", ess);
    }
    #[test]
    fn test_ess_zero_weights() {
        let weights = vec![0.0; 5];
        let ess = effective_sample_size(&weights);
        assert!(ess.abs() < 1e-10, "zero weights → ESS=0: ess={}", ess);
    }
    #[test]
    fn test_importance_sample_uniform_distribution() {
        let mut rng = rand::rng();
        let pdf = vec![1.0; 10];
        let samples = importance_sample(&pdf, 1000, &mut rng);
        let mut counts = [0usize; 10];
        for &i in &samples {
            counts[i] += 1;
        }
        for (i, &c) in counts.iter().enumerate() {
            assert!(c > 50, "index {} appeared only {} times", i, c);
        }
    }
    #[test]
    fn test_importance_sample_all_to_one_index() {
        let mut rng = rand::rng();
        let pdf = vec![1.0, 0.0, 0.0];
        let samples = importance_sample(&pdf, 50, &mut rng);
        for &i in &samples {
            assert_eq!(i, 0, "with all weight on 0, should always sample 0");
        }
    }

    // --- Variance reduction and N-D Monte-Carlo integration ---

    #[test]
    fn test_mc_control_variate_linear() {
        // Estimate ∫₀¹ x² dx = 1/3 using x (mean 0.5) as the control variate.
        let mut rng = Lcg::new(1234);
        let est = mc_control_variate(|x| x * x, |x| x, 0.5, 0.0, 1.0, 50_000, &mut rng);
        assert!(
            (est - 1.0 / 3.0).abs() < 0.01,
            "control variate estimate={}",
            est
        );
    }
    #[test]
    fn test_mc_antithetic_linear() {
        let mut rng = Lcg::new(5555);
        let est = mc_antithetic(|x| x, 0.0, 1.0, 10_000, &mut rng);
        assert!((est - 0.5).abs() < 0.01, "antithetic estimate={}", est);
    }
    #[test]
    fn test_mc_antithetic_quadratic() {
        let mut rng = Lcg::new(6666);
        let est = mc_antithetic(|x| x * x, 0.0, 1.0, 20_000, &mut rng);
        assert!(
            (est - 1.0 / 3.0).abs() < 0.02,
            "antithetic quadratic estimate={}",
            est
        );
    }
    #[test]
    fn test_stratified_nd_strata_covered_each_dim() {
        let mut rng = Lcg::new(111);
        let n = 10;
        let d = 3;
        let samples = stratified_sample_nd(n, d, &mut rng);
        let inv_n = 1.0 / n as f64;
        for dim in 0..d {
            let mut occupied = vec![false; n];
            for s in &samples {
                let stratum = ((s[dim] / inv_n) as usize).min(n - 1);
                occupied[stratum] = true;
            }
            for (k, &hit) in occupied.iter().enumerate() {
                assert!(hit, "dim {} stratum {} not covered", dim, k);
            }
        }
    }
    #[test]
    fn test_mc_integrate_nd_constant() {
        let mut rng = Lcg::new(9001);
        let (est, _) = monte_carlo_integrate_nd(|_| 1.0, 3, 10_000, &mut rng);
        assert!((est - 1.0).abs() < 0.05, "3D constant integral={}", est);
    }
    #[test]
    fn test_mc_integrate_nd_product() {
        // ∫∫ x·y over the unit square = 1/4.
        let mut rng = Lcg::new(9002);
        let (est, _) = monte_carlo_integrate_nd(|x| x[0] * x[1], 2, 100_000, &mut rng);
        assert!((est - 0.25).abs() < 0.02, "2D product integral={}", est);
    }
}
/// Scrambled van der Corput radical inverse in the given `base`.
///
/// Each base-`base` digit of `i` is shifted by an offset that evolves
/// through an LCG parameterized by `seed` (a cheap digit scramble, not a
/// full Owen scramble). This reduces correlation between different bases
/// in a Halton sequence. Deterministic for a fixed `seed`; returns a value
/// in `[0, 1)`.
#[allow(dead_code)]
pub fn van_der_corput_scrambled(mut i: u32, base: u32, seed: u32) -> f64 {
    let b = base as f64;
    let mut value = 0.0_f64;
    let mut scale = 1.0_f64;
    // Start the per-digit permutation stream; +1 keeps seed 0 non-trivial.
    let mut state = seed.wrapping_add(1);
    while i != 0 {
        scale *= b;
        // Cyclically shift the current digit by the evolving state.
        let scrambled = (i % base).wrapping_add(state) % base;
        value += f64::from(scrambled) / scale;
        i /= base;
        // Advance with the classic Numerical Recipes LCG constants.
        state = state.wrapping_mul(1664525).wrapping_add(1013904223);
    }
    value
}
539/// Scrambled Halton sequence: `n` points in `n_dims` dimensions.
540///
541/// Uses a deterministic per-dimension scramble to reduce correlation.
542#[allow(dead_code)]
543pub fn halton_scrambled(n: usize, n_dims: usize) -> Vec<Vec<f64>> {
544    pub(super) const PRIMES: [u32; 16] =
545        [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
546    let d = n_dims.min(PRIMES.len());
547    (1..=n)
548        .map(|i| {
549            (0..d)
550                .map(|k| {
551                    van_der_corput_scrambled(
552                        i as u32,
553                        PRIMES[k],
554                        (k as u32).wrapping_mul(2654435761),
555                    )
556                })
557                .collect()
558        })
559        .collect()
560}
/// Roberts' R2 quasi-random sequence in two dimensions.
///
/// Additive recurrence based on the plastic constant φ₂ ≈ 1.32471795 (the
/// real root of x³ = x + 1), giving a low-discrepancy point set. All points
/// lie in `[0,1)²`.
#[allow(dead_code)]
pub fn r2_sequence(n: usize) -> Vec<[f64; 2]> {
    // Plastic constant; the two axis increments are its reciprocal powers.
    const PHI2: f64 = 1.324_717_957_244_746_f64;
    let a1 = 1.0 / PHI2;
    let a2 = 1.0 / (PHI2 * PHI2);
    let mut points = Vec::with_capacity(n);
    for i in 0..n {
        let t = (i + 1) as f64;
        // The 0.5 offset centers the lattice; fract() wraps onto the unit torus.
        points.push([(0.5 + a1 * t).fract(), (0.5 + a2 * t).fract()]);
    }
    points
}
/// Roberts-style additive sequence in `d` dimensions using the plastic
/// constant.
///
/// Axis `k` (1-based) advances by φ₂⁻ᵏ per step and every coordinate is
/// wrapped into `[0,1)`. NOTE(review): this reuses the 2-D plastic constant
/// for every `d`; the generalized R_d sequence would use the root of
/// x^(d+1) = x + 1 — confirm whether that is intentional.
#[allow(dead_code)]
pub fn r2_sequence_nd(n: usize, d: usize) -> Vec<Vec<f64>> {
    const PHI2: f64 = 1.324_717_957_244_746_f64;
    // alphas[k-1] = φ₂^(-k), produced by cumulative division.
    let mut alphas = Vec::with_capacity(d);
    let mut power = 1.0_f64;
    for _ in 0..d {
        power /= PHI2;
        alphas.push(power);
    }
    (0..n)
        .map(|i| {
            let t = (i + 1) as f64;
            alphas.iter().map(|&a| (0.5 + a * t).fract()).collect()
        })
        .collect()
}
600/// Latin Hypercube sampler (centered strata).
601///
602/// Places one sample at the *center* of each stratum per dimension (no jitter).
603/// This maximises spread but is completely deterministic given `n_samples`.
604#[allow(dead_code)]
605pub fn latin_hypercube_centered(n_samples: usize, n_dims: usize, rng: &mut Lcg) -> Vec<Vec<f64>> {
606    let inv_n = 1.0 / n_samples as f64;
607    let mut result: Vec<Vec<f64>> = (0..n_samples).map(|_| Vec::with_capacity(n_dims)).collect();
608    for _ in 0..n_dims {
609        let mut perm: Vec<usize> = (0..n_samples).collect();
610        for i in (1..n_samples).rev() {
611            let j = (rng.next_u64() as usize) % (i + 1);
612            perm.swap(i, j);
613        }
614        for i in 0..n_samples {
615            let val = (perm[i] as f64 + 0.5) * inv_n;
616            result[i].push(val);
617        }
618    }
619    result
620}
621/// Maximin Latin Hypercube design.
622///
623/// Generates `n_candidates` LHS designs and returns the one that maximises
624/// the minimum pairwise distance between samples.
625#[allow(dead_code)]
626pub fn latin_hypercube_maximin(
627    n_samples: usize,
628    n_dims: usize,
629    n_candidates: usize,
630    rng: &mut Lcg,
631) -> Vec<Vec<f64>> {
632    let lhc = LatinHypercube::new(n_samples, n_dims);
633    let mut best = lhc.sample(rng);
634    let mut best_min_dist = min_pairwise_dist(&best);
635    for _ in 1..n_candidates {
636        let candidate = lhc.sample(rng);
637        let d = min_pairwise_dist(&candidate);
638        if d > best_min_dist {
639            best_min_dist = d;
640            best = candidate;
641        }
642    }
643    best
644}
645/// Compute the minimum pairwise Euclidean distance among a set of points (maximin variant).
646#[allow(dead_code)]
647pub(super) fn min_pairwise_dist_maximin(pts: &[Vec<f64>]) -> f64 {
648    let n = pts.len();
649    if n < 2 {
650        return f64::INFINITY;
651    }
652    let mut min_d = f64::INFINITY;
653    for i in 0..n {
654        for j in (i + 1)..n {
655            let d2: f64 = pts[i]
656                .iter()
657                .zip(pts[j].iter())
658                .map(|(a, b)| (a - b) * (a - b))
659                .sum();
660            if d2 < min_d {
661                min_d = d2;
662            }
663        }
664    }
665    min_d.sqrt()
666}
667/// Generate `n` stratified samples on the surface of the unit sphere.
668///
669/// Divides the sphere into an equal-area latitude-longitude grid of
670/// `n_lat × n_lon` cells and places one jittered sample per cell.
671/// Returns unit vectors `[x, y, z]`.
672#[allow(dead_code)]
673pub fn stratified_sphere_samples(n_lat: usize, n_lon: usize, rng: &mut Lcg) -> Vec<[f64; 3]> {
674    let mut samples = Vec::with_capacity(n_lat * n_lon);
675    let inv_lat = 1.0 / n_lat as f64;
676    let inv_lon = 1.0 / n_lon as f64;
677    for il in 0..n_lat {
678        for ip in 0..n_lon {
679            let u1 = (il as f64 + rng.next_f64()) * inv_lat;
680            let u2 = (ip as f64 + rng.next_f64()) * inv_lon;
681            let cos_theta = 1.0 - 2.0 * u1;
682            let sin_theta = (1.0 - cos_theta * cos_theta).max(0.0).sqrt();
683            let phi = 2.0 * std::f64::consts::PI * u2;
684            samples.push([sin_theta * phi.cos(), sin_theta * phi.sin(), cos_theta]);
685        }
686    }
687    samples
688}
689/// Inverse-CDF importance sampling for a target distribution defined by its
690/// CDF, computed numerically on a grid.
691///
692/// `cdf_vals[i]` = CDF at `x_min + i * dx`.  Draws `n` samples.
693#[allow(dead_code)]
694pub fn inverse_cdf_sample(
695    cdf_vals: &[f64],
696    x_min: f64,
697    x_max: f64,
698    n: usize,
699    rng: &mut Lcg,
700) -> Vec<f64> {
701    let m = cdf_vals.len();
702    assert!(m >= 2, "cdf_vals must have at least 2 entries");
703    let dx = (x_max - x_min) / (m - 1) as f64;
704    let mut samples = Vec::with_capacity(n);
705    for _ in 0..n {
706        let u = rng.next_f64();
707        let idx = cdf_vals.partition_point(|&c| c < u).min(m - 1);
708        let x = if idx == 0 {
709            x_min
710        } else {
711            let c_lo = cdf_vals[idx - 1];
712            let c_hi = cdf_vals[idx];
713            let frac = if (c_hi - c_lo).abs() < 1e-300 {
714                0.0
715            } else {
716                (u - c_lo) / (c_hi - c_lo)
717            };
718            x_min + (idx as f64 - 1.0 + frac) * dx
719        };
720        samples.push(x);
721    }
722    samples
723}
/// Numerically integrate a tabulated non-negative PDF into a CDF.
///
/// `pdf_vals[i]` is the PDF at `x_min + i * dx` on a uniform grid. Uses the
/// trapezoid rule, normalizes by the total mass when it is non-negligible,
/// and pins the final entry to exactly 1.0 against rounding error. Returns
/// an empty vector for empty input; a single-entry input yields `[1.0]`.
#[allow(dead_code)]
pub fn pdf_to_cdf(pdf_vals: &[f64], x_min: f64, x_max: f64) -> Vec<f64> {
    let m = pdf_vals.len();
    if m == 0 {
        return Vec::new();
    }
    // max(1.0) avoids a zero divisor when there is only one grid point.
    let dx = (x_max - x_min) / (m as f64 - 1.0).max(1.0);
    // Trapezoid-rule running integral of the PDF.
    let mut cdf = vec![0.0_f64];
    cdf.reserve(m - 1);
    let mut running = 0.0_f64;
    for pair in pdf_vals.windows(2) {
        running += (pair[0] + pair[1]) * 0.5 * dx;
        cdf.push(running);
    }
    let total = *cdf.last().expect("cdf is non-empty");
    if total > 1e-300 {
        cdf.iter_mut().for_each(|c| *c /= total);
    }
    if let Some(last) = cdf.last_mut() {
        *last = 1.0;
    }
    cdf
}
753/// 1-D rejection sampling from an un-normalized density `f` on `[a, b]`.
754///
755/// `envelope` must satisfy `f(x) ≤ envelope` for all `x ∈ [a, b]`.
756/// Draws exactly `n` accepted samples. Uses `Lcg` RNG.
757#[allow(dead_code)]
758pub fn rejection_sample_1d_lcg(
759    f: impl Fn(f64) -> f64,
760    a: f64,
761    b: f64,
762    envelope: f64,
763    n: usize,
764    rng: &mut Lcg,
765) -> Vec<f64> {
766    let mut samples = Vec::with_capacity(n);
767    while samples.len() < n {
768        let x = rng.next_f64_range(a, b);
769        let u = rng.next_f64() * envelope;
770        if u <= f(x) {
771            samples.push(x);
772        }
773    }
774    samples
775}
776/// 1-D rejection sampling with automatic envelope estimation.
777///
778/// Estimates the maximum of `f` on a grid of `grid_size` points, then
779/// applies standard rejection sampling.
780#[allow(dead_code)]
781pub fn rejection_sample_1d_auto(
782    f: impl Fn(f64) -> f64,
783    a: f64,
784    b: f64,
785    n: usize,
786    grid_size: usize,
787    rng: &mut Lcg,
788) -> Vec<f64> {
789    let envelope = {
790        let mut max_f = 1e-300_f64;
791        for k in 0..grid_size {
792            let x = a + (k as f64 + 0.5) / grid_size as f64 * (b - a);
793            max_f = max_f.max(f(x));
794        }
795        max_f * 1.01
796    };
797    rejection_sample_1d(f, a, b, envelope, n, rng)
798}
799/// Generate `n` points from a digit-scrambled Sobol sequence in `d` dimensions.
800///
801/// Each dimension is XOR-scrambled with a distinct 32-bit seed for better
802/// uniformity.
803#[allow(dead_code)]
804pub fn sobol_scrambled(n: usize, d: usize) -> Vec<Vec<f64>> {
805    pub(super) const BITS: usize = 32;
806    let n_dims = d.min(3);
807    let dir: Vec<Vec<u32>> = (0..n_dims).map(SobolSequence::direction_numbers).collect();
808    let seeds: Vec<u32> = (0..n_dims as u32)
809        .map(|k| k.wrapping_mul(2654435761).wrapping_add(1))
810        .collect();
811    (0..n)
812        .map(|idx| {
813            let gray = idx ^ (idx >> 1);
814            (0..n_dims)
815                .map(|dd| {
816                    let x = (0..BITS)
817                        .filter(|&i| (gray >> i) & 1 == 1)
818                        .fold(0u32, |acc, i| acc ^ dir[dd][i]);
819                    let x_scrambled = x ^ seeds[dd];
820                    x_scrambled as f64 / (1u64 << BITS) as f64
821                })
822                .collect()
823        })
824        .collect()
825}
/// Arithmetic mean of a slice; 0.0 for empty input.
#[allow(dead_code)]
pub fn sample_mean(xs: &[f64]) -> f64 {
    match xs.len() {
        0 => 0.0,
        n => xs.iter().sum::<f64>() / n as f64,
    }
}
/// Sample variance with Bessel's correction (`1/(n-1)`); 0.0 for fewer than
/// two observations.
#[allow(dead_code)]
pub fn sample_variance(xs: &[f64]) -> f64 {
    let n = xs.len();
    if n < 2 {
        return 0.0;
    }
    // Inlined mean: identical to sample_mean(xs) for non-empty input.
    let mu = xs.iter().sum::<f64>() / n as f64;
    let sum_sq: f64 = xs.iter().map(|&x| (x - mu) * (x - mu)).sum();
    sum_sq / (n - 1) as f64
}
844/// Sample standard deviation.
845#[allow(dead_code)]
846pub fn sample_std(xs: &[f64]) -> f64 {
847    sample_variance(xs).sqrt()
848}
/// Bias-adjusted sample skewness; 0.0 for fewer than three observations or
/// (near-)constant data.
#[allow(dead_code)]
pub fn sample_skewness(xs: &[f64]) -> f64 {
    let n = xs.len();
    if n < 3 {
        return 0.0;
    }
    let n_f = n as f64;
    // Inlined mean and Bessel-corrected std (identical ops to sample_mean /
    // sample_std) so this block is self-contained.
    let mean = xs.iter().sum::<f64>() / n_f;
    let std =
        (xs.iter().map(|&x| (x - mean) * (x - mean)).sum::<f64>() / (n_f - 1.0)).sqrt();
    if std < 1e-300 {
        // Degenerate (constant) data: skewness is defined as zero here.
        return 0.0;
    }
    // Third standardized moment with the small-sample adjustment factor.
    let m3 = xs.iter().map(|&x| ((x - mean) / std).powi(3)).sum::<f64>() / n_f;
    m3 * (n_f * n_f) / ((n_f - 1.0) * (n_f - 2.0))
}
/// Sample excess kurtosis (Fisher's definition: 0 for a Gaussian), with the
/// standard small-sample bias correction; 0.0 for fewer than four
/// observations or (near-)constant data.
#[allow(dead_code)]
pub fn sample_kurtosis(xs: &[f64]) -> f64 {
    let n = xs.len();
    if n < 4 {
        return 0.0;
    }
    let n_f = n as f64;
    // Inlined mean and Bessel-corrected std (identical ops to sample_mean /
    // sample_std) so this block is self-contained.
    let mean = xs.iter().sum::<f64>() / n_f;
    let std =
        (xs.iter().map(|&x| (x - mean) * (x - mean)).sum::<f64>() / (n_f - 1.0)).sqrt();
    if std < 1e-300 {
        return 0.0;
    }
    // Fourth standardized moment, then the unbiased-estimator combination.
    let m4 = xs.iter().map(|&x| ((x - mean) / std).powi(4)).sum::<f64>() / n_f;
    (n_f * (n_f + 1.0) * m4 - 3.0 * (n_f - 1.0).powi(2)) / ((n_f - 1.0) * (n_f - 2.0) * (n_f - 3.0))
}
/// Empirical quantiles at the fractional positions in `qs`.
///
/// `xs` need not be sorted (a copy is sorted internally); each `q` is
/// expected to lie in `[0, 1]`. Values between order statistics are linearly
/// interpolated. Empty input yields `NaN` for every requested quantile.
#[allow(dead_code)]
pub fn empirical_quantiles(xs: &[f64], qs: &[f64]) -> Vec<f64> {
    if xs.is_empty() {
        return vec![f64::NAN; qs.len()];
    }
    let mut data = xs.to_vec();
    // NaNs (if any) compare as Equal, matching a total-order fallback.
    data.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    let top = data.len() - 1;
    qs.iter()
        .map(|&q| {
            // Fractional index into the sorted data.
            let pos = q * top as f64;
            let below = pos.floor() as usize;
            let above = (below + 1).min(top);
            let weight = pos.fract();
            data[below] * (1.0 - weight) + data[above] * weight
        })
        .collect()
}
/// Kolmogorov-Smirnov statistic between two sorted empirical CDFs.
///
/// Computes `sup |F_n(x) - G_m(x)|` by a simultaneous merge walk over
/// both samples. Both slices must be sorted ascending; an empty sample
/// on either side yields `0.0`.
#[allow(dead_code)]
pub fn ks_statistic(xs: &[f64], ys: &[f64]) -> f64 {
    let (n, m) = (xs.len(), ys.len());
    if n == 0 || m == 0 {
        return 0.0;
    }
    let mut ix = 0usize;
    let mut iy = 0usize;
    let mut sup = 0.0_f64;
    while ix < n || iy < m {
        // Exhausted sides act as +inf so the other walker drains alone.
        let a = xs.get(ix).copied().unwrap_or(f64::INFINITY);
        let b = ys.get(iy).copied().unwrap_or(f64::INFINITY);
        // Near-equal observations advance both CDFs together so ties do
        // not create a spurious gap.
        if (a - b).abs() < 1e-15 {
            ix += 1;
            iy += 1;
        } else if a < b {
            ix += 1;
        } else {
            iy += 1;
        }
        sup = sup.max((ix as f64 / n as f64 - iy as f64 / m as f64).abs());
    }
    sup
}
933/// Reservoir sampling (Vitter's Algorithm R).
934///
935/// Uniformly samples `k` items from an iterator of unknown size without
936/// storing the entire sequence.
937#[allow(dead_code)]
938pub fn reservoir_sample<T: Clone>(items: &[T], k: usize, rng: &mut Lcg) -> Vec<T> {
939    let n = items.len();
940    if k == 0 || n == 0 {
941        return Vec::new();
942    }
943    let k_actual = k.min(n);
944    let mut reservoir: Vec<T> = items[..k_actual].to_vec();
945    for i in k_actual..n {
946        let j = (rng.next_u64() % (i + 1) as u64) as usize;
947        if j < k_actual {
948            reservoir[j] = items[i].clone();
949        }
950    }
951    reservoir
952}
953/// Weighted reservoir sample of `k` indices from a weight vector.
954///
955/// Uses Algorithm A-Res (Efraimidis & Spirakis 2006).
956#[allow(dead_code)]
957pub fn weighted_reservoir_sample(weights: &[f64], k: usize, rng: &mut Lcg) -> Vec<usize> {
958    let n = weights.len();
959    if k == 0 || n == 0 {
960        return Vec::new();
961    }
962    let k_actual = k.min(n);
963    let mut heap: Vec<(f64, usize)> = Vec::with_capacity(k_actual);
964    for (i, &w) in weights.iter().enumerate() {
965        if w <= 0.0 {
966            continue;
967        }
968        let u = rng.next_f64().max(1e-300);
969        let key = u.powf(1.0 / w);
970        if heap.len() < k_actual {
971            heap.push((key, i));
972            if heap.len() == k_actual {
973                heap.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal));
974            }
975        } else if key > heap[0].0 {
976            heap[0] = (key, i);
977            heap.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal));
978        }
979    }
980    heap.iter().map(|&(_, i)| i).collect()
981}
// Extended deterministic regression tests (v2) for this sampling module:
// scrambled low-discrepancy sequences, Latin hypercube variants,
// inverse-CDF and rejection sampling, summary statistics, quantiles, the
// KS statistic, and reservoir sampling. Every randomized test seeds its
// own Lcg, so all results are reproducible.
#[cfg(test)]
mod extended_sampling_tests_v2 {
    use super::*;
    use crate::random_processes::sample_mean;
    use crate::random_processes::sample_variance;
    use crate::sample_kurtosis;
    use crate::sample_skewness;

    use crate::sampling::LatinHypercube;
    use crate::sampling::Lcg;

    use crate::sampling::empirical_quantiles;
    use crate::sampling::halton_scrambled;
    use crate::sampling::inverse_cdf_sample;
    use crate::sampling::latin_hypercube_centered;
    use crate::sampling::latin_hypercube_maximin;
    use crate::sampling::min_pairwise_dist;
    use crate::sampling::pdf_to_cdf;
    use crate::sampling::r2_sequence;
    use crate::sampling::r2_sequence_nd;
    use crate::sampling::rejection_sample_1d;
    use crate::sampling::rejection_sample_1d_auto;
    use crate::sampling::reservoir_sample;
    use crate::sampling::sample_std;
    use crate::sampling::sobol_scrambled;
    use crate::sampling::stratified_sphere_samples;
    use crate::sampling::van_der_corput_scrambled;
    use crate::sampling::weighted_reservoir_sample;
    // --- Scrambled low-discrepancy sequences ---
    // Scrambling must keep van der Corput values inside [0, 1).
    #[test]
    fn test_vdc_scrambled_in_unit_interval() {
        for i in 1..=20u32 {
            let v = van_der_corput_scrambled(i, 2, 12345);
            assert!((0.0..1.0).contains(&v), "vdc_scrambled out of [0,1): {}", v);
        }
    }
    // NOTE(review): asserts nothing — different seeds are presumably not
    // guaranteed to produce different values for every index, so this only
    // exercises the code path without panicking. Confirm whether a real
    // inequality assertion is intended here.
    #[test]
    fn test_vdc_scrambled_different_seeds_differ() {
        let v1 = van_der_corput_scrambled(7, 2, 0);
        let v2 = van_der_corput_scrambled(7, 2, 99999);
        let _ = (v1, v2);
    }
    // Scrambled Halton points have the requested count/dimension and stay
    // in the unit cube.
    #[test]
    fn test_halton_scrambled_dimensions() {
        let pts = halton_scrambled(10, 3);
        assert_eq!(pts.len(), 10);
        for p in &pts {
            assert_eq!(p.len(), 3);
            for &v in p {
                assert!((0.0..1.0).contains(&v), "value {} out of [0,1)", v);
            }
        }
    }
    #[test]
    fn test_r2_sequence_in_unit_square() {
        let pts = r2_sequence(20);
        assert_eq!(pts.len(), 20);
        for p in &pts {
            assert!(p[0] >= 0.0 && p[0] < 1.0);
            assert!(p[1] >= 0.0 && p[1] < 1.0);
        }
    }
    #[test]
    fn test_r2_sequence_nd_correct_dims() {
        let pts = r2_sequence_nd(5, 4);
        assert_eq!(pts.len(), 5);
        for p in &pts {
            assert_eq!(p.len(), 4);
        }
    }
    // --- Latin hypercube variants ---
    // Centered LHC must place exactly one point in each of the n strata of
    // every dimension.
    #[test]
    fn test_lhc_centered_strata_exact() {
        let mut rng = Lcg::new(42);
        let n = 8;
        let pts = latin_hypercube_centered(n, 2, &mut rng);
        assert_eq!(pts.len(), n);
        let inv_n = 1.0 / n as f64;
        for d in 0..2 {
            let mut occupied = vec![false; n];
            for p in &pts {
                // Map the coordinate back to its stratum index; min guards
                // against the 1.0 boundary landing out of range.
                let stratum = ((p[d] / inv_n) as usize).min(n - 1);
                occupied[stratum] = true;
            }
            assert!(occupied.iter().all(|&h| h), "all strata should be occupied");
        }
    }
    #[test]
    fn test_lhc_maximin_returns_correct_size() {
        let mut rng = Lcg::new(1001);
        let pts = latin_hypercube_maximin(8, 2, 5, &mut rng);
        assert_eq!(pts.len(), 8);
    }
    // Maximin restarts should not do much worse than a single LHC draw;
    // the 0.9 factor tolerates RNG-stream differences between the paths.
    #[test]
    fn test_lhc_maximin_better_than_single() {
        let mut rng1 = Lcg::new(77);
        let mut rng2 = Lcg::new(77);
        let single = LatinHypercube::new(12, 2).sample(&mut rng1);
        let maximin = latin_hypercube_maximin(12, 2, 10, &mut rng2);
        let d_single = min_pairwise_dist(&single);
        let d_maximin = min_pairwise_dist(&maximin);
        assert!(
            d_maximin >= d_single * 0.9,
            "maximin dist={} single dist={}",
            d_maximin,
            d_single
        );
    }
    // --- Stratified sphere sampling ---
    // 4x8 strata should give 32 points, all on the unit sphere.
    #[test]
    fn test_stratified_sphere_samples_unit_length() {
        let mut rng = Lcg::new(55);
        let pts = stratified_sphere_samples(4, 8, &mut rng);
        assert_eq!(pts.len(), 32);
        for p in &pts {
            let r2 = p[0] * p[0] + p[1] * p[1] + p[2] * p[2];
            assert!((r2 - 1.0).abs() < 1e-10, "r²={}", r2);
        }
    }
    // Stratification over polar angle must cover both hemispheres.
    #[test]
    fn test_stratified_sphere_samples_all_dims_covered() {
        let mut rng = Lcg::new(56);
        let pts = stratified_sphere_samples(4, 4, &mut rng);
        let has_pos_z = pts.iter().any(|p| p[2] > 0.0);
        let has_neg_z = pts.iter().any(|p| p[2] < 0.0);
        assert!(has_pos_z, "no positive z");
        assert!(has_neg_z, "no negative z");
    }
    // --- Inverse-CDF and rejection sampling ---
    // A uniform PDF on [0,1] must integrate to a CDF running from 0 to 1.
    #[test]
    fn test_pdf_to_cdf_uniform() {
        let pdf = vec![1.0_f64; 11];
        let cdf = pdf_to_cdf(&pdf, 0.0, 1.0);
        assert_eq!(cdf.len(), 11);
        assert!(cdf[0].abs() < 1e-10, "CDF(0)={}", cdf[0]);
        assert!((cdf[10] - 1.0).abs() < 1e-10, "CDF(1)={}", cdf[10]);
    }
    #[test]
    fn test_pdf_to_cdf_monotone() {
        let pdf = vec![1.0, 2.0, 3.0, 2.0, 1.0];
        let cdf = pdf_to_cdf(&pdf, 0.0, 1.0);
        for i in 1..cdf.len() {
            assert!(cdf[i] >= cdf[i - 1], "CDF not monotone at i={}", i);
        }
    }
    #[test]
    fn test_inverse_cdf_sample_in_range() {
        let pdf = vec![1.0_f64; 101];
        let cdf = pdf_to_cdf(&pdf, 0.0, 1.0);
        let mut rng = Lcg::new(314);
        let samples = inverse_cdf_sample(&cdf, 0.0, 1.0, 100, &mut rng);
        for &s in &samples {
            assert!((0.0..=1.0).contains(&s), "sample {} out of range", s);
        }
    }
    #[test]
    fn test_rejection_sample_1d_count() {
        let mut rng = Lcg::new(999);
        let samples = rejection_sample_1d(|x| x * x, 0.0, 1.0, 1.0, 50, &mut rng);
        assert_eq!(samples.len(), 50, "should return exactly 50 samples");
    }
    // Envelope 1.01 dominates exp(-x²) on [-3,3]; samples must stay inside.
    #[test]
    fn test_rejection_sample_1d_in_range() {
        let mut rng = Lcg::new(888);
        let samples = rejection_sample_1d(|x| (-x * x).exp(), -3.0, 3.0, 1.01, 30, &mut rng);
        for &s in &samples {
            assert!((-3.0..=3.0).contains(&s), "sample {} out of range", s);
        }
    }
    // Auto variant estimates its own envelope (here from 50 probe points).
    #[test]
    fn test_rejection_sample_1d_auto_count() {
        let mut rng = Lcg::new(777);
        let samples = rejection_sample_1d_auto(
            |x| x.sin().powi(2),
            0.0,
            std::f64::consts::PI,
            20,
            50,
            &mut rng,
        );
        assert_eq!(samples.len(), 20);
    }
    #[test]
    fn test_sobol_scrambled_in_unit_cube() {
        let pts = sobol_scrambled(16, 3);
        assert_eq!(pts.len(), 16);
        for p in &pts {
            for &v in p {
                assert!(
                    (0.0..1.0).contains(&v),
                    "sobol_scrambled value out of [0,1)"
                );
            }
        }
    }
    // --- Summary statistics ---
    // Mean of 0, 0.01, ..., 0.99 is exactly 0.495.
    #[test]
    fn test_sample_mean_uniform() {
        let xs: Vec<f64> = (0..100).map(|i| i as f64 / 100.0).collect();
        let mean = sample_mean(&xs);
        assert!((mean - 0.495).abs() < 0.01, "mean={}", mean);
    }
    #[test]
    fn test_sample_variance_constant() {
        let xs = vec![3.0_f64; 10];
        let v = sample_variance(&xs);
        assert!(v.abs() < 1e-12, "variance of constant should be 0: {}", v);
    }
    #[test]
    fn test_sample_std_nonneg() {
        let xs = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let s = sample_std(&xs);
        assert!(s >= 0.0, "std should be non-negative");
    }
    // Symmetric data should have (near-)zero skewness.
    #[test]
    fn test_sample_skewness_symmetric() {
        let xs: Vec<f64> = (0..100).map(|i| i as f64 - 50.0).collect();
        let sk = sample_skewness(&xs);
        assert!(sk.abs() < 0.1, "symmetric dist skewness={}", sk);
    }
    // Uniform distribution has negative excess kurtosis (-1.2 in theory).
    #[test]
    fn test_sample_kurtosis_uniform() {
        let xs: Vec<f64> = (0..1000).map(|i| i as f64 / 1000.0).collect();
        let k = sample_kurtosis(&xs);
        assert!(k < 0.0, "uniform kurtosis should be negative: {}", k);
    }
    // --- Quantiles and KS distance ---
    #[test]
    fn test_empirical_quantiles_median() {
        let xs: Vec<f64> = (1..=9).map(|i| i as f64).collect();
        let qs = empirical_quantiles(&xs, &[0.5]);
        assert!((qs[0] - 5.0).abs() < 0.5, "median={}", qs[0]);
    }
    #[test]
    fn test_empirical_quantiles_min_max() {
        let xs = vec![3.0, 1.0, 4.0, 1.0, 5.0, 9.0];
        let qs = empirical_quantiles(&xs, &[0.0, 1.0]);
        assert!((qs[0] - 1.0).abs() < 1e-10, "min={}", qs[0]);
        assert!((qs[1] - 9.0).abs() < 1e-10, "max={}", qs[1]);
    }
    // ks_statistic comes from the parent module via `use super::*`.
    #[test]
    fn test_ks_same_distribution() {
        let xs: Vec<f64> = (0..20).map(|i| i as f64).collect();
        let ks = ks_statistic(&xs, &xs);
        assert!(
            ks.abs() < 0.1,
            "KS between same distribution should be ~0: {}",
            ks
        );
    }
    #[test]
    fn test_ks_different_distributions() {
        let xs: Vec<f64> = (0..20).map(|i| i as f64).collect();
        let ys: Vec<f64> = (20..40).map(|i| i as f64).collect();
        let ks = ks_statistic(&xs, &ys);
        assert!(
            ks > 0.5,
            "KS between disjoint distributions should be large: {}",
            ks
        );
    }
    // --- Reservoir sampling ---
    #[test]
    fn test_reservoir_sample_correct_size() {
        let mut rng = Lcg::new(1337);
        let data: Vec<i32> = (0..100).collect();
        let sample = reservoir_sample(&data, 10, &mut rng);
        assert_eq!(sample.len(), 10);
    }
    #[test]
    fn test_reservoir_sample_all_from_source() {
        let mut rng = Lcg::new(2048);
        let data: Vec<i32> = (0..20).collect();
        let sample = reservoir_sample(&data, 5, &mut rng);
        for &v in &sample {
            assert!(data.contains(&v), "sampled value {} not in source", v);
        }
    }
    // k > n should degrade gracefully to returning everything.
    #[test]
    fn test_reservoir_sample_k_larger_than_n() {
        let mut rng = Lcg::new(4096);
        let data = vec![1.0, 2.0, 3.0];
        let sample = reservoir_sample(&data, 10, &mut rng);
        assert_eq!(sample.len(), 3);
    }
    #[test]
    fn test_weighted_reservoir_correct_size() {
        let mut rng = Lcg::new(7777);
        let weights = vec![1.0, 2.0, 3.0, 0.5, 4.0];
        let idx = weighted_reservoir_sample(&weights, 3, &mut rng);
        assert_eq!(idx.len(), 3);
    }
    #[test]
    fn test_weighted_reservoir_valid_indices() {
        let mut rng = Lcg::new(8888);
        let weights = vec![1.0, 1.0, 1.0, 1.0, 1.0];
        let idx = weighted_reservoir_sample(&weights, 3, &mut rng);
        for &i in &idx {
            assert!(i < 5, "index {} out of range", i);
        }
    }
}