// oxiphysics_core/sampling/functions.rs

1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use super::types::{HaltonSequence, LatinHypercube, Lcg, Sobol, SobolSequence};
6use rand::RngExt;
7
8/// Sample a point uniformly on the surface of the unit sphere (Marsaglia 1972).
9///
10/// Returns `[x, y, z]` with `x² + y² + z² = 1`.
11pub fn sample_sphere_surface(rng: &mut Lcg) -> [f64; 3] {
12    loop {
13        let x = rng.next_f64_range(-1.0, 1.0);
14        let y = rng.next_f64_range(-1.0, 1.0);
15        let s = x * x + y * y;
16        if s >= 1.0 {
17            continue;
18        }
19        let r = (1.0 - s).sqrt();
20        return [2.0 * x * r, 2.0 * y * r, 1.0 - 2.0 * s];
21    }
22}
23/// Sample a point uniformly inside the closed unit ball (rejection sampling).
24///
25/// Returns `[x, y, z]` with `x² + y² + z² ≤ 1`.
26pub fn sample_sphere_volume(rng: &mut Lcg) -> [f64; 3] {
27    loop {
28        let x = rng.next_f64_range(-1.0, 1.0);
29        let y = rng.next_f64_range(-1.0, 1.0);
30        let z = rng.next_f64_range(-1.0, 1.0);
31        if x * x + y * y + z * z <= 1.0 {
32            return [x, y, z];
33        }
34    }
35}
36/// Sample a 3-D Gaussian vector with zero mean and isotropic standard deviation
37/// `sigma`.
38pub fn sample_gaussian_3d(rng: &mut Lcg, sigma: f64) -> [f64; 3] {
39    let (z0, z1) = rng.next_normal_pair();
40    let z2 = rng.next_normal();
41    [sigma * z0, sigma * z1, sigma * z2]
42}
43/// Sample a 3-D velocity vector from the Maxwell–Boltzmann distribution.
44///
45/// Each velocity component is drawn from `N(0, sqrt(k_B T / m))`.
46///
47/// # Arguments
48/// * `mass` – particle mass
49/// * `temp` – temperature in Kelvin
50/// * `kb`   – Boltzmann constant (use `1.380649e-23` for SI units)
51pub fn sample_maxwell_boltzmann(rng: &mut Lcg, mass: f64, temp: f64, kb: f64) -> [f64; 3] {
52    let sigma = (kb * temp / mass).sqrt();
53    sample_gaussian_3d(rng, sigma)
54}
55/// Stratified 1-D sampling.
56///
57/// Divides `[0, 1)` into `n` equal strata of width `1/n`.  One uniform random
58/// point is placed inside each stratum, so the returned vector has exactly `n`
59/// elements and every stratum `[k/n, (k+1)/n)` is represented.
60pub fn stratified_sample_1d(n: usize, rng: &mut Lcg) -> Vec<f64> {
61    let inv_n = 1.0 / n as f64;
62    (0..n)
63        .map(|k| (k as f64 + rng.next_f64()) * inv_n)
64        .collect()
65}
/// Trait for quasi-random sequences.
///
/// Implementors produce low-discrepancy points one at a time; the
/// dimensionality of the returned vector is fixed by the implementing type.
#[allow(dead_code)]
pub trait QuasiRandomSequence {
    /// Return the next point in the sequence.
    fn next(&mut self) -> Vec<f64>;
}
/// Generate 2D Poisson disk (blue noise) samples using Bridson's algorithm.
///
/// Returns at most `n` sample points in the unit square \[0,1\]² with minimum
/// separation distance `r`. The actual number of generated points may be less
/// than `n` if the domain cannot accommodate more.
///
/// NOTE(review): simplified variant — the neighbour test scans every accepted
/// sample (O(samples) per candidate) instead of using Bridson's background
/// acceleration grid, so this is O(n²·k) overall.
pub fn blue_noise_2d(n: usize, r: f64, rng: &mut Lcg) -> Vec<[f64; 2]> {
    let mut samples: Vec<[f64; 2]> = Vec::with_capacity(n);
    // Indices into `samples` that may still spawn new neighbours.
    let mut active: Vec<usize> = Vec::new();
    let r2 = r * r;
    // Bridson's recommended number of candidate attempts per active point.
    let k = 30;
    // Seed the process with a single uniform point in the unit square.
    let first = [rng.next_f64(), rng.next_f64()];
    samples.push(first);
    active.push(0);
    while !active.is_empty() && samples.len() < n {
        // Pick a random active point to spawn candidates around.
        let idx = (rng.next_u64() as usize) % active.len();
        let parent_idx = active[idx];
        let parent = samples[parent_idx];
        let mut found = false;
        for _ in 0..k {
            // Candidate uniformly in the annulus of radii [r, 2r) around the parent.
            let angle = rng.next_f64_range(0.0, 2.0 * std::f64::consts::PI);
            let radius = rng.next_f64_range(r, 2.0 * r);
            let candidate = [
                parent[0] + radius * angle.cos(),
                parent[1] + radius * angle.sin(),
            ];
            // Discard candidates that fall outside the unit square.
            if candidate[0] < 0.0
                || candidate[0] >= 1.0
                || candidate[1] < 0.0
                || candidate[1] >= 1.0
            {
                continue;
            }
            // Accept only if no already-accepted sample is closer than `r`.
            let valid = samples.iter().all(|&s| {
                let dx = s[0] - candidate[0];
                let dy = s[1] - candidate[1];
                dx * dx + dy * dy >= r2
            });
            if valid {
                let new_idx = samples.len();
                samples.push(candidate);
                active.push(new_idx);
                found = true;
                break;
            }
        }
        // No candidate fit after k attempts: retire this active point.
        if !found {
            active.remove(idx);
        }
    }
    samples
}
123/// Generate `n_samples` stratified samples in `n_dims` dimensions.
124///
125/// Each dimension is independently stratified into `n_samples` bins;
126/// samples are uniformly jittered within each bin.  This is a simplified
127/// (independent-per-dimension) version with no joint stratification.
128pub fn stratified_sample_nd(n_samples: usize, n_dims: usize, rng: &mut Lcg) -> Vec<Vec<f64>> {
129    let inv_n = 1.0 / n_samples as f64;
130    let mut result: Vec<Vec<f64>> = (0..n_samples).map(|_| Vec::with_capacity(n_dims)).collect();
131    for _ in 0..n_dims {
132        let mut strata: Vec<usize> = (0..n_samples).collect();
133        for i in (1..n_samples).rev() {
134            let j = (rng.next_u64() as usize) % (i + 1);
135            strata.swap(i, j);
136        }
137        for i in 0..n_samples {
138            let val = (strata[i] as f64 + rng.next_f64()) * inv_n;
139            result[i].push(val);
140        }
141    }
142    result
143}
144/// Systematic resampling for particle filters.
145///
146/// Given `particles` and their (unnormalized) `weights`, draw `n_out` particles
147/// with replacement proportional to weights.
148pub fn systematic_resample(
149    particles: &[f64],
150    weights: &[f64],
151    n_out: usize,
152    rng: &mut Lcg,
153) -> Vec<f64> {
154    let n = particles.len().min(weights.len());
155    if n == 0 || n_out == 0 {
156        return Vec::new();
157    }
158    let total: f64 = weights[..n].iter().sum();
159    let norm: Vec<f64> = weights[..n].iter().map(|&w| w / total).collect();
160    let mut cumsum = Vec::with_capacity(n);
161    let mut acc = 0.0;
162    for &w in &norm {
163        acc += w;
164        cumsum.push(acc);
165    }
166    let u0 = rng.next_f64() / n_out as f64;
167    let step = 1.0 / n_out as f64;
168    let mut result = Vec::with_capacity(n_out);
169    let mut j = 0usize;
170    for i in 0..n_out {
171        let u = u0 + i as f64 * step;
172        while j < n - 1 && cumsum[j] < u {
173            j += 1;
174        }
175        result.push(particles[j]);
176    }
177    result
178}
179/// Generate `n` multi-dimensional Halton points using the first `n_dims` primes as bases.
180///
181/// Returns `n` points, each of dimension `n_dims`, in `[0,1)^{n_dims}`.
182pub fn halton_multivariate(n: usize, n_dims: usize) -> Vec<Vec<f64>> {
183    pub(super) const PRIMES: [u32; 10] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
184    let d = n_dims.min(PRIMES.len());
185    (1..=n)
186        .map(|i| {
187            (0..d)
188                .map(|k| HaltonSequence::van_der_corput(i as u32, PRIMES[k]))
189                .collect()
190        })
191        .collect()
192}
193/// Latin Hypercube Sample: `n` points in `d` dimensions, each in `[0,1)`.
194///
195/// Uses `rand::Rng` for shuffling and jittering.
196#[allow(dead_code)]
197pub fn latin_hypercube_sample(n: usize, d: usize, rng: &mut impl rand::Rng) -> Vec<Vec<f64>> {
198    let inv_n = 1.0 / n as f64;
199    let mut result: Vec<Vec<f64>> = (0..n).map(|_| Vec::with_capacity(d)).collect();
200    for _ in 0..d {
201        let mut perm: Vec<usize> = (0..n).collect();
202        for i in (1..n).rev() {
203            let j = rng.random_range(0..=i);
204            perm.swap(i, j);
205        }
206        for i in 0..n {
207            let jitter: f64 = rng.random();
208            let val = (perm[i] as f64 + jitter) * inv_n;
209            result[i].push(val);
210        }
211    }
212    result
213}
/// Sobol' quasi-random sequence: `n` points in `d` dimensions (up to 3 dims).
///
/// Uses the same direction numbers as [`SobolSequence`].
///
/// Thin convenience wrapper: constructs a fresh [`SobolSequence`] and draws
/// the first `n` points of the sequence.
#[allow(dead_code)]
pub fn sobol_sequence(n: usize, d: usize) -> Vec<Vec<f64>> {
    SobolSequence::new(d).sample(n)
}
221/// Stratified (jittered) 1-D samples using `rand::Rng`.
222///
223/// Divides `[0,1)` into `n` strata; one jittered sample per stratum.
224#[allow(dead_code)]
225pub fn stratified_sample_1d_rng(n: usize, rng: &mut impl rand::Rng) -> Vec<f64> {
226    let inv_n = 1.0 / n as f64;
227    (0..n)
228        .map(|k| {
229            let jitter: f64 = rng.random();
230            (k as f64 + jitter) * inv_n
231        })
232        .collect()
233}
234/// Discrete inverse-CDF (importance) sampling.
235///
236/// Draws `n` indices from `0..pdf.len()` with probability proportional to
237/// `pdf[i]`.  Returns indices (with replacement).
238///
239/// # Panics
240/// Panics if `pdf` is empty or all weights are zero.
241#[allow(dead_code)]
242pub fn importance_sample(pdf: &[f64], n: usize, rng: &mut impl rand::Rng) -> Vec<usize> {
243    assert!(!pdf.is_empty(), "pdf must not be empty");
244    let total: f64 = pdf.iter().sum();
245    assert!(total > 0.0, "pdf weights must sum to a positive value");
246    let mut cdf = Vec::with_capacity(pdf.len());
247    let mut acc = 0.0;
248    for &w in pdf {
249        acc += w / total;
250        cdf.push(acc);
251    }
252    if let Some(last) = cdf.last_mut() {
253        *last = 1.0;
254    }
255    let mut samples = Vec::with_capacity(n);
256    for _ in 0..n {
257        let u: f64 = rng.random();
258        let idx = cdf.partition_point(|&c| c < u);
259        samples.push(idx.min(pdf.len() - 1));
260    }
261    samples
262}
263/// 2-D rejection sampling.
264///
265/// Draws `n` uniform samples from `[x_range.0, x_range.1) × [y_range.0, y_range.1)`
266/// and accepts `(x, y)` when `u < f(x, y)` where `u ~ U[0, max_f]`.
267///
268/// `f` must be non-negative; the function evaluates an internal upper bound
269/// by sampling a small grid.
270#[allow(dead_code)]
271pub fn rejection_sample_2d<F>(
272    f: F,
273    x_range: (f64, f64),
274    y_range: (f64, f64),
275    n: usize,
276    rng: &mut impl rand::Rng,
277) -> Vec<[f64; 2]>
278where
279    F: Fn(f64, f64) -> f64,
280{
281    let mut max_f = 1e-30_f64;
282    for ix in 0..20 {
283        for iy in 0..20 {
284            let x = x_range.0 + (ix as f64 + 0.5) / 20.0 * (x_range.1 - x_range.0);
285            let y = y_range.0 + (iy as f64 + 0.5) / 20.0 * (y_range.1 - y_range.0);
286            max_f = max_f.max(f(x, y));
287        }
288    }
289    if max_f <= 0.0 {
290        return Vec::new();
291    }
292    let mut samples = Vec::with_capacity(n);
293    while samples.len() < n {
294        let x: f64 = x_range.0 + rng.random::<f64>() * (x_range.1 - x_range.0);
295        let y: f64 = y_range.0 + rng.random::<f64>() * (y_range.1 - y_range.0);
296        let u: f64 = rng.random::<f64>() * max_f;
297        if u <= f(x, y) {
298            samples.push([x, y]);
299        }
300    }
301    samples
302}
/// Van der Corput / Halton sequence in a given `base`.  Returns `n` values in `[0,1)`.
///
/// Thin wrapper over [`HaltonSequence::sample`]; judging from the base-2
/// expectations in this file's tests, indexing starts at 1 (first value 0.5).
#[allow(dead_code)]
pub fn halton_sequence(n: usize, base: u32) -> Vec<f64> {
    HaltonSequence::sample(n, base)
}
308/// Direction numbers for dimensions 1–10 of the Sobol sequence.
309///
310/// These are the standard Joe & Kuo direction numbers for `s ≤ 10` dimensions.
311/// Dimension 0 is the trivial sequence `1<<(BITS-1-i)`.
312/// Dimensions 1–9 use the published primitive polynomials and initialisation
313/// values from Joe & Kuo (2003).
314pub(super) fn sobol10_direction_numbers(dim: usize) -> Vec<u32> {
315    pub(super) const BITS: usize = 32;
316    match dim {
317        0 => (0..BITS).map(|i| 1u32 << (BITS - 1 - i)).collect(),
318        1 => {
319            let mut v = vec![0u32; BITS];
320            v[0] = 1u32 << (BITS - 1);
321            for i in 1..BITS {
322                v[i] = v[i - 1] ^ (v[i - 1] >> 1);
323            }
324            v
325        }
326        2 => {
327            let mut v = vec![0u32; BITS];
328            v[0] = 1u32 << (BITS - 1);
329            v[1] = 1u32 << (BITS - 2);
330            for i in 2..BITS {
331                v[i] = v[i - 2] ^ (v[i - 2] >> 2) ^ v[i - 1] ^ (v[i - 1] >> 1);
332            }
333            v
334        }
335        3 => {
336            let mut v = vec![0u32; BITS];
337            v[0] = 1u32 << (BITS - 1);
338            v[1] = 1u32 << (BITS - 2);
339            v[2] = (3u32 << (BITS - 3)).wrapping_shr(0);
340            v[2] = 3u32 << (BITS - 3);
341            for i in 3..BITS {
342                v[i] = v[i - 3] ^ (v[i - 3] >> 3) ^ v[i - 1] ^ (v[i - 1] >> 1);
343            }
344            v
345        }
346        4 => {
347            let mut v = vec![0u32; BITS];
348            v[0] = 1u32 << (BITS - 1);
349            v[1] = 3u32 << (BITS - 3);
350            v[2] = 7u32 << (BITS - 4);
351            for i in 3..BITS {
352                v[i] = v[i - 3]
353                    ^ (v[i - 3] >> 3)
354                    ^ (v[i - 2] << 1)
355                    ^ (v[i - 2] >> 1)
356                    ^ v[i - 1]
357                    ^ (v[i - 1] >> 1);
358            }
359            v
360        }
361        5 => {
362            let mut v = vec![0u32; BITS];
363            v[0] = 1u32 << (BITS - 1);
364            v[1] = 1u32 << (BITS - 2);
365            v[2] = 5u32 << (BITS - 4);
366            v[3] = 13u32 << (BITS - 5);
367            for i in 4..BITS {
368                v[i] = v[i - 4] ^ (v[i - 4] >> 4) ^ v[i - 1] ^ (v[i - 1] >> 1);
369            }
370            v
371        }
372        6 => {
373            let mut v = vec![0u32; BITS];
374            v[0] = 1u32 << (BITS - 1);
375            v[1] = 3u32 << (BITS - 3);
376            v[2] = 13u32 << (BITS - 5);
377            v[3] = 5u32 << (BITS - 4);
378            for i in 4..BITS {
379                v[i] = v[i - 4]
380                    ^ (v[i - 4] >> 4)
381                    ^ (v[i - 3] << 1)
382                    ^ (v[i - 3] >> 2)
383                    ^ v[i - 1]
384                    ^ (v[i - 1] >> 1);
385            }
386            v
387        }
388        7 => {
389            let mut v = vec![0u32; BITS];
390            v[0] = 1u32 << (BITS - 1);
391            v[1] = 1u32 << (BITS - 2);
392            v[2] = 5u32 << (BITS - 4);
393            v[3] = 11u32 << (BITS - 5);
394            v[4] = 19u32 << (BITS - 6);
395            for i in 5..BITS {
396                v[i] = v[i - 5]
397                    ^ (v[i - 5] >> 5)
398                    ^ v[i - 2]
399                    ^ (v[i - 2] >> 2)
400                    ^ v[i - 1]
401                    ^ (v[i - 1] >> 1);
402            }
403            v
404        }
405        8 => {
406            let mut v = vec![0u32; BITS];
407            v[0] = 1u32 << (BITS - 1);
408            v[1] = 3u32 << (BITS - 3);
409            v[2] = 7u32 << (BITS - 4);
410            v[3] = 5u32 << (BITS - 4);
411            v[4] = 21u32 << (BITS - 6);
412            for i in 5..BITS {
413                v[i] = v[i - 5]
414                    ^ (v[i - 5] >> 5)
415                    ^ (v[i - 3] << 1)
416                    ^ (v[i - 3] >> 2)
417                    ^ v[i - 1]
418                    ^ (v[i - 1] >> 1);
419            }
420            v
421        }
422        9 => {
423            let mut v = vec![0u32; BITS];
424            v[0] = 1u32 << (BITS - 1);
425            v[1] = 1u32 << (BITS - 2);
426            v[2] = 1u32 << (BITS - 3);
427            v[3] = 7u32 << (BITS - 5);
428            v[4] = 13u32 << (BITS - 6);
429            v[5] = 37u32 << (BITS - 7);
430            for i in 6..BITS {
431                v[i] = v[i - 6] ^ (v[i - 6] >> 6) ^ v[i - 1] ^ (v[i - 1] >> 1);
432            }
433            v
434        }
435        _ => (0..BITS).map(|i| 1u32 << (BITS - 1 - i)).collect(),
436    }
437}
438/// Generate `n` Sobol points in 10 dimensions.
439///
440/// Each point is a `Vec`f64` of length 10 with values in `\[0, 1)`.
441#[allow(dead_code)]
442pub fn sobol_sequence_10d(n: usize) -> Vec<Vec<f64>> {
443    pub(super) const BITS: usize = 32;
444    pub(super) const N_DIMS: usize = 10;
445    let dirs: Vec<Vec<u32>> = (0..N_DIMS).map(sobol10_direction_numbers).collect();
446    (0..n)
447        .map(|idx| {
448            let gray = idx ^ (idx >> 1);
449            (0..N_DIMS)
450                .map(|d| {
451                    let x = (0..BITS)
452                        .filter(|&i| (gray >> i) & 1 == 1)
453                        .fold(0u32, |acc, i| acc ^ dirs[d][i]);
454                    x as f64 / (1u64 << BITS) as f64
455                })
456                .collect()
457        })
458        .collect()
459}
460/// Monte Carlo integration of `f` over `\[a, b\]` using `n` uniform samples.
461///
462/// Returns `(estimate, standard_error)`.
463///
464/// The estimate converges as O(1/√n) to ∫_a^b f(x) dx.
465#[allow(dead_code)]
466pub fn monte_carlo_integrate(
467    f: impl Fn(f64) -> f64,
468    a: f64,
469    b: f64,
470    n: usize,
471    rng: &mut Lcg,
472) -> (f64, f64) {
473    let width = b - a;
474    let mut sum = 0.0_f64;
475    let mut sum2 = 0.0_f64;
476    for _ in 0..n {
477        let x = rng.next_f64_range(a, b);
478        let v = f(x);
479        sum += v;
480        sum2 += v * v;
481    }
482    let n_f = n as f64;
483    let mean = sum / n_f;
484    let variance = (sum2 / n_f - mean * mean).max(0.0);
485    let stderr = (variance / n_f).sqrt() * width;
486    (mean * width, stderr)
487}
488/// Monte Carlo integration of `f` over the unit hypercube `\[0,1)^d` in `d` dimensions.
489///
490/// Returns `(estimate, standard_error)`.
491#[allow(dead_code)]
492pub fn monte_carlo_integrate_nd(
493    f: impl Fn(&[f64]) -> f64,
494    d: usize,
495    n: usize,
496    rng: &mut Lcg,
497) -> (f64, f64) {
498    let mut sum = 0.0_f64;
499    let mut sum2 = 0.0_f64;
500    let mut pt = vec![0.0_f64; d];
501    for _ in 0..n {
502        for x in pt.iter_mut() {
503            *x = rng.next_f64();
504        }
505        let v = f(&pt);
506        sum += v;
507        sum2 += v * v;
508    }
509    let n_f = n as f64;
510    let mean = sum / n_f;
511    let variance = (sum2 / n_f - mean * mean).max(0.0);
512    (mean, (variance / n_f).sqrt())
513}
514/// Quasi-Monte Carlo integration using the Halton sequence.
515///
516/// Integrates `f` over `\[a, b\]` using `n` Halton (van der Corput base-2) points.
517/// Typically O(log(n)/n) convergence for smooth integrands.
518#[allow(dead_code)]
519pub fn qmc_integrate_halton(f: impl Fn(f64) -> f64, a: f64, b: f64, n: usize) -> f64 {
520    let width = b - a;
521    let sum: f64 = (1..=n)
522        .map(|i| {
523            let t = HaltonSequence::van_der_corput(i as u32, 2);
524            f(a + t * width)
525        })
526        .sum();
527    sum / n as f64 * width
528}
529/// Quasi-Monte Carlo integration using the 1-D Sobol sequence.
530///
531/// Integrates `f` over `\[a, b\]` using `n` Sobol points.
532#[allow(dead_code)]
533pub fn qmc_integrate_sobol(f: impl Fn(f64) -> f64, a: f64, b: f64, n: usize) -> f64 {
534    let width = b - a;
535    let mut sobol = Sobol::new_1d();
536    let _ = sobol.next_1d();
537    let sum: f64 = (0..n).map(|_| f(a + sobol.next_1d() * width)).sum();
538    sum / n as f64 * width
539}
540/// Control-variate Monte Carlo estimator.
541///
542/// Estimates `E\[f(X)\]` using control variate `g` whose mean `E\[g(X)\]` is known.
543///
544/// Returns the reduced-variance estimate.
545#[allow(dead_code)]
546pub fn mc_control_variate(
547    f: impl Fn(f64) -> f64,
548    g: impl Fn(f64) -> f64,
549    g_mean: f64,
550    a: f64,
551    b: f64,
552    n: usize,
553    rng: &mut Lcg,
554) -> f64 {
555    let width = b - a;
556    let mut f_vals = Vec::with_capacity(n);
557    let mut g_vals = Vec::with_capacity(n);
558    for _ in 0..n {
559        let x = rng.next_f64_range(a, b);
560        f_vals.push(f(x));
561        g_vals.push(g(x));
562    }
563    let n_f = n as f64;
564    let f_mean = f_vals.iter().sum::<f64>() / n_f;
565    let g_sample_mean = g_vals.iter().sum::<f64>() / n_f;
566    let mut cov_fg = 0.0_f64;
567    let mut var_g = 0.0_f64;
568    for (&fi, &gi) in f_vals.iter().zip(g_vals.iter()) {
569        cov_fg += (fi - f_mean) * (gi - g_sample_mean);
570        var_g += (gi - g_sample_mean) * (gi - g_sample_mean);
571    }
572    let c = if var_g > 1e-30 { cov_fg / var_g } else { 0.0 };
573    (f_mean - c * (g_sample_mean - g_mean)) * width
574}
575/// Antithetic variates Monte Carlo estimator.
576///
577/// For each sample `u`, also evaluates at `1-u`, halving the number of
578/// independent evaluations needed for the same variance reduction.
579#[allow(dead_code)]
580pub fn mc_antithetic(f: impl Fn(f64) -> f64, a: f64, b: f64, n: usize, rng: &mut Lcg) -> f64 {
581    let width = b - a;
582    let m = n.div_ceil(2);
583    let sum: f64 = (0..m)
584        .map(|_| {
585            let u = rng.next_f64();
586            let x1 = a + u * width;
587            let x2 = a + (1.0 - u) * width;
588            (f(x1) + f(x2)) * 0.5
589        })
590        .sum();
591    sum / m as f64 * width
592}
/// Compute unnormalized importance weights for a set of samples.
///
/// Given samples drawn from `proposal(x)`, returns the importance weight
/// `target(x) / proposal(x)` for each sample.
#[allow(dead_code)]
pub fn importance_weights(
    samples: &[f64],
    target: impl Fn(f64) -> f64,
    proposal: impl Fn(f64) -> f64,
) -> Vec<f64> {
    let mut weights = Vec::with_capacity(samples.len());
    for &x in samples {
        let q = proposal(x);
        // Guard against (near-)zero proposal density to avoid inf/NaN weights.
        weights.push(if q > 1e-300 { target(x) / q } else { 0.0 });
    }
    weights
}
/// Self-normalized importance sampling estimator for `E_target\[h(X)\]`.
///
/// Samples are drawn from `proposal`; returns the weighted mean of `h`.
#[allow(dead_code)]
pub fn self_normalized_is(
    samples: &[f64],
    h: impl Fn(f64) -> f64,
    target: impl Fn(f64) -> f64,
    proposal: impl Fn(f64) -> f64,
) -> f64 {
    let (mut total_w, mut total_wh) = (0.0_f64, 0.0_f64);
    for &x in samples {
        let q = proposal(x);
        // Skip points where the proposal density underflows to avoid infinite weights.
        if q >= 1e-300 {
            let w = target(x) / q;
            total_w += w;
            total_wh += w * h(x);
        }
    }
    if total_w < 1e-300 { 0.0 } else { total_wh / total_w }
}
/// Effective sample size (ESS) from normalized importance weights.
///
/// ESS = (Σ wᵢ)² / Σ wᵢ²
#[allow(dead_code)]
pub fn effective_sample_size(weights: &[f64]) -> f64 {
    let total: f64 = weights.iter().sum();
    // All-zero (or empty) weight vector: no effective samples at all.
    if total < 1e-300 {
        return 0.0;
    }
    let mut sum_sq = 0.0_f64;
    for &w in weights {
        let wn = w / total;
        sum_sq += wn * wn;
    }
    if sum_sq < 1e-300 {
        weights.len() as f64
    } else {
        1.0 / sum_sq
    }
}
/// Generate `n_samples` stratified samples over the unit hypercube `\[0,1)^d`.
///
/// This is the Latin Hypercube design: for each dimension, the `n_samples`
/// strata are shuffled independently so that every stratum is visited exactly
/// once per dimension.  Identical to [`LatinHypercube::sample`] but as a
/// free function.
///
/// Thin wrapper: builds a fresh [`LatinHypercube`] design of the requested
/// size and draws one sample set from it.
#[allow(dead_code)]
pub fn stratified_unit_hypercube(n_samples: usize, d: usize, rng: &mut Lcg) -> Vec<Vec<f64>> {
    LatinHypercube::new(n_samples, d).sample(rng)
}
660/// Latin Hypercube Sampling with maximin distance optimization.
661///
662/// Generates `n_candidates` LHS designs and returns the one whose minimum
663/// inter-point distance (maximin criterion) is largest.
664///
665/// # Arguments
666/// * `n_samples`    – number of points per design
667/// * `n_dims`       – number of dimensions
668/// * `n_candidates` – how many candidate designs to evaluate
669/// * `rng`          – random number generator
670#[allow(dead_code)]
671pub fn lhs_maximin(
672    n_samples: usize,
673    n_dims: usize,
674    n_candidates: usize,
675    rng: &mut Lcg,
676) -> Vec<Vec<f64>> {
677    let lhs = LatinHypercube::new(n_samples, n_dims);
678    let mut best: Vec<Vec<f64>> = lhs.sample(rng);
679    let mut best_min_dist = min_pairwise_dist(&best);
680    for _ in 1..n_candidates {
681        let candidate = lhs.sample(rng);
682        let d = min_pairwise_dist(&candidate);
683        if d > best_min_dist {
684            best_min_dist = d;
685            best = candidate;
686        }
687    }
688    best
689}
690/// Compute the minimum pairwise Euclidean distance among a set of points.
691pub(super) fn min_pairwise_dist(pts: &[Vec<f64>]) -> f64 {
692    let n = pts.len();
693    if n < 2 {
694        return f64::INFINITY;
695    }
696    let mut min_d2 = f64::INFINITY;
697    for i in 0..n {
698        for j in (i + 1)..n {
699            let d2: f64 = pts[i]
700                .iter()
701                .zip(pts[j].iter())
702                .map(|(a, b)| (a - b).powi(2))
703                .sum();
704            if d2 < min_d2 {
705                min_d2 = d2;
706            }
707        }
708    }
709    min_d2.sqrt()
710}
711/// Bootstrap resample `data` with replacement, returning `n` resampled datasets.
712///
713/// Each dataset is a vector of length `data.len()` drawn uniformly with
714/// replacement from `data`.
715#[allow(dead_code)]
716pub fn bootstrap_resample(data: &[f64], n_resamples: usize, rng: &mut Lcg) -> Vec<Vec<f64>> {
717    let m = data.len();
718    (0..n_resamples)
719        .map(|_| {
720            (0..m)
721                .map(|_| {
722                    let idx = (rng.next_u64() as usize) % m;
723                    data[idx]
724                })
725                .collect()
726        })
727        .collect()
728}
729/// Bootstrap estimate of the mean and its 95% confidence interval.
730///
731/// Returns `(mean, ci_low, ci_high)`.
732#[allow(dead_code)]
733pub fn bootstrap_mean_ci(data: &[f64], n_resamples: usize, rng: &mut Lcg) -> (f64, f64, f64) {
734    let resamples = bootstrap_resample(data, n_resamples, rng);
735    let mut means: Vec<f64> = resamples
736        .iter()
737        .map(|s| s.iter().sum::<f64>() / s.len() as f64)
738        .collect();
739    means.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
740    let mean = data.iter().sum::<f64>() / data.len() as f64;
741    let lo_idx = (0.025 * n_resamples as f64) as usize;
742    let hi_idx = ((0.975 * n_resamples as f64) as usize).min(n_resamples - 1);
743    (mean, means[lo_idx], means[hi_idx])
744}
745/// Draw `n` samples from an arbitrary 1-D PDF using rejection sampling.
746///
747/// `pdf` must be non-negative; `envelope` must satisfy `pdf(x) ≤ envelope`
748/// for all `x ∈ \[lo, hi)`.
749///
750/// # Arguments
751/// * `pdf`      – target density function
752/// * `lo`, `hi` – support of the distribution
753/// * `envelope` – upper bound of `pdf` over the support
754/// * `n`        – number of samples to draw
755/// * `rng`      – LCG random source
756#[allow(dead_code)]
757pub fn rejection_sample_1d(
758    pdf: impl Fn(f64) -> f64,
759    lo: f64,
760    hi: f64,
761    envelope: f64,
762    n: usize,
763    rng: &mut Lcg,
764) -> Vec<f64> {
765    let mut samples = Vec::with_capacity(n);
766    while samples.len() < n {
767        let x = rng.next_f64_range(lo, hi);
768        let u = rng.next_f64() * envelope;
769        if u < pdf(x) {
770            samples.push(x);
771        }
772    }
773    samples
774}
/// Compute IS estimate of `E\[h(X)\]` where `X ~ target` but samples are drawn
/// from `proposal`.
///
/// Returns `(estimate, effective_sample_size)`.
///
/// `weights\[i\] = target(x_i) / proposal(x_i)`; samples whose proposal
/// density underflows (< 1e-300) are skipped to avoid infinite weights.
#[allow(dead_code)]
pub fn importance_sampling_estimate(
    samples: &[f64],
    h: impl Fn(f64) -> f64,
    target: impl Fn(f64) -> f64,
    proposal: impl Fn(f64) -> f64,
) -> (f64, f64) {
    let mut sum_w = 0.0_f64;
    let mut sum_wh = 0.0_f64;
    let mut sum_w2 = 0.0_f64;
    for &x in samples {
        let p = proposal(x);
        if p < 1e-300 {
            continue;
        }
        let w = target(x) / p;
        sum_w += w;
        sum_wh += w * h(x);
        sum_w2 += w * w; // was `(w / 1.0).powi(2)` — the `/ 1.0` was a no-op
    }
    let estimate = if sum_w > 1e-300 { sum_wh / sum_w } else { 0.0 };
    // Kish effective sample size: (Σw)² / Σw².
    let ess = if sum_w2 > 1e-300 {
        (sum_w * sum_w) / sum_w2
    } else {
        0.0
    };
    (estimate, ess)
}
809#[cfg(test)]
810mod tests {
811    use super::*;
812
813    use crate::sampling::GibbsSampler;
814
815    use crate::sampling::ImportanceSampler;
816    use crate::sampling::Lcg;
817    use crate::sampling::MetropolisHastings;
818    use crate::sampling::StratifiedSampler;
819
820    /// next_f64 must lie in [0, 1).
821    #[test]
822    fn test_next_f64_range() {
823        let mut rng = Lcg::new(42);
824        for _ in 0..10_000 {
825            let v = rng.next_f64();
826            assert!((0.0..1.0).contains(&v), "out of range: {v}");
827        }
828    }
829    /// Sample 1 000 standard normals; empirical mean must be close to 0.
830    #[test]
831    fn test_next_normal_mean() {
832        let mut rng = Lcg::new(123);
833        let n = 1_000;
834        let mean: f64 = (0..n).map(|_| rng.next_normal()).sum::<f64>() / n as f64;
835        assert!(mean.abs() < 0.15, "mean {mean} too far from 0");
836    }
837    /// All points on the sphere surface must have |v| ≈ 1.
838    #[test]
839    fn test_sample_sphere_surface_norm() {
840        let mut rng = Lcg::new(7);
841        for _ in 0..1_000 {
842            let [x, y, z] = sample_sphere_surface(&mut rng);
843            let r = (x * x + y * y + z * z).sqrt();
844            assert!((r - 1.0).abs() < 1e-12, "sphere surface radius = {r}");
845        }
846    }
847    /// All points inside the ball must have |v| ≤ 1.
848    #[test]
849    fn test_sample_sphere_volume_inside() {
850        let mut rng = Lcg::new(99);
851        for _ in 0..1_000 {
852            let [x, y, z] = sample_sphere_volume(&mut rng);
853            let r2 = x * x + y * y + z * z;
854            assert!(r2 <= 1.0 + 1e-12, "point outside unit ball: r² = {r2}");
855        }
856    }
857    /// Stratified samples: every stratum `\[k/n, (k+1)/n)` must be occupied.
858    #[test]
859    fn test_stratified_sample_1d_coverage() {
860        let mut rng = Lcg::new(55);
861        let n = 20;
862        let samples = stratified_sample_1d(n, &mut rng);
863        assert_eq!(samples.len(), n);
864        let inv_n = 1.0 / n as f64;
865        for (k, &s) in samples.iter().enumerate() {
866            let lo = k as f64 * inv_n;
867            let hi = lo + inv_n;
868            assert!(
869                s >= lo && s < hi,
870                "stratum {k}: sample {s} not in [{lo}, {hi})"
871            );
872        }
873    }
874    #[test]
875    fn test_lhs_sample_count() {
876        let mut rng = Lcg::new(100);
877        let lhs = LatinHypercube::new(10, 3);
878        let samples = lhs.sample(&mut rng);
879        assert_eq!(samples.len(), 10);
880        for s in &samples {
881            assert_eq!(s.len(), 3);
882        }
883    }
884    #[test]
885    fn test_lhs_covers_all_strata() {
886        let mut rng = Lcg::new(200);
887        let n = 8;
888        let lhs = LatinHypercube::new(n, 2);
889        let samples = lhs.sample(&mut rng);
890        let inv_n = 1.0 / n as f64;
891        for d in 0..2 {
892            let mut strata = vec![false; n];
893            for s in &samples {
894                let stratum = (s[d] / inv_n) as usize;
895                let stratum = stratum.min(n - 1);
896                strata[stratum] = true;
897            }
898            for (k, &occupied) in strata.iter().enumerate() {
899                assert!(occupied, "dim {d} stratum {k} not occupied");
900            }
901        }
902    }
903    #[test]
904    fn test_stratified_2d_count() {
905        let mut rng = Lcg::new(300);
906        let ss = StratifiedSampler::new(5);
907        let samples = ss.sample_2d(&mut rng);
908        assert_eq!(samples.len(), 25);
909    }
910    #[test]
911    fn test_stratified_2d_coverage() {
912        let mut rng = Lcg::new(400);
913        let n = 4;
914        let ss = StratifiedSampler::new(n);
915        let samples = ss.sample_2d(&mut rng);
916        let inv_n = 1.0 / n as f64;
917        for (idx, s) in samples.iter().enumerate() {
918            let ix = idx % n;
919            let iy = idx / n;
920            let x_lo = ix as f64 * inv_n;
921            let y_lo = iy as f64 * inv_n;
922            assert!(s[0] >= x_lo && s[0] < x_lo + inv_n, "x out of stratum");
923            assert!(s[1] >= y_lo && s[1] < y_lo + inv_n, "y out of stratum");
924        }
925    }
926    #[test]
927    fn test_halton_in_unit_interval() {
928        let samples = HaltonSequence::sample(100, 2);
929        for &s in &samples {
930            assert!((0.0..1.0).contains(&s), "Halton sample {s} out of [0,1)");
931        }
932    }
933    #[test]
934    fn test_halton_base2_first_values() {
935        let samples = HaltonSequence::sample(4, 2);
936        assert!((samples[0] - 0.5).abs() < 1e-12);
937        assert!((samples[1] - 0.25).abs() < 1e-12);
938        assert!((samples[2] - 0.75).abs() < 1e-12);
939        assert!((samples[3] - 0.125).abs() < 1e-12);
940    }
941    #[test]
942    fn test_sobol_multi_dim_count() {
943        let sobol = SobolSequence::new(3);
944        let samples = sobol.sample(16);
945        assert_eq!(samples.len(), 16);
946        for s in &samples {
947            assert_eq!(s.len(), 3);
948        }
949    }
950    #[test]
951    fn test_sobol_in_unit_interval() {
952        let sobol = SobolSequence::new(2);
953        let samples = sobol.sample(64);
954        for s in &samples {
955            for &v in s {
956                assert!((0.0..1.0).contains(&v), "Sobol value {v} out of [0,1)");
957            }
958        }
959    }
960    #[test]
961    fn test_importance_sampler_generates_samples() {
962        pub(super) fn uniform_pdf(_x: f64) -> f64 {
963            1.0
964        }
965        let sampler = ImportanceSampler::new(uniform_pdf, 1.0);
966        let mut rng = Lcg::new(500);
967        let samples = sampler.sample(50, &mut rng);
968        assert_eq!(samples.len(), 50);
969        for &s in &samples {
970            assert!((0.0..1.0).contains(&s));
971        }
972    }
973    #[test]
974    fn test_importance_sampler_biased_pdf() {
975        pub(super) fn biased_pdf(x: f64) -> f64 {
976            if x >= 0.5 { 2.0 } else { 0.0 }
977        }
978        let sampler = ImportanceSampler::new(biased_pdf, 2.0);
979        let mut rng = Lcg::new(600);
980        let samples = sampler.sample(100, &mut rng);
981        for &s in &samples {
982            assert!(s >= 0.5, "all samples should be >= 0.5, got {s}");
983        }
984    }
985    #[test]
986    fn test_metropolis_hastings_stays_in_range() {
987        pub(super) fn log_target(x: f64) -> f64 {
988            -0.5 * x * x
989        }
990        let mut mcmc = MetropolisHastings::new(0.0, 1.0, 42);
991        let samples = mcmc.sample(500, log_target);
992        let n_outliers = samples.iter().filter(|&&x| x.abs() > 5.0).count();
993        assert!(n_outliers < 20, "{} outliers > 5σ", n_outliers);
994    }
995    #[test]
996    fn test_metropolis_hastings_mean_near_zero() {
997        pub(super) fn log_target(x: f64) -> f64 {
998            -0.5 * x * x
999        }
1000        let mut mcmc = MetropolisHastings::new(0.0, 0.5, 99);
1001        let samples = mcmc.sample(2000, log_target);
1002        let mean: f64 = samples[500..].iter().sum::<f64>() / (samples.len() - 500) as f64;
1003        assert!(mean.abs() < 0.5, "mean={}", mean);
1004    }
    #[test]
    fn test_gibbs_sampler_basic() {
        // Smoke test: a 2-D Gibbs sampler whose conditionals always return 0
        // should still produce the requested number of 2-component samples.
        // The boxed closures are built in-place so both coerce to the same
        // `Box<dyn Fn…>` element type — do not hoist them into separate lets.
        let mut gibbs = GibbsSampler::new(2, 42);
        let samples = gibbs.sample(
            200,
            &[
                Box::new(|_, _: &[f64]| 0.0_f64),
                Box::new(|_, _: &[f64]| 0.0_f64),
            ],
            &[1.0, 1.0],
        );
        assert_eq!(samples.len(), 200);
        for s in &samples {
            assert_eq!(s.len(), 2);
        }
    }
1021    #[test]
1022    fn test_blue_noise_count() {
1023        let mut rng = Lcg::new(777);
1024        let samples = blue_noise_2d(16, 0.1, &mut rng);
1025        assert_eq!(samples.len(), 16);
1026    }
1027    #[test]
1028    fn test_blue_noise_min_distance() {
1029        let mut rng = Lcg::new(888);
1030        let r = 0.15;
1031        let n = 20;
1032        let samples = blue_noise_2d(n, r, &mut rng);
1033        for i in 0..samples.len() {
1034            for j in (i + 1)..samples.len() {
1035                let dx = samples[i][0] - samples[j][0];
1036                let dy = samples[i][1] - samples[j][1];
1037                let d = (dx * dx + dy * dy).sqrt();
1038                assert!(d >= r - 1e-9, "samples {} and {} too close: d={}", i, j, d);
1039            }
1040        }
1041    }
1042    #[test]
1043    fn test_stratified_nd_count() {
1044        let mut rng = Lcg::new(999);
1045        let samples = stratified_sample_nd(3, 4, &mut rng);
1046        assert_eq!(samples.len(), 3);
1047        for s in &samples {
1048            assert_eq!(s.len(), 4);
1049        }
1050    }
1051    #[test]
1052    fn test_stratified_nd_in_range() {
1053        let mut rng = Lcg::new(1001);
1054        let samples = stratified_sample_nd(10, 3, &mut rng);
1055        for s in &samples {
1056            for &v in s {
1057                assert!((0.0..=1.0).contains(&v), "value {} out of [0,1]", v);
1058            }
1059        }
1060    }
1061    #[test]
1062    fn test_sobol_1d_low_discrepancy() {
1063        let mut sobol = Sobol::new_1d();
1064        let vals: Vec<f64> = (0..16).map(|_| sobol.next_1d()).collect();
1065        for &v in &vals {
1066            assert!((0.0..1.0).contains(&v), "Sobol val {} out of range", v);
1067        }
1068    }
1069    #[test]
1070    fn test_lhs_rand_count_and_dims() {
1071        let mut rng = rand::rng();
1072        let samples = latin_hypercube_sample(12, 4, &mut rng);
1073        assert_eq!(samples.len(), 12);
1074        for s in &samples {
1075            assert_eq!(s.len(), 4);
1076        }
1077    }
1078    #[test]
1079    fn test_sobol_sequence_equidistributed_dim0() {
1080        let samples = sobol_sequence(16, 1);
1081        assert_eq!(samples.len(), 16);
1082        let mut sorted: Vec<f64> = samples.iter().map(|s| s[0]).collect();
1083        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
1084        for &v in &sorted {
1085            assert!((0.0..1.0).contains(&v), "sobol val {} out of range", v);
1086        }
1087    }
1088    #[test]
1089    fn test_stratified_sample_1d_rng_covers_unit_interval() {
1090        let mut rng = rand::rng();
1091        let n = 10;
1092        let samples = stratified_sample_1d_rng(n, &mut rng);
1093        assert_eq!(samples.len(), n);
1094        for &s in &samples {
1095            assert!((0.0..=1.0).contains(&s), "value {} out of [0,1]", s);
1096        }
1097    }
1098    #[test]
1099    fn test_halton_sequence_in_unit_interval() {
1100        let samples = halton_sequence(50, 2);
1101        assert_eq!(samples.len(), 50);
1102        let sum: f64 = samples.iter().sum();
1103        assert!(
1104            sum > 0.0 && sum < 50.0,
1105            "Halton sum {} out of expected range",
1106            sum
1107        );
1108        for &s in &samples {
1109            assert!((0.0..1.0).contains(&s), "Halton value {} out of [0,1)", s);
1110        }
1111    }
1112    #[test]
1113    fn test_importance_sample_count() {
1114        let mut rng = rand::rng();
1115        let pdf = vec![1.0, 2.0, 3.0, 4.0];
1116        let samples = importance_sample(&pdf, 100, &mut rng);
1117        assert_eq!(samples.len(), 100);
1118        for &idx in &samples {
1119            assert!(idx < pdf.len(), "index {} out of range", idx);
1120        }
1121    }
1122    #[test]
1123    fn test_importance_sample_biased_toward_last() {
1124        let mut rng = rand::rng();
1125        let pdf = vec![0.001, 0.001, 0.001, 100.0];
1126        let samples = importance_sample(&pdf, 200, &mut rng);
1127        let n_last = samples.iter().filter(|&&i| i == 3).count();
1128        assert!(
1129            n_last > 150,
1130            "expected mostly index 3, got {} out of 200",
1131            n_last
1132        );
1133    }
1134    #[test]
1135    fn test_rejection_sample_2d_count() {
1136        let mut rng = rand::rng();
1137        let samples = rejection_sample_2d(|_x, _y| 1.0, (0.0, 1.0), (0.0, 1.0), 20, &mut rng);
1138        assert_eq!(samples.len(), 20);
1139        for s in &samples {
1140            assert!(s[0] >= 0.0 && s[0] < 1.0, "x={} out of range", s[0]);
1141            assert!(s[1] >= 0.0 && s[1] < 1.0, "y={} out of range", s[1]);
1142        }
1143    }
1144    #[test]
1145    fn test_weighted_resample_count() {
1146        let particles: Vec<f64> = (0..10).map(|i| i as f64).collect();
1147        let weights: Vec<f64> = vec![1.0; 10];
1148        let mut rng = Lcg::new(42);
1149        let resampled = systematic_resample(&particles, &weights, 10, &mut rng);
1150        assert_eq!(resampled.len(), 10);
1151    }
1152    #[test]
1153    fn test_weighted_resample_biased() {
1154        let particles: Vec<f64> = (0..10).map(|i| i as f64).collect();
1155        let mut weights = vec![0.001; 10];
1156        weights[5] = 100.0;
1157        let mut rng = Lcg::new(42);
1158        let resampled = systematic_resample(&particles, &weights, 20, &mut rng);
1159        let n_fives = resampled
1160            .iter()
1161            .filter(|&&v| (v - 5.0).abs() < 1e-9)
1162            .count();
1163        assert!(
1164            n_fives > 15,
1165            "most resampled should be 5.0, got {}",
1166            n_fives
1167        );
1168    }
1169    #[test]
1170    fn test_sobol10_in_unit_interval() {
1171        let pts = sobol_sequence_10d(64);
1172        assert_eq!(pts.len(), 64);
1173        for p in &pts {
1174            assert_eq!(p.len(), 10);
1175            for &v in p {
1176                assert!((0.0..1.0).contains(&v), "Sobol10 value {} out of [0,1)", v);
1177            }
1178        }
1179    }
1180    #[test]
1181    fn test_sobol10_dim0_matches_1d() {
1182        let pts10 = sobol_sequence_10d(16);
1183        let pts1 = sobol_sequence(16, 1);
1184        for (p10, p1) in pts10.iter().zip(pts1.iter()) {
1185            assert!((p10[0] - p1[0]).abs() < 1e-14, "dim0 mismatch");
1186        }
1187    }
1188    #[test]
1189    fn test_monte_carlo_integrate_constant() {
1190        let mut rng = Lcg::new(42);
1191        let (est, err) = monte_carlo_integrate(|_| 1.0, 0.0, 1.0, 10_000, &mut rng);
1192        assert!((est - 1.0).abs() < 0.01, "estimate={}", est);
1193        assert!(err < 0.01, "error={}", err);
1194    }
1195    #[test]
1196    fn test_monte_carlo_integrate_linear() {
1197        let mut rng = Lcg::new(7);
1198        let (est, _err) = monte_carlo_integrate(|x| x, 0.0, 1.0, 50_000, &mut rng);
1199        assert!((est - 0.5).abs() < 0.01, "estimate={}", est);
1200    }
1201    #[test]
1202    fn test_monte_carlo_integrate_quadratic() {
1203        let mut rng = Lcg::new(13);
1204        let (est, _err) = monte_carlo_integrate(|x| x * x, 0.0, 1.0, 100_000, &mut rng);
1205        assert!((est - 1.0 / 3.0).abs() < 0.01, "estimate={}", est);
1206    }
1207    #[test]
1208    fn test_qmc_halton_vs_mc_variance() {
1209        let qmc_est = qmc_integrate_halton(|x| x * x, 0.0, 1.0, 1024);
1210        assert!(
1211            (qmc_est - 1.0 / 3.0).abs() < 0.005,
1212            "QMC estimate={}",
1213            qmc_est
1214        );
1215    }
1216    #[test]
1217    fn test_qmc_sobol_vs_mc_variance() {
1218        let est = qmc_integrate_sobol(|x| x, 0.0, 1.0, 1024);
1219        assert!((est - 0.5).abs() < 0.005, "Sobol QMC estimate={}", est);
1220    }
1221    #[test]
1222    fn test_importance_weight_estimator_uniform() {
1223        let samples: Vec<f64> = (0..10).map(|i| (i as f64 + 0.5) / 10.0).collect();
1224        let weights = importance_weights(&samples, |_| 1.0, |_| 1.0);
1225        for &w in &weights {
1226            assert!((w - 1.0).abs() < 1e-12, "weight={}", w);
1227        }
1228    }
1229    #[test]
1230    fn test_importance_weight_estimator_ratio() {
1231        let samples = vec![0.25, 0.5, 0.75];
1232        let weights = importance_weights(&samples, |x| 2.0 * x, |_| 1.0);
1233        assert!((weights[0] - 0.5).abs() < 1e-12);
1234        assert!((weights[1] - 1.0).abs() < 1e-12);
1235        assert!((weights[2] - 1.5).abs() < 1e-12);
1236    }
1237    #[test]
1238    fn test_stratified_hypercube_all_in_range() {
1239        let mut rng = Lcg::new(2025);
1240        let pts = stratified_unit_hypercube(8, 5, &mut rng);
1241        assert_eq!(pts.len(), 8);
1242        for p in &pts {
1243            assert_eq!(p.len(), 5);
1244            for &v in p {
1245                assert!((0.0..1.0).contains(&v), "v={} out of [0,1)", v);
1246            }
1247        }
1248    }
1249    #[test]
1250    fn test_stratified_hypercube_coverage() {
1251        let mut rng = Lcg::new(3030);
1252        let n = 6;
1253        let pts = stratified_unit_hypercube(n, 2, &mut rng);
1254        let inv = 1.0 / n as f64;
1255        for d in 0..2 {
1256            let mut occupied = vec![false; n];
1257            for p in &pts {
1258                let s = (p[d] / inv) as usize;
1259                occupied[s.min(n - 1)] = true;
1260            }
1261            for (k, &hit) in occupied.iter().enumerate() {
1262                assert!(hit, "dim {d} stratum {k} not covered");
1263            }
1264        }
1265    }
1266}
#[cfg(test)]
mod extended_sampling_tests {
    use crate::sampling::{
        GaussianKde, HammersleySequence, Lcg, bootstrap_mean_ci, bootstrap_resample,
        importance_sampling_estimate, lhs_maximin, min_pairwise_dist, rejection_sample_1d,
    };

    #[test]
    fn test_lhs_maximin_returns_correct_shape() {
        // 8 points × 3 dims, all coordinates in the half-open unit interval.
        let mut rng = Lcg::new(1111);
        let pts = lhs_maximin(8, 3, 5, &mut rng);
        assert_eq!(pts.len(), 8);
        for p in &pts {
            assert_eq!(p.len(), 3);
        }
        for &v in pts.iter().flatten() {
            assert!((0.0..1.0).contains(&v), "v={} out of [0,1)", v);
        }
    }
    #[test]
    fn test_lhs_maximin_better_than_single_lhs() {
        // Distinct points always have a strictly positive minimum pairwise
        // distance.
        let mut rng = Lcg::new(2222);
        let optimized = lhs_maximin(10, 2, 20, &mut rng);
        assert!(min_pairwise_dist(&optimized) > 0.0, "min dist should be positive");
    }
    #[test]
    fn test_lhs_maximin_single_candidate_equals_lhs() {
        // With a single candidate, maximin LHS degenerates to plain LHS and
        // must still cover every stratum in every dimension.
        let mut rng = Lcg::new(42);
        let pts = lhs_maximin(5, 2, 1, &mut rng);
        let width = 1.0 / 5.0_f64;
        for d in 0..2 {
            let mut occupied = [false; 5];
            for p in &pts {
                occupied[((p[d] / width) as usize).min(4)] = true;
            }
            for (k, &hit) in occupied.iter().enumerate() {
                assert!(
                    hit,
                    "dim {d} stratum {k} not covered in single-candidate LHS"
                );
            }
        }
    }
    #[test]
    fn test_hammersley_shape() {
        let pts = HammersleySequence::sample(16, 3);
        assert_eq!(pts.len(), 16);
        for p in &pts {
            assert_eq!(p.len(), 3);
        }
        for &v in pts.iter().flatten() {
            assert!((0.0..=1.0).contains(&v), "v={} out of [0,1]", v);
        }
    }
    #[test]
    fn test_hammersley_first_dim_uniform_grid() {
        // Dimension 0 of a Hammersley set is the regular grid i/n.
        let n = 8;
        for (i, p) in HammersleySequence::sample(n, 2).iter().enumerate() {
            let expected = i as f64 / n as f64;
            assert!((p[0] - expected).abs() < 1e-12, "dim0[{}]={}", i, p[0]);
        }
    }
    #[test]
    fn test_hammersley_second_dim_halton_base2() {
        // Dimension 1 follows the base-2 Van der Corput sequence.
        let pts = HammersleySequence::sample(4, 2);
        for (i, &exp) in [0.5, 0.25, 0.75, 0.125].iter().enumerate() {
            assert!(
                (pts[i][1] - exp).abs() < 1e-12,
                "dim1[{}]={} expected={}",
                i,
                pts[i][1],
                exp
            );
        }
    }
    #[test]
    fn test_bootstrap_resample_shape() {
        // 50 resamples, each the same size as the original data set.
        let data: Vec<f64> = (0..10).map(|i| i as f64).collect();
        let mut rng = Lcg::new(9999);
        let resamples = bootstrap_resample(&data, 50, &mut rng);
        assert_eq!(resamples.len(), 50);
        for r in &resamples {
            assert_eq!(r.len(), 10);
        }
    }
    #[test]
    fn test_bootstrap_resample_values_from_data() {
        // Bootstrap draws with replacement, so every resampled value must
        // come from the original sample.
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let mut rng = Lcg::new(7777);
        for r in &bootstrap_resample(&data, 20, &mut rng) {
            for &v in r {
                assert!(
                    data.contains(&v),
                    "bootstrap value {} not in original data",
                    v
                );
            }
        }
    }
    #[test]
    fn test_bootstrap_mean_ci_contains_true_mean() {
        // Uniform(0,1) data: the bootstrap CI should bracket both the sample
        // mean and the true mean 0.5.
        let mut rng = Lcg::new(5555);
        let data: Vec<f64> = (0..200).map(|_| rng.next_f64()).collect();
        let (mean, ci_lo, ci_hi) = bootstrap_mean_ci(&data, 500, &mut rng);
        assert!(ci_lo <= mean, "ci_lo={} > mean={}", ci_lo, mean);
        assert!(ci_hi >= mean, "ci_hi={} < mean={}", ci_hi, mean);
        assert!(
            ci_lo < 0.5 && ci_hi > 0.5,
            "CI [{}, {}] does not contain 0.5",
            ci_lo,
            ci_hi
        );
    }
    #[test]
    fn test_kde_positive_density() {
        // A Gaussian kernel gives strictly positive density everywhere.
        let kde = GaussianKde::new(vec![0.0, 0.5, 1.0, 1.5, 2.0], 0.5);
        for x in [0.0_f64, 0.5, 1.0, 1.5, 2.0] {
            let d = kde.evaluate(x);
            assert!(d > 0.0, "KDE density at {} should be positive: {}", x, d);
        }
    }
    #[test]
    fn test_kde_integrates_to_one() {
        // Midpoint-rule quadrature over a range well beyond the data support
        // should integrate the density to ≈ 1.
        let kde = GaussianKde::new(vec![0.0, 1.0, 2.0, 3.0, 4.0], 0.5);
        let n = 10_000;
        let (lo, hi) = (-5.0_f64, 9.0_f64);
        let width = (hi - lo) / n as f64;
        let integral: f64 = (0..n)
            .map(|i| kde.evaluate(lo + (i as f64 + 0.5) * width) * width)
            .sum();
        assert!((integral - 1.0).abs() < 0.05, "KDE integral={}", integral);
    }
    #[test]
    fn test_kde_silverman_bandwidth_positive() {
        let data: Vec<f64> = (1..=10).map(|i| i as f64).collect();
        let h = GaussianKde::silverman_bandwidth(&data);
        assert!(h > 0.0, "bandwidth should be positive: {}", h);
    }
    #[test]
    fn test_kde_auto_bandwidth() {
        // A negative bandwidth requests automatic selection.
        let kde = GaussianKde::new(vec![0.0, 1.0, 2.0, 3.0], -1.0);
        assert!(kde.bandwidth > 0.0, "auto bandwidth should be positive");
    }
    #[test]
    fn test_kde_evaluate_batch() {
        let kde = GaussianKde::new(vec![0.0, 1.0, 2.0], 0.3);
        let queries = vec![0.0, 0.5, 1.0, 1.5, 2.0];
        let batch = kde.evaluate_batch(&queries);
        assert_eq!(batch.len(), 5);
        for &v in &batch {
            assert!(v > 0.0, "KDE batch value non-positive: {}", v);
        }
        // Batch evaluation must agree with point-wise evaluation.
        let reference = GaussianKde::new(vec![0.0, 1.0, 2.0], 0.3);
        for (b, &q) in batch.iter().zip(queries.iter()) {
            assert!((b - reference.evaluate(q)).abs() < 1e-14);
        }
    }
    #[test]
    fn test_rejection_sample_1d_uniform() {
        // Constant density accepts everything; 50 samples in [0, 1).
        let mut rng = Lcg::new(1234);
        let samples = rejection_sample_1d(|_| 1.0, 0.0, 1.0, 1.0, 50, &mut rng);
        assert_eq!(samples.len(), 50);
        for &s in &samples {
            assert!((0.0..1.0).contains(&s), "sample {} out of [0,1)", s);
        }
    }
    #[test]
    fn test_rejection_sample_1d_upper_half_only() {
        // Density is zero below 0.5, so no sample may land there.
        let mut rng = Lcg::new(5678);
        let density = |x: f64| if x >= 0.5 { 2.0 } else { 0.0 };
        let samples = rejection_sample_1d(density, 0.0, 1.0, 2.0, 30, &mut rng);
        assert_eq!(samples.len(), 30);
        for &s in &samples {
            assert!(s >= 0.5, "sample {} should be >= 0.5", s);
        }
    }
    #[test]
    fn test_rejection_sample_1d_range_correct() {
        // Target density 3x² on [0,1] has mean 3/4.
        let mut rng = Lcg::new(4321);
        let samples = rejection_sample_1d(|x| 3.0 * x * x, 0.0, 1.0, 3.0, 100, &mut rng);
        let mean = samples.iter().sum::<f64>() / samples.len() as f64;
        assert!((mean - 0.75).abs() < 0.15, "mean={}", mean);
    }
    #[test]
    fn test_importance_sampling_estimate_uniform() {
        // Matched target/proposal over U(0,1): E[x] = 1/2.
        let samples: Vec<f64> = (0..100).map(|i| (i as f64 + 0.5) / 100.0).collect();
        let (est, _ess) = importance_sampling_estimate(&samples, |x| x, |_| 1.0, |_| 1.0);
        assert!((est - 0.5).abs() < 0.01, "estimate={}", est);
    }
    #[test]
    fn test_importance_sampling_estimate_ess_equals_n_uniform() {
        // Uniform weights ⇒ effective sample size equals n.
        let samples: Vec<f64> = (0..50).map(|i| i as f64 / 50.0).collect();
        let (_est, ess) = importance_sampling_estimate(&samples, |x| x, |_| 1.0, |_| 1.0);
        assert!((ess - 50.0).abs() < 0.01, "ESS={}", ess);
    }
    #[test]
    fn test_importance_sampling_estimate_reweighted() {
        // Reweighting U(0,1) samples by target p(x)=2x estimates E_p[x] = 2/3.
        let mut rng = Lcg::new(8888);
        let samples: Vec<f64> = (0..500).map(|_| rng.next_f64()).collect();
        let (est, ess) = importance_sampling_estimate(&samples, |x| x, |x| 2.0 * x, |_| 1.0);
        assert!(
            (est - 2.0 / 3.0).abs() < 0.05,
            "IS estimate={} expected~2/3",
            est
        );
        assert!(ess > 0.0, "ESS should be positive: {}", ess);
    }
}
1500}