oxiphysics_core/
uncertainty_quantification.rs

// Copyright 2026 COOLJAPAN OU (Team KitaSan)
// SPDX-License-Identifier: Apache-2.0

//! Uncertainty Quantification (UQ) methods for physics simulation.
//!
//! Provides:
//! - Monte Carlo uncertainty propagation
//! - Latin hypercube sampling (LHS)
//! - Sensitivity analysis (Sobol indices, Morris method)
//! - Polynomial chaos expansion (PCE)
//! - Stochastic collocation
//! - Gaussian process regression for UQ
//! - Reliability analysis (FORM / SORM)
//! - Bootstrap confidence intervals
//! - Bayesian inference via Metropolis-Hastings MCMC
//! - Probability of failure estimation

#![allow(dead_code)]
#![allow(clippy::too_many_arguments)]

use std::f64::consts::{FRAC_1_SQRT_2, PI};

// ─────────────────────────────────────────────────────────────────────────────
// Internal LCG RNG (no external rand dependency in tests)
// ─────────────────────────────────────────────────────────────────────────────

/// Lightweight linear congruential random number generator used internally.
struct UqRng {
    state: u64,
}

impl UqRng {
    fn new(seed: u64) -> Self {
        Self { state: seed.max(1) }
    }

    fn next_u64(&mut self) -> u64 {
        self.state = self
            .state
            .wrapping_mul(6_364_136_223_846_793_005)
            .wrapping_add(1_442_695_040_888_963_407);
        self.state
    }

    fn next_f64(&mut self) -> f64 {
        (self.next_u64() >> 11) as f64 * (1.0 / (1u64 << 53) as f64)
    }

    /// Box-Muller standard normal sample N(0,1).
    fn next_normal(&mut self) -> f64 {
        loop {
            let u1 = self.next_f64();
            let u2 = self.next_f64();
            if u1 > 1e-15 {
                return (-2.0 * u1.ln()).sqrt() * (2.0 * PI * u2).cos();
            }
        }
    }

    /// Sample from N(mean, std).
    fn next_gaussian(&mut self, mean: f64, std: f64) -> f64 {
        mean + std * self.next_normal()
    }
}

// ─────────────────────────────────────────────────────────────────────────────
// Helper math functions
// ─────────────────────────────────────────────────────────────────────────────

/// Compute the mean of a slice of values.
///
/// Returns 0.0 for an empty slice.
pub fn mean(data: &[f64]) -> f64 {
    if data.is_empty() {
        return 0.0;
    }
    data.iter().sum::<f64>() / data.len() as f64
}

/// Compute the variance (population) of a slice.
///
/// Returns 0.0 for slices with fewer than 2 elements.
pub fn variance(data: &[f64]) -> f64 {
    if data.len() < 2 {
        return 0.0;
    }
    let m = mean(data);
    data.iter().map(|x| (x - m).powi(2)).sum::<f64>() / data.len() as f64
}

/// Compute the standard deviation (population) of a slice.
pub fn std_dev(data: &[f64]) -> f64 {
    variance(data).sqrt()
}

/// Sort a slice in-place and return the p-th percentile (linear interpolation).
///
/// `p` should be in \[0, 100\].
pub fn percentile(data: &mut [f64], p: f64) -> f64 {
    if data.is_empty() {
        return 0.0;
    }
    data.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    let idx = (p / 100.0) * (data.len() - 1) as f64;
    let lo = idx.floor() as usize;
    let hi = (lo + 1).min(data.len() - 1);
    let frac = idx - lo as f64;
    data[lo] * (1.0 - frac) + data[hi] * frac
}

/// Inverse CDF of the standard normal (rational approximation,
/// Abramowitz & Stegun 26.2.23).
///
/// Maps p ∈ (0,1) to x such that Φ(x) = p.
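///
/// # Examples
///
/// A minimal sketch showing that `probit` inverts [`normal_cdf`] to roughly
/// three digits; the `use` path is assumed from this file's location.
///
/// ```
/// use oxiphysics_core::uncertainty_quantification::{normal_cdf, probit};
///
/// let x = probit(0.975);
/// assert!((x - 1.96).abs() < 1e-2);
/// assert!((normal_cdf(x) - 0.975).abs() < 1e-3);
/// ```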
pub fn probit(p: f64) -> f64 {
    let p = p.clamp(1e-15, 1.0 - 1e-15);
    let p_adj = if p < 0.5 { p } else { 1.0 - p };
    let t = (-2.0 * p_adj.ln()).sqrt();
    let c0 = 2.515_517;
    let c1 = 0.802_853;
    let c2 = 0.010_328;
    let d1 = 1.432_788;
    let d2 = 0.189_269;
    let d3 = 0.001_308;
    let num = c0 + c1 * t + c2 * t * t;
    let den = 1.0 + d1 * t + d2 * t * t + d3 * t * t * t;
    let x = t - num / den;
    if p >= 0.5 { x } else { -x }
}

/// Standard normal CDF (error function approximation).
///
/// Returns Φ(x) = P(Z ≤ x) for Z ~ N(0,1).
pub fn normal_cdf(x: f64) -> f64 {
    0.5 * (1.0 + erf(x / 2.0_f64.sqrt()))
}

/// Error function approximation (Abramowitz & Stegun 7.1.26, evaluated via
/// Horner's method).
pub fn erf(x: f64) -> f64 {
    let sign = if x < 0.0 { -1.0 } else { 1.0 };
    let x = x.abs();
    let t = 1.0 / (1.0 + 0.3275911 * x);
    let poly = t
        * (0.254_829_592
            + t * (-0.284_496_736
                + t * (1.421_413_741 + t * (-1.453_152_027 + t * 1.061_405_429))));
    sign * (1.0 - poly * (-x * x).exp())
}

// ─────────────────────────────────────────────────────────────────────────────
// 1. Monte Carlo Uncertainty Propagation
// ─────────────────────────────────────────────────────────────────────────────

/// Description of a single uncertain input parameter.
#[derive(Debug, Clone)]
pub struct UncertainParameter {
    /// Mean value of the parameter.
    pub mean: f64,
    /// Standard deviation of the parameter.
    pub std: f64,
}

impl UncertainParameter {
    /// Create a new uncertain parameter with given mean and standard deviation.
    pub fn new(mean: f64, std: f64) -> Self {
        Self { mean, std }
    }
}

/// Results from a Monte Carlo uncertainty propagation run.
#[derive(Debug, Clone)]
pub struct McUqResult {
    /// Propagated output samples.
    pub samples: Vec<f64>,
    /// Sample mean of the output.
    pub mean: f64,
    /// Sample standard deviation of the output.
    pub std: f64,
    /// 5th percentile of the output.
    pub p05: f64,
    /// 95th percentile of the output.
    pub p95: f64,
}

/// Run Monte Carlo uncertainty propagation.
///
/// Draws `n_samples` realisations of `params` (each Gaussian), evaluates
/// `model(inputs)`, and returns statistics on the output distribution.
///
/// # Arguments
/// * `params` - Slice of uncertain input parameters.
/// * `n_samples` - Number of Monte Carlo samples.
/// * `model` - Function mapping a slice of inputs to a scalar output.
/// * `seed` - RNG seed for reproducibility.
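///
/// # Examples
///
/// A minimal sketch propagating Y = X₁ + X₂; the `use` path is assumed from
/// this file's location, and the tolerance is illustrative.
///
/// ```no_run
/// use oxiphysics_core::uncertainty_quantification::{
///     monte_carlo_propagation, UncertainParameter,
/// };
///
/// let params = [
///     UncertainParameter::new(2.0, 0.1),
///     UncertainParameter::new(3.0, 0.1),
/// ];
/// let result = monte_carlo_propagation(&params, 2_000, &|x| x[0] + x[1], 42);
/// // E[Y] = 5, so the sample mean should land nearby.
/// assert!((result.mean - 5.0).abs() < 0.1);
/// ```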
pub fn monte_carlo_propagation(
    params: &[UncertainParameter],
    n_samples: usize,
    model: &dyn Fn(&[f64]) -> f64,
    seed: u64,
) -> McUqResult {
    let mut rng = UqRng::new(seed);
    let mut outputs = Vec::with_capacity(n_samples);
    let mut inputs = vec![0.0_f64; params.len()];
    for _ in 0..n_samples {
        for (i, p) in params.iter().enumerate() {
            inputs[i] = rng.next_gaussian(p.mean, p.std);
        }
        outputs.push(model(&inputs));
    }
    let m = mean(&outputs);
    let s = std_dev(&outputs);
    // `percentile` sorts in place, so one sorted copy serves both quantiles.
    let mut sorted = outputs.clone();
    let p05 = percentile(&mut sorted, 5.0);
    let p95 = percentile(&mut sorted, 95.0);
    McUqResult {
        samples: outputs,
        mean: m,
        std: s,
        p05,
        p95,
    }
}

// ─────────────────────────────────────────────────────────────────────────────
// 2. Latin Hypercube Sampling (LHS)
// ─────────────────────────────────────────────────────────────────────────────

/// A Latin Hypercube Sample: rows = samples, columns = dimensions.
#[derive(Debug, Clone)]
pub struct LatinHypercubeSample {
    /// Number of samples.
    pub n_samples: usize,
    /// Number of dimensions (input variables).
    pub n_dims: usize,
    /// Sample matrix, row-major: `data[i * n_dims + j]` is sample i, dim j.
    /// Values are in \[0, 1\].
    pub data: Vec<f64>,
}

impl LatinHypercubeSample {
    /// Access sample `i`, dimension `j`.
    pub fn get(&self, i: usize, j: usize) -> f64 {
        self.data[i * self.n_dims + j]
    }
}

/// Generate a Latin Hypercube Sample with `n_samples` points in `n_dims` dimensions.
///
/// Each dimension is stratified into `n_samples` equal-width intervals;
/// one point is drawn uniformly from each interval, then the points are
/// randomly permuted per dimension (randomised LHS variant).
///
/// Returns a [`LatinHypercubeSample`] with values in \[0, 1\].
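///
/// # Examples
///
/// A minimal sketch; the `use` path is assumed from this file's location.
///
/// ```
/// use oxiphysics_core::uncertainty_quantification::latin_hypercube_sample;
///
/// let lhs = latin_hypercube_sample(10, 2, 42);
/// // Row-major layout: 10 samples × 2 dimensions, all values in [0, 1].
/// assert_eq!(lhs.data.len(), 20);
/// assert!(lhs.data.iter().all(|v| (0.0..=1.0).contains(v)));
/// ```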
pub fn latin_hypercube_sample(n_samples: usize, n_dims: usize, seed: u64) -> LatinHypercubeSample {
    let mut rng = UqRng::new(seed);
    let mut data = vec![0.0_f64; n_samples * n_dims];
    let step = 1.0 / n_samples as f64;

    for j in 0..n_dims {
        // Create stratified points for this dimension
        let mut col: Vec<f64> = (0..n_samples)
            .map(|i| (i as f64 + rng.next_f64()) * step)
            .collect();
        // Fisher-Yates shuffle
        for k in (1..n_samples).rev() {
            let idx = (rng.next_u64() as usize) % (k + 1);
            col.swap(k, idx);
        }
        for i in 0..n_samples {
            data[i * n_dims + j] = col[i];
        }
    }
    LatinHypercubeSample {
        n_samples,
        n_dims,
        data,
    }
}

/// Scale LHS samples from \[0,1\] to \[low, high\] for each dimension.
///
/// `bounds` must have exactly `lhs.n_dims` entries of `(low, high)`.
pub fn scale_lhs(lhs: &LatinHypercubeSample, bounds: &[(f64, f64)]) -> Vec<Vec<f64>> {
    assert_eq!(bounds.len(), lhs.n_dims);
    (0..lhs.n_samples)
        .map(|i| {
            (0..lhs.n_dims)
                .map(|j| {
                    let (lo, hi) = bounds[j];
                    lo + lhs.get(i, j) * (hi - lo)
                })
                .collect()
        })
        .collect()
}

// ─────────────────────────────────────────────────────────────────────────────
// 3. Sensitivity Analysis
// ─────────────────────────────────────────────────────────────────────────────

// ── 3a. Morris Method ────────────────────────────────────────────────────────

/// Result of a Morris (elementary effects) sensitivity screening.
#[derive(Debug, Clone)]
pub struct MorrisResult {
    /// Mean of absolute elementary effects μ* per input dimension.
    pub mu_star: Vec<f64>,
    /// Standard deviation of elementary effects σ per input dimension.
    pub sigma: Vec<f64>,
}

/// Run the Morris method (elementary effects) for `n_dims` inputs.
///
/// Each trajectory perturbs one input at a time by `delta` and records the
/// elementary effect on the model output.  `r` trajectories are generated.
///
/// # Arguments
/// * `n_dims` - Number of input dimensions.
/// * `r` - Number of Morris trajectories.
/// * `delta` - Fractional step size (e.g. 0.1).
/// * `model` - Scalar model function.
/// * `bounds` - `(low, high)` for each dimension.
/// * `seed` - RNG seed.
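///
/// # Examples
///
/// A minimal sketch with a linear model, where μ* should recover the
/// coefficient magnitudes; the `use` path is assumed from this file's location.
///
/// ```no_run
/// use oxiphysics_core::uncertainty_quantification::morris_sensitivity;
///
/// let bounds = [(0.0, 1.0), (0.0, 1.0)];
/// let model = |x: &[f64]| 1.0 * x[0] + 10.0 * x[1];
/// let res = morris_sensitivity(2, 20, 0.1, &model, &bounds, 7);
/// // x1 has the larger coefficient, so its mean elementary effect dominates.
/// assert!(res.mu_star[1] > res.mu_star[0]);
/// ```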
pub fn morris_sensitivity(
    n_dims: usize,
    r: usize,
    delta: f64,
    model: &dyn Fn(&[f64]) -> f64,
    bounds: &[(f64, f64)],
    seed: u64,
) -> MorrisResult {
    let mut rng = UqRng::new(seed);
    let mut all_effects: Vec<Vec<f64>> = vec![Vec::new(); n_dims];

    for _ in 0..r {
        // Random starting point
        let mut x: Vec<f64> = bounds
            .iter()
            .map(|(lo, hi)| lo + rng.next_f64() * (hi - lo))
            .collect();
        // Random permutation of dimensions
        let mut perm: Vec<usize> = (0..n_dims).collect();
        for k in (1..n_dims).rev() {
            let idx = (rng.next_u64() as usize) % (k + 1);
            perm.swap(k, idx);
        }
        let mut y0 = model(&x);
        for &dim in &perm {
            let (lo, hi) = bounds[dim];
            let step = delta * (hi - lo);
            // Ensure we stay within bounds
            let new_val = if x[dim] + step <= hi {
                x[dim] + step
            } else {
                x[dim] - step
            };
            let old_val = x[dim];
            x[dim] = new_val;
            let y1 = model(&x);
            let ee = (y1 - y0) / (new_val - old_val);
            all_effects[dim].push(ee);
            y0 = y1;
        }
    }

    let mu_star: Vec<f64> = all_effects
        .iter()
        .map(|effects| {
            if effects.is_empty() {
                0.0
            } else {
                effects.iter().map(|e| e.abs()).sum::<f64>() / effects.len() as f64
            }
        })
        .collect();

    let sigma: Vec<f64> = all_effects
        .iter()
        .map(|effects| {
            if effects.len() < 2 {
                return 0.0;
            }
            let m = effects.iter().sum::<f64>() / effects.len() as f64;
            let v =
                effects.iter().map(|e| (e - m).powi(2)).sum::<f64>() / (effects.len() - 1) as f64;
            v.sqrt()
        })
        .collect();

    MorrisResult { mu_star, sigma }
}

// ── 3b. Sobol Indices ────────────────────────────────────────────────────────

/// First-order and total-order Sobol sensitivity indices.
#[derive(Debug, Clone)]
pub struct SobolIndices {
    /// First-order Sobol index S_i for each input dimension.
    pub first_order: Vec<f64>,
    /// Total-order Sobol index T_i for each input dimension.
    pub total_order: Vec<f64>,
}

/// Estimate Sobol sensitivity indices using the Saltelli (2010) estimator.
///
/// Uses `n_base` stratified (LHS-generated) base samples and the Jansen
/// estimator for total-order indices.
///
/// # Arguments
/// * `n_dims` - Number of input variables.
/// * `n_base` - Base sample size N; total model evaluations ≈ N·(n_dims+2).
/// * `model` - Scalar model function mapping inputs → output.
/// * `bounds` - `(low, high)` per dimension.
/// * `seed` - RNG seed.
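///
/// # Examples
///
/// A minimal sketch for an additive model, where both first-order indices
/// should sit near 0.5; the `use` path is assumed from this file's location
/// and the tolerance is illustrative.
///
/// ```no_run
/// use oxiphysics_core::uncertainty_quantification::sobol_indices;
///
/// let bounds = [(0.0, 1.0), (0.0, 1.0)];
/// let idx = sobol_indices(2, 500, &|x| x[0] + x[1], &bounds, 42);
/// assert!((idx.first_order[0] - 0.5).abs() < 0.2);
/// assert!((idx.first_order[1] - 0.5).abs() < 0.2);
/// ```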
pub fn sobol_indices(
    n_dims: usize,
    n_base: usize,
    model: &dyn Fn(&[f64]) -> f64,
    bounds: &[(f64, f64)],
    seed: u64,
) -> SobolIndices {
    // Generate two independent LHS sample matrices A and B
    let lhs_a = latin_hypercube_sample(n_base, n_dims, seed);
    let lhs_b = latin_hypercube_sample(n_base, n_dims, seed.wrapping_add(0xDEAD_BEEF));

    let a_mat = scale_lhs(&lhs_a, bounds);
    let b_mat = scale_lhs(&lhs_b, bounds);

    // Evaluate model on A and B
    let fa: Vec<f64> = a_mat.iter().map(|x| model(x)).collect();
    let fb: Vec<f64> = b_mat.iter().map(|x| model(x)).collect();

    let var_y = {
        let all: Vec<f64> = fa.iter().chain(fb.iter()).copied().collect();
        variance(&all)
    };

    let mut first_order = vec![0.0_f64; n_dims];
    let mut total_order = vec![0.0_f64; n_dims];

    let n = n_base as f64;

    for dim in 0..n_dims {
        // Build A_B^(i): A with column i replaced by B's column i
        let ab_i: Vec<Vec<f64>> = (0..n_base)
            .map(|k| {
                let mut row = a_mat[k].clone();
                row[dim] = b_mat[k][dim];
                row
            })
            .collect();

        let fab_i: Vec<f64> = ab_i.iter().map(|x| model(x)).collect();

        // Saltelli (2010) first-order estimator: S_i = (1/N) Σ f_B(f_AB_i - f_A) / Var
        let s1_num: f64 = (0..n_base).map(|k| fb[k] * (fab_i[k] - fa[k])).sum::<f64>() / n;

        // Jansen total-order estimator: T_i = (1/(2N)) Σ (f_A - f_AB_i)² / Var
        let st_num: f64 = (0..n_base).map(|k| (fa[k] - fab_i[k]).powi(2)).sum::<f64>() / (2.0 * n);

        first_order[dim] = if var_y > 1e-15 { s1_num / var_y } else { 0.0 };
        total_order[dim] = if var_y > 1e-15 { st_num / var_y } else { 0.0 };
    }

    SobolIndices {
        first_order,
        total_order,
    }
}

// ─────────────────────────────────────────────────────────────────────────────
// 4. Polynomial Chaos Expansion (PCE)
// ─────────────────────────────────────────────────────────────────────────────

/// Computes the n-th Hermite polynomial He_n(x) (probabilists' convention).
///
/// He_0 = 1, He_1 = x, He_{n+1} = x·He_n − n·He_{n−1}.
pub fn hermite_poly(n: usize, x: f64) -> f64 {
    match n {
        0 => 1.0,
        1 => x,
        _ => {
            let mut h_prev = 1.0;
            let mut h_curr = x;
            for k in 1..n {
                let h_next = x * h_curr - k as f64 * h_prev;
                h_prev = h_curr;
                h_curr = h_next;
            }
            h_curr
        }
    }
}

/// Polynomial Chaos Expansion surrogate model.
///
/// Represents an output Y as a linear combination of Hermite basis polynomials
/// evaluated at standardised Gaussian inputs.
#[derive(Debug, Clone)]
pub struct PolynomialChaosExpansion {
    /// PCE coefficients, one per multi-index.
    pub coeffs: Vec<f64>,
    /// Multi-indices: each entry is a vector of polynomial degrees per dimension.
    pub multi_indices: Vec<Vec<usize>>,
    /// Number of input dimensions.
    pub n_dims: usize,
}

impl PolynomialChaosExpansion {
    /// Evaluate the PCE at a point `x` (standardised Gaussian inputs).
    ///
    /// `x.len()` must equal `self.n_dims`.
    pub fn evaluate(&self, x: &[f64]) -> f64 {
        assert_eq!(x.len(), self.n_dims);
        self.coeffs
            .iter()
            .zip(self.multi_indices.iter())
            .map(|(c, idx)| {
                let basis: f64 = idx
                    .iter()
                    .zip(x.iter())
                    .map(|(&deg, &xi)| hermite_poly(deg, xi))
                    .product();
                c * basis
            })
            .sum()
    }

    /// Mean of the PCE (= coefficient of the zero-order term).
    pub fn pce_mean(&self) -> f64 {
        // The zero multi-index has all degrees = 0; He_0 = 1, <He_0²> = 1
        self.coeffs
            .iter()
            .zip(self.multi_indices.iter())
            .find(|(_, idx)| idx.iter().all(|&d| d == 0))
            .map(|(c, _)| *c)
            .unwrap_or(0.0)
    }

    /// Variance of the PCE: the sum of squared non-zero-order coefficients,
    /// each weighted by its basis norm.
    ///
    /// Uses the orthogonality of Hermite polynomials: ⟨He_α, He_α⟩ = α!.
    pub fn pce_variance(&self) -> f64 {
        self.coeffs
            .iter()
            .zip(self.multi_indices.iter())
            .filter(|(_, idx)| !idx.iter().all(|&d| d == 0))
            .map(|(c, idx)| {
                let norm_sq: f64 = idx.iter().map(|&d| factorial(d) as f64).product();
                c * c * norm_sq
            })
            .sum()
    }
}

fn factorial(n: usize) -> u64 {
    (1..=n as u64).product()
}

/// Generate all multi-indices of total degree ≤ `max_degree` for `n_dims` dimensions.
pub fn pce_multi_indices(n_dims: usize, max_degree: usize) -> Vec<Vec<usize>> {
    let mut result = Vec::new();
    pce_multi_indices_rec(n_dims, max_degree, 0, &mut Vec::new(), &mut result);
    result
}

fn pce_multi_indices_rec(
    n_dims: usize,
    remaining: usize,
    dim: usize,
    current: &mut Vec<usize>,
    result: &mut Vec<Vec<usize>>,
) {
    if dim == n_dims {
        result.push(current.clone());
        return;
    }
    // Each dimension may spend any part of the remaining total-degree budget.
    let max_here = remaining;
    for d in 0..=max_here {
        current.push(d);
        if dim + 1 < n_dims {
            pce_multi_indices_rec(n_dims, remaining - d, dim + 1, current, result);
        } else {
            result.push(current.clone());
        }
        current.pop();
    }
}

/// Fit a PCE by a non-intrusive (regression) approach using LHS-sampled training data.
///
/// Solves the least-squares problem Ψ c = Y for the coefficient vector c.
///
/// # Arguments
/// * `n_dims` - Number of Gaussian input dimensions.
/// * `max_degree` - Maximum total polynomial degree.
/// * `n_train` - Number of training samples (recommend ≥ 2 × n_basis).
/// * `model` - Model function on standardised Gaussian inputs.
/// * `seed` - RNG seed.
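///
/// # Examples
///
/// A minimal sketch for the linear model Y = 1 + 2ξ with ξ ~ N(0,1), whose
/// exact mean is 1 and variance is 4 (Y = He_0 + 2·He_1); the `use` path is
/// assumed from this file's location.
///
/// ```no_run
/// use oxiphysics_core::uncertainty_quantification::fit_pce;
///
/// let pce = fit_pce(1, 2, 50, &|x| 1.0 + 2.0 * x[0], 42);
/// assert!((pce.pce_mean() - 1.0).abs() < 0.1);
/// assert!((pce.pce_variance() - 4.0).abs() < 0.5);
/// ```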
pub fn fit_pce(
    n_dims: usize,
    max_degree: usize,
    n_train: usize,
    model: &dyn Fn(&[f64]) -> f64,
    seed: u64,
) -> PolynomialChaosExpansion {
    let multi_indices = pce_multi_indices(n_dims, max_degree);
    let n_basis = multi_indices.len();

    // Generate training points (standard normal via probit transform of LHS)
    let lhs = latin_hypercube_sample(n_train, n_dims, seed);

    let x_train: Vec<Vec<f64>> = (0..n_train)
        .map(|i| {
            (0..n_dims)
                .map(|j| probit(lhs.get(i, j).clamp(1e-6, 1.0 - 1e-6)))
                .collect()
        })
        .collect();

    let y_train: Vec<f64> = x_train.iter().map(|x| model(x)).collect();

    // Build Vandermonde-like matrix Ψ: n_train × n_basis
    let mut psi = vec![0.0_f64; n_train * n_basis];
    for (i, xi) in x_train.iter().enumerate() {
        for (j, idx) in multi_indices.iter().enumerate() {
            let basis: f64 = idx
                .iter()
                .zip(xi.iter())
                .map(|(&deg, &x)| hermite_poly(deg, x))
                .product();
            psi[i * n_basis + j] = basis;
        }
    }

    // Solve normal equations Ψᵀ Ψ c = Ψᵀ y (simple, not production quality)
    let coeffs = normal_equations_solve(&psi, &y_train, n_train, n_basis);

    PolynomialChaosExpansion {
        coeffs,
        multi_indices,
        n_dims,
    }
}

/// Solve the normal equations AᵀA x = Aᵀb via Gaussian elimination with
/// partial pivoting, after adding a small ridge term for stability.
fn normal_equations_solve(a: &[f64], b: &[f64], m: usize, n: usize) -> Vec<f64> {
    // Compute Aᵀ A (n×n) and Aᵀ b (n)
    let mut ata = vec![0.0_f64; n * n];
    let mut atb = vec![0.0_f64; n];
    for i in 0..m {
        for j in 0..n {
            atb[j] += a[i * n + j] * b[i];
            for k in 0..n {
                ata[j * n + k] += a[i * n + j] * a[i * n + k];
            }
        }
    }
    // Add small regularisation
    for j in 0..n {
        ata[j * n + j] += 1e-10;
    }
    // Forward Gaussian elimination with partial pivoting
    let mut x = atb.clone();
    let mut mat = ata;
    for col in 0..n {
        // Find pivot
        let mut max_val = mat[col * n + col].abs();
        let mut max_row = col;
        for row in (col + 1)..n {
            if mat[row * n + col].abs() > max_val {
                max_val = mat[row * n + col].abs();
                max_row = row;
            }
        }
        if max_row != col {
            for k in 0..n {
                mat.swap(col * n + k, max_row * n + k);
            }
            x.swap(col, max_row);
        }
        let pivot = mat[col * n + col];
        if pivot.abs() < 1e-18 {
            continue;
        }
        for row in (col + 1)..n {
            let factor = mat[row * n + col] / pivot;
            for k in col..n {
                let tmp = mat[col * n + k] * factor;
                mat[row * n + k] -= tmp;
            }
            x[row] -= x[col] * factor;
        }
    }
    // Back substitution
    for col in (0..n).rev() {
        let pivot = mat[col * n + col];
        if pivot.abs() < 1e-18 {
            continue;
        }
        x[col] /= pivot;
        for row in 0..col {
            let tmp = mat[row * n + col] * x[col];
            x[row] -= tmp;
        }
    }
    x
}

// ─────────────────────────────────────────────────────────────────────────────
// 5. Stochastic Collocation
// ─────────────────────────────────────────────────────────────────────────────

/// A collocation node (quadrature point + weight) for one dimension.
#[derive(Debug, Clone, Copy)]
pub struct CollocationNode {
    /// Quadrature abscissa (standardised).
    pub point: f64,
    /// Quadrature weight.
    pub weight: f64,
}

/// Gauss-Hermite quadrature nodes and weights for an n-point rule.
///
/// Suitable for integrals of the form ∫ f(x) exp(−x²) dx.
/// Returns nodes sorted in ascending order.
///
/// Supported orders: 1, 2, 3, 4, 5; any other order falls back to the
/// 3-point rule.
pub fn gauss_hermite_nodes(order: usize) -> Vec<CollocationNode> {
    // Tabulated values from Abramowitz & Stegun (Table 25.10)
    match order {
        1 => vec![CollocationNode {
            point: 0.0,
            weight: 1.772_453_851_f64, // √π ≈ 1.7724538509055159
        }],
        2 => vec![
            CollocationNode {
                point: -FRAC_1_SQRT_2,
                weight: 0.886_226_925,
            },
            CollocationNode {
                point: FRAC_1_SQRT_2,
                weight: 0.886_226_925,
            },
        ],
        3 => vec![
            CollocationNode {
                point: -1.224_744_871,
                weight: 0.295_408_975,
            },
            CollocationNode {
                point: 0.0,
                weight: 1.181_635_901,
            },
            CollocationNode {
                point: 1.224_744_871,
                weight: 0.295_408_975,
            },
        ],
        4 => vec![
            CollocationNode {
                point: -1.650_680_123,
                weight: 0.081_312_835,
            },
            CollocationNode {
                point: -0.524_647_623,
                weight: 0.804_914_090,
            },
            CollocationNode {
                point: 0.524_647_623,
                weight: 0.804_914_090,
            },
            CollocationNode {
                point: 1.650_680_123,
                weight: 0.081_312_835,
            },
        ],
        5 => vec![
            CollocationNode {
                point: -2.020_182_870,
                weight: 0.019_953_242,
            },
            CollocationNode {
                point: -0.958_572_464,
                weight: 0.393_619_323,
            },
            CollocationNode {
                point: 0.0,
                weight: 0.945_308_720,
            },
            CollocationNode {
                point: 0.958_572_464,
                weight: 0.393_619_323,
            },
            CollocationNode {
                point: 2.020_182_870,
                weight: 0.019_953_242,
            },
        ],
        _ => gauss_hermite_nodes(3), // fallback documented above
    }
}

/// Compute mean and variance of a model via tensor-product stochastic collocation.
///
/// Integrates E\[Y\] and E\[Y²\] using Gauss-Hermite quadrature for Gaussian inputs.
///
/// # Arguments
/// * `n_dims` - Number of independent Gaussian inputs.
/// * `order` - Gauss-Hermite quadrature order per dimension (1–5).
/// * `model` - Model on standardised Gaussian inputs.
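///
/// # Examples
///
/// A minimal sketch for Y = 2 + 3X with X ~ N(0,1), whose exact mean is 2 and
/// variance is 9; the `use` path is assumed from this file's location.
///
/// ```
/// use oxiphysics_core::uncertainty_quantification::stochastic_collocation;
///
/// let (mean, var) = stochastic_collocation(1, 5, &|x| 2.0 + 3.0 * x[0]);
/// assert!((mean - 2.0).abs() < 1e-6);
/// assert!((var - 9.0).abs() < 1e-3);
/// ```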
pub fn stochastic_collocation(
    n_dims: usize,
    order: usize,
    model: &dyn Fn(&[f64]) -> f64,
) -> (f64, f64) {
    let nodes_1d = gauss_hermite_nodes(order);

    // Normalising constant π^(n/2) for the standard normal measure
    let pi_n_half = PI.powf(n_dims as f64 / 2.0);

    // Enumerate all multi-index combinations via recursion
    let mut mean_acc = 0.0_f64;
    let mut sq_acc = 0.0_f64;
    let mut x = vec![0.0_f64; n_dims];
    collocation_recurse(
        &nodes_1d,
        n_dims,
        0,
        1.0,
        &mut x,
        model,
        &mut mean_acc,
        &mut sq_acc,
    );

    mean_acc /= pi_n_half;
    sq_acc /= pi_n_half;
    let var = (sq_acc - mean_acc * mean_acc).max(0.0);
    (mean_acc, var)
}

fn collocation_recurse(
    nodes: &[CollocationNode],
    n_dims: usize,
    dim: usize,
    w_acc: f64,
    x: &mut [f64],
    model: &dyn Fn(&[f64]) -> f64,
    mean_acc: &mut f64,
    sq_acc: &mut f64,
) {
    if dim == n_dims {
        let y = model(x);
        // Gauss-Hermite weights already encode the e^{-x^2} factor; use directly.
        *mean_acc += w_acc * y;
        *sq_acc += w_acc * y * y;
        return;
    }
    for node in nodes {
        x[dim] = node.point * 2.0_f64.sqrt(); // scale to N(0,1)
        collocation_recurse(
            nodes,
            n_dims,
            dim + 1,
            w_acc * node.weight,
            x,
            model,
            mean_acc,
            sq_acc,
        );
    }
}

// ─────────────────────────────────────────────────────────────────────────────
// 6. Gaussian Process Regression for UQ
// ─────────────────────────────────────────────────────────────────────────────

/// Squared-exponential (RBF) kernel: k(x, x') = σ² exp(-‖x−x'‖² / (2ℓ²)).
///
/// # Arguments
/// * `x` - First input vector.
/// * `xp` - Second input vector.
/// * `sigma_f` - Signal standard deviation σ_f.
/// * `length_scale` - Length scale ℓ.
pub fn rbf_kernel(x: &[f64], xp: &[f64], sigma_f: f64, length_scale: f64) -> f64 {
    let sq_dist: f64 = x.iter().zip(xp.iter()).map(|(a, b)| (a - b).powi(2)).sum();
    sigma_f * sigma_f * (-sq_dist / (2.0 * length_scale * length_scale)).exp()
}

/// A fitted Gaussian Process regression model.
#[derive(Debug, Clone)]
pub struct GaussianProcessSurrogate {
    /// Training input points (row-major: n_train × n_dims).
    pub x_train: Vec<Vec<f64>>,
    /// Training output values.
    pub y_train: Vec<f64>,
    /// Alpha vector = (K + σ²_n I)^{-1} y  (pre-computed for prediction).
    pub alpha: Vec<f64>,
    /// Signal standard deviation σ_f.
    pub sigma_f: f64,
    /// Length scale ℓ.
    pub length_scale: f64,
    /// Noise standard deviation σ_n.
    pub noise_std: f64,
}

impl GaussianProcessSurrogate {
    /// Predict the posterior mean and variance at a new point `x_star`.
    pub fn predict(&self, x_star: &[f64]) -> (f64, f64) {
        let k_star: Vec<f64> = self
            .x_train
            .iter()
            .map(|xi| rbf_kernel(xi, x_star, self.sigma_f, self.length_scale))
            .collect();

        let mean_pred: f64 = k_star
            .iter()
            .zip(self.alpha.iter())
            .map(|(k, a)| k * a)
            .sum();

        // Predictive variance: k** − k*ᵀ (K + σ²_n I)⁻¹ k*, here replaced by a
        // crude diagonal approximation.  A full implementation would reuse a
        // Cholesky factor of K + σ²_n I instead.
        let k_ss = rbf_kernel(x_star, x_star, self.sigma_f, self.length_scale);
        let var_pred = (k_ss
            - k_star.iter().map(|k| k * k).sum::<f64>() / (self.sigma_f * self.sigma_f + 1e-8))
            .max(0.0);

        (mean_pred, var_pred)
    }
}

/// Fit a Gaussian Process surrogate using exact GP regression.
///
/// Solves (K + σ²_n I) α = y via simple Gaussian elimination.
///
/// # Arguments
/// * `x_train` - Training inputs (one vector per sample).
/// * `y_train` - Training outputs.
/// * `sigma_f` - Signal standard deviation.
/// * `length_scale` - RBF length scale.
/// * `noise_std` - Observation noise standard deviation.
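///
/// # Examples
///
/// A minimal sketch fitting five 1-D points of y = x²; the `use` path is
/// assumed from this file's location, and the tolerance is illustrative.
///
/// ```no_run
/// use oxiphysics_core::uncertainty_quantification::fit_gp;
///
/// let x_train: Vec<Vec<f64>> = (0..5).map(|i| vec![i as f64]).collect();
/// let y_train: Vec<f64> = x_train.iter().map(|x| x[0] * x[0]).collect();
/// let gp = fit_gp(x_train, y_train, 2.0, 1.5, 0.01);
/// // With low noise the posterior mean should nearly interpolate.
/// let (mean, _var) = gp.predict(&[2.0]);
/// assert!((mean - 4.0).abs() < 0.5);
/// ```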
pub fn fit_gp(
    x_train: Vec<Vec<f64>>,
    y_train: Vec<f64>,
    sigma_f: f64,
    length_scale: f64,
    noise_std: f64,
) -> GaussianProcessSurrogate {
    let n = x_train.len();
    // Build K + σ²_n I
    let mut k = vec![0.0_f64; n * n];
    for i in 0..n {
        for j in 0..n {
            k[i * n + j] = rbf_kernel(&x_train[i], &x_train[j], sigma_f, length_scale);
        }
        k[i * n + i] += noise_std * noise_std;
    }
    // Solve Kα = y
    let alpha = gaussian_eliminate(&k, &y_train, n);
    GaussianProcessSurrogate {
        x_train,
        y_train,
        alpha,
        sigma_f,
        length_scale,
        noise_std,
    }
}

/// Solve A x = b via Gaussian elimination with partial pivoting.
fn gaussian_eliminate(a: &[f64], b: &[f64], n: usize) -> Vec<f64> {
    let mut mat = a.to_vec();
    let mut rhs = b.to_vec();
    for col in 0..n {
        let mut max_val = mat[col * n + col].abs();
        let mut max_row = col;
        for row in (col + 1)..n {
            if mat[row * n + col].abs() > max_val {
                max_val = mat[row * n + col].abs();
                max_row = row;
            }
        }
        if max_row != col {
            for k in 0..n {
                mat.swap(col * n + k, max_row * n + k);
            }
            rhs.swap(col, max_row);
        }
        let pivot = mat[col * n + col];
        if pivot.abs() < 1e-18 {
            continue;
        }
        for row in (col + 1)..n {
            let factor = mat[row * n + col] / pivot;
            for k in col..n {
                let tmp = mat[col * n + k] * factor;
                mat[row * n + k] -= tmp;
            }
            rhs[row] -= rhs[col] * factor;
        }
    }
    for col in (0..n).rev() {
        let pivot = mat[col * n + col];
        if pivot.abs() < 1e-18 {
            continue;
        }
        rhs[col] /= pivot;
        for row in 0..col {
            let tmp = mat[row * n + col] * rhs[col];
            rhs[row] -= tmp;
        }
    }
    rhs
}

// ─────────────────────────────────────────────────────────────────────────────
// 7. Reliability Analysis (FORM / SORM)
// ─────────────────────────────────────────────────────────────────────────────

/// Result of a FORM reliability analysis.
#[derive(Debug, Clone)]
pub struct FormResult {
    /// Most probable point (MPP) in standard normal space.
    pub beta_u: Vec<f64>,
    /// Reliability index β = ‖u*‖.
    pub beta: f64,
    /// FORM probability of failure P_f ≈ Φ(−β).
    pub pf_form: f64,
}

/// First-Order Reliability Method (FORM) via Hasofer-Lind iteration.
///
/// Finds the most probable point (MPP) on the limit-state surface g(u) = 0
/// in the standard normal space by iterative gradient projection.
///
/// # Arguments
/// * `n_dims` - Number of standard normal input dimensions.
/// * `g` - Limit-state function: g(u) < 0 means failure.
/// * `max_iter` - Maximum number of HL-RF iterations.
/// * `tol` - Convergence tolerance on β.
/// * `h` - Finite-difference step for gradient.
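///
/// # Examples
///
/// A minimal sketch with the linear limit state g(u) = 3 − u₀, whose exact
/// reliability index is β = 3; the `use` path is assumed from this file's
/// location.
///
/// ```
/// use oxiphysics_core::uncertainty_quantification::form_analysis;
///
/// let g = |u: &[f64]| 3.0 - u[0];
/// let res = form_analysis(2, &g, 50, 1e-8, 1e-6);
/// assert!((res.beta - 3.0).abs() < 1e-3);
/// ```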
pub fn form_analysis(
    n_dims: usize,
    g: &dyn Fn(&[f64]) -> f64,
    max_iter: usize,
    tol: f64,
    h: f64,
) -> FormResult {
    let mut u = vec![0.0_f64; n_dims]; // start at origin
    let mut beta = 0.0_f64;

    for _ in 0..max_iter {
        let g0 = g(&u);
        // Numerical gradient
        let mut grad = vec![0.0_f64; n_dims];
        for k in 0..n_dims {
            let mut u_plus = u.clone();
            u_plus[k] += h;
            grad[k] = (g(&u_plus) - g0) / h;
        }
        let grad_norm: f64 = grad.iter().map(|x| x * x).sum::<f64>().sqrt();
        if grad_norm < 1e-15 {
            break;
        }
        // HL-RF update: u_{k+1} = (∇g · u − g) / |∇g|² · ∇g
        let dot_grad_u: f64 = grad.iter().zip(u.iter()).map(|(g, uk)| g * uk).sum();
        let factor = (dot_grad_u - g0) / (grad_norm * grad_norm);
        let u_new: Vec<f64> = grad.iter().map(|gi| factor * gi).collect();
        let new_beta: f64 = u_new.iter().map(|x| x * x).sum::<f64>().sqrt();
        if (new_beta - beta).abs() < tol {
            u = u_new;
            beta = new_beta;
            break;
        }
        beta = new_beta;
        u = u_new;
    }

    let pf = normal_cdf(-beta);
    FormResult {
        beta_u: u,
        beta,
        pf_form: pf,
    }
}

/// Second-Order Reliability Method (SORM) correction factor.
///
/// Applies the Breitung correction to the FORM result.  Requires principal
/// curvatures κ_i at the MPP (positive = convex toward safe domain).
///
/// Returns the SORM-corrected probability of failure.
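///
/// # Examples
///
/// A minimal sketch: with zero curvatures the Breitung correction is 1 and
/// SORM reduces to FORM, Φ(−β); the `use` path is assumed from this file's
/// location.
///
/// ```
/// use oxiphysics_core::uncertainty_quantification::{normal_cdf, sorm_breitung};
///
/// let pf = sorm_breitung(3.0, &[0.0, 0.0]);
/// assert!((pf - normal_cdf(-3.0)).abs() < 1e-12);
/// ```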
pub fn sorm_breitung(beta: f64, curvatures: &[f64]) -> f64 {
    let correction: f64 = curvatures
        .iter()
        .map(|kappa| {
            let denom = (1.0 + beta * kappa).max(1e-10);
            1.0 / denom.sqrt()
        })
        .product();
    normal_cdf(-beta) * correction
}

// ─────────────────────────────────────────────────────────────────────────────
// 8. Bootstrap Confidence Intervals
// ─────────────────────────────────────────────────────────────────────────────

/// Bootstrap percentile confidence interval for a statistic.
///
/// Resamples `data` with replacement `n_boot` times, computes the statistic
/// on each resample, and returns the \[alpha/2, 1−alpha/2\] percentile interval.
///
/// # Arguments
/// * `data` - Observed data.
/// * `statistic` - Function computing the statistic from a data slice.
/// * `n_boot` - Number of bootstrap resamples.
/// * `alpha` - Significance level (e.g. 0.05 for 95% CI).
/// * `seed` - RNG seed.
///
/// Returns `(lower_bound, upper_bound)`.
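///
/// # Examples
///
/// A minimal sketch of a 95% CI for the sample mean; the `use` path is
/// assumed from this file's location.
///
/// ```no_run
/// use oxiphysics_core::uncertainty_quantification::{bootstrap_ci, mean};
///
/// let data: Vec<f64> = (1..=100).map(|i| i as f64).collect();
/// let (lo, hi) = bootstrap_ci(&data, &mean, 1_000, 0.05, 42);
/// // The interval should bracket the observed mean of 50.5.
/// assert!(lo < 50.5 && 50.5 < hi);
/// ```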
pub fn bootstrap_ci(
    data: &[f64],
    statistic: &dyn Fn(&[f64]) -> f64,
    n_boot: usize,
    alpha: f64,
    seed: u64,
) -> (f64, f64) {
    let n = data.len();
    let mut rng = UqRng::new(seed);
    let mut boot_stats: Vec<f64> = Vec::with_capacity(n_boot);
    let mut sample = vec![0.0_f64; n];
    for _ in 0..n_boot {
        for s in sample.iter_mut() {
            *s = data[(rng.next_u64() as usize) % n];
        }
        boot_stats.push(statistic(&sample));
    }
    // `percentile` sorts in place; the second call sees already-sorted data.
    let lo = percentile(&mut boot_stats, alpha / 2.0 * 100.0);
    let hi = percentile(&mut boot_stats, (1.0 - alpha / 2.0) * 100.0);
    (lo, hi)
}

// ─────────────────────────────────────────────────────────────────────────────
// 9. MCMC — Metropolis-Hastings Bayesian Inference
// ─────────────────────────────────────────────────────────────────────────────

/// Configuration for the Metropolis-Hastings sampler.
#[derive(Debug, Clone)]
pub struct MhConfig {
    /// Number of MCMC samples to draw.
    pub n_samples: usize,
    /// Burn-in period (samples discarded).
    pub burn_in: usize,
    /// Proposal step standard deviation.
    pub step_size: f64,
    /// RNG seed.
    pub seed: u64,
}

impl Default for MhConfig {
    fn default() -> Self {
        Self {
            n_samples: 5000,
            burn_in: 500,
            step_size: 0.1,
            seed: 42,
        }
    }
}

/// Result of Metropolis-Hastings sampling.
#[derive(Debug, Clone)]
pub struct MhResult {
    /// Posterior samples (post burn-in), shape = n_kept × n_dims.
    pub samples: Vec<Vec<f64>>,
    /// Acceptance rate over the post-burn-in steps.
    pub acceptance_rate: f64,
}

/// Metropolis-Hastings MCMC sampler.
///
/// Draws samples from a target (unnormalised) log-posterior.
///
/// # Arguments
/// * `log_posterior` - Function computing log p(θ | data) up to a constant.
/// * `initial` - Starting point in parameter space.
/// * `config` - Sampler configuration.
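///
/// # Examples
///
/// A minimal sketch sampling a standard normal via its log-density −x²/2;
/// the `use` path is assumed from this file's location, and the tolerance is
/// illustrative.
///
/// ```no_run
/// use oxiphysics_core::uncertainty_quantification::{metropolis_hastings, MhConfig};
///
/// let log_post = |theta: &[f64]| -0.5 * theta[0] * theta[0];
/// let cfg = MhConfig { step_size: 1.0, ..MhConfig::default() };
/// let res = metropolis_hastings(&log_post, &[0.0], &cfg);
/// let m: f64 = res.samples.iter().map(|s| s[0]).sum::<f64>() / res.samples.len() as f64;
/// assert!(m.abs() < 0.2);
/// ```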
pub fn metropolis_hastings(
    log_posterior: &dyn Fn(&[f64]) -> f64,
    initial: &[f64],
    config: &MhConfig,
) -> MhResult {
    let mut rng = UqRng::new(config.seed);
    let mut current = initial.to_vec();
    let mut log_p_current = log_posterior(&current);
    let total = config.n_samples + config.burn_in;
    let mut samples = Vec::with_capacity(config.n_samples);
    let mut n_accepted = 0_usize;

    for step in 0..total {
        // Random-walk Gaussian proposal
        let proposal: Vec<f64> = current
            .iter()
            .map(|x| x + rng.next_normal() * config.step_size)
            .collect();
        let log_p_proposal = log_posterior(&proposal);
        let log_accept = log_p_proposal - log_p_current;
        let u = rng.next_f64();
        if log_accept >= 0.0 || u < log_accept.exp() {
            current = proposal;
            log_p_current = log_p_proposal;
            if step >= config.burn_in {
                n_accepted += 1;
            }
        }
        if step >= config.burn_in {
            samples.push(current.clone());
        }
    }
    let acceptance_rate = n_accepted as f64 / config.n_samples as f64;
    MhResult {
        samples,
        acceptance_rate,
    }
}

// ─────────────────────────────────────────────────────────────────────────────
// 10. Probability of Failure Estimation
// ─────────────────────────────────────────────────────────────────────────────

/// Estimate the probability of failure P_f = P(g(X) ≤ 0) via crude Monte Carlo.
///
/// # Arguments
/// * `params` - Input uncertain parameters.
/// * `limit_state` - Function g(x); failure when g(x) ≤ 0.
/// * `n_samples` - Number of MC samples.
/// * `seed` - RNG seed.
///
/// Returns `(pf_estimate, coefficient_of_variation)`.
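///
/// # Examples
///
/// A minimal sketch: for g(x) = x with X ~ N(0,1), P(g ≤ 0) = 0.5; the `use`
/// path is assumed from this file's location.
///
/// ```no_run
/// use oxiphysics_core::uncertainty_quantification::{
///     probability_of_failure_mc, UncertainParameter,
/// };
///
/// let params = [UncertainParameter::new(0.0, 1.0)];
/// let (pf, cov) = probability_of_failure_mc(&params, 10_000, &|x| x[0], 42);
/// assert!((pf - 0.5).abs() < 0.05);
/// // CoV = sqrt((1 - pf) / (pf * N)) ≈ 0.01 at pf = 0.5, N = 10_000.
/// assert!(cov < 0.05);
/// ```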
pub fn probability_of_failure_mc(
    params: &[UncertainParameter],
    limit_state: &dyn Fn(&[f64]) -> f64,
    n_samples: usize,
    seed: u64,
) -> (f64, f64) {
    let mut rng = UqRng::new(seed);
    let mut n_fail = 0_usize;
    let mut inputs = vec![0.0_f64; params.len()];
    for _ in 0..n_samples {
        for (i, p) in params.iter().enumerate() {
            inputs[i] = rng.next_gaussian(p.mean, p.std);
        }
        if limit_state(&inputs) <= 0.0 {
            n_fail += 1;
        }
    }
    let pf = n_fail as f64 / n_samples as f64;
    let cov = if pf > 0.0 {
        ((1.0 - pf) / (pf * n_samples as f64)).sqrt()
    } else {
        f64::INFINITY
    };
    (pf, cov)
}

/// Importance sampling estimate of P_f with a shifted Gaussian importance density.
///
/// Shifts the sampling distribution to concentrate near the MPP `u_star` in
/// standard normal space.  Uses the self-normalised estimator Σ I·w / Σ w.
///
/// # Arguments
/// * `n_dims` - Number of standard normal input dimensions.
/// * `limit_state_std` - Limit-state function in standard normal space.
/// * `u_star` - Most probable point (MPP) from FORM.
/// * `n_samples` - Number of importance samples.
/// * `seed` - RNG seed.
pub fn importance_sampling_pf(
    n_dims: usize,
    limit_state_std: &dyn Fn(&[f64]) -> f64,
    u_star: &[f64],
    n_samples: usize,
    seed: u64,
) -> f64 {
    let mut rng = UqRng::new(seed);
    let mut sum_w = 0.0_f64;
    let mut sum_iw = 0.0_f64;
    let mut u = vec![0.0_f64; n_dims];
    for _ in 0..n_samples {
        // Sample from N(u_star, I)
        for k in 0..n_dims {
            u[k] = u_star[k] + rng.next_normal();
        }
        // Likelihood ratio w(u) = φ(u) / q(u) = exp(-|u|²/2) / exp(-|u−u*|²/2)
        let norm_u_sq: f64 = u.iter().map(|x| x * x).sum();
        let norm_diff_sq: f64 = u
            .iter()
            .zip(u_star.iter())
            .map(|(ui, ui_star)| (ui - ui_star).powi(2))
            .sum();
        let log_w = -0.5 * norm_u_sq + 0.5 * norm_diff_sq;
        let w = log_w.exp();
        let indicator = if limit_state_std(&u) <= 0.0 { 1.0 } else { 0.0 };
        sum_iw += indicator * w;
        sum_w += w;
    }
    if sum_w < 1e-30 { 0.0 } else { sum_iw / sum_w }
}

/// Compute the reliability index β from a probability of failure estimate.
///
/// β = −Φ⁻¹(P_f).
pub fn reliability_index(pf: f64) -> f64 {
    -probit(pf.clamp(1e-15, 1.0 - 1e-15))
}

// ─────────────────────────────────────────────────────────────────────────────
// 11. Moment-independent sensitivity indices (Delta indices)
// ─────────────────────────────────────────────────────────────────────────────

/// Estimate the moment-independent delta sensitivity index for dimension `dim`.
///
/// Δ_i = 0.5 · E\[|f_Y(y) − f_{Y|Xi}(y)|\] approximated via binning.
///
/// # Arguments
/// * `x_samples` - Matrix of input samples (n_samples × n_dims).
/// * `y_samples` - Corresponding output samples.
/// * `dim` - Input dimension to analyse.
/// * `n_bins` - Number of bins for density estimation.
pub fn delta_sensitivity(
    x_samples: &[Vec<f64>],
    y_samples: &[f64],
    dim: usize,
    n_bins: usize,
) -> f64 {
    let n = y_samples.len();
    if n == 0 || n_bins == 0 {
        return 0.0;
    }

    let y_min = y_samples.iter().cloned().fold(f64::INFINITY, f64::min);
    let y_max = y_samples.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
    let range = (y_max - y_min).max(1e-15);
    let bin_w = range / n_bins as f64;

    // Marginal density (histogram)
    let mut f_y = vec![0.0_f64; n_bins];
    for &yi in y_samples {
        let bin = ((yi - y_min) / bin_w).floor() as usize;
        f_y[bin.min(n_bins - 1)] += 1.0 / (n as f64 * bin_w);
    }

    // Get unique values of x_dim and stratify
    let x_dim: Vec<f64> = x_samples.iter().map(|row| row[dim]).collect();
    let x_min = x_dim.iter().cloned().fold(f64::INFINITY, f64::min);
    let x_max = x_dim.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
    let x_range = (x_max - x_min).max(1e-15);
    let n_x_bins = (n.isqrt()).max(2);
    let x_bin_w = x_range / n_x_bins as f64;

    let mut delta = 0.0_f64;
    for xi_bin in 0..n_x_bins {
        let x_lo = x_min + xi_bin as f64 * x_bin_w;
        let x_hi = x_lo + x_bin_w;
        let sub_y: Vec<f64> = x_dim
            .iter()
            .zip(y_samples.iter())
            .filter(|&(&xi, _)| xi >= x_lo && xi < x_hi)
            .map(|(_, &yi)| yi)
            .collect();
        if sub_y.is_empty() {
            continue;
        }
        let p_x = sub_y.len() as f64 / n as f64;
        let mut f_y_given_x = vec![0.0_f64; n_bins];
        for &yi in &sub_y {
            let bin = ((yi - y_min) / bin_w).floor() as usize;
            f_y_given_x[bin.min(n_bins - 1)] += 1.0 / (sub_y.len() as f64 * bin_w);
        }
        let tv: f64 = f_y
            .iter()
            .zip(f_y_given_x.iter())
            .map(|(a, b)| (a - b).abs() * bin_w)
            .sum();
        delta += p_x * tv;
    }
    delta * 0.5
}

// ─────────────────────────────────────────────────────────────────────────────
// 12. Coefficient of Variation and standard UQ metrics
// ─────────────────────────────────────────────────────────────────────────────

/// Coefficient of variation CoV = σ / |μ|.
///
/// Returns `f64::INFINITY` when the mean is zero.
pub fn coefficient_of_variation(data: &[f64]) -> f64 {
    let m = mean(data);
    if m.abs() < 1e-15 {
        return f64::INFINITY;
    }
    std_dev(data) / m.abs()
}

/// Skewness of a dataset = E\[(X−μ)³\] / σ³.
pub fn skewness(data: &[f64]) -> f64 {
    if data.len() < 3 {
        return 0.0;
    }
    let m = mean(data);
    let s = std_dev(data);
    if s < 1e-15 {
        return 0.0;
    }
    let n = data.len() as f64;
    data.iter().map(|x| ((x - m) / s).powi(3)).sum::<f64>() / n
}

/// Excess kurtosis = E\[(X−μ)⁴\] / σ⁴ − 3.
pub fn excess_kurtosis(data: &[f64]) -> f64 {
    if data.len() < 4 {
        return 0.0;
    }
    let m = mean(data);
    let s = std_dev(data);
    if s < 1e-15 {
        return 0.0;
    }
    let n = data.len() as f64;
    data.iter().map(|x| ((x - m) / s).powi(4)).sum::<f64>() / n - 3.0
}

/// Compute a P-box (probability box) from a set of CDFs.
///
/// Given a collection of sample sets (one per scenario), returns the lower
/// and upper CDF bounds at a vector of evaluation points.
///
/// # Arguments
/// * `scenarios` - Each element is a sample set for one scenario.
/// * `eval_points` - Points at which to evaluate the CDF bounds.
///
/// Returns `(cdf_lower, cdf_upper)` each of length `eval_points.len()`.
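///
/// # Examples
///
/// A minimal sketch with two scenarios; the `use` path is assumed from this
/// file's location.
///
/// ```
/// use oxiphysics_core::uncertainty_quantification::probability_box;
///
/// let scenarios = vec![vec![1.0, 2.0, 3.0], vec![2.0, 3.0, 4.0]];
/// let (lower, upper) = probability_box(&scenarios, &[2.5]);
/// // At 2.5 the empirical CDFs are 2/3 and 1/3, so the box is [1/3, 2/3].
/// assert!((lower[0] - 1.0 / 3.0).abs() < 1e-12);
/// assert!((upper[0] - 2.0 / 3.0).abs() < 1e-12);
/// ```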
1454pub fn probability_box(scenarios: &[Vec<f64>], eval_points: &[f64]) -> (Vec<f64>, Vec<f64>) {
1455    let n_pts = eval_points.len();
1456    let mut lower = vec![f64::INFINITY; n_pts];
1457    let mut upper = vec![f64::NEG_INFINITY; n_pts];
1458
1459    for scenario in scenarios {
1460        let n = scenario.len() as f64;
1461        for (k, &ep) in eval_points.iter().enumerate() {
1462            let cdf = scenario.iter().filter(|&&x| x <= ep).count() as f64 / n;
1463            if cdf < lower[k] {
1464                lower[k] = cdf;
1465            }
1466            if cdf > upper[k] {
1467                upper[k] = cdf;
1468            }
1469        }
1470    }
1471    (lower, upper)
1472}
1473
1474// ─────────────────────────────────────────────────────────────────────────────
1475// Tests
1476// ─────────────────────────────────────────────────────────────────────────────
1477
1478#[cfg(test)]
1479mod tests {
1480    use super::*;
1481
1482    // ── helper ───────────────────────────────────────────────────────────────
1483
1484    fn linear_model(x: &[f64]) -> f64 {
1485        x.iter()
1486            .enumerate()
1487            .map(|(i, xi)| (i + 1) as f64 * xi)
1488            .sum()
1489    }
1490
1491    // ── mean / variance / std_dev ─────────────────────────────────────────────
1492
1493    #[test]
1494    fn test_mean_empty() {
1495        assert_eq!(mean(&[]), 0.0);
1496    }
1497
1498    #[test]
1499    fn test_mean_single() {
1500        assert!((mean(&[42.0]) - 42.0).abs() < 1e-12);
1501    }
1502
1503    #[test]
1504    fn test_mean_basic() {
1505        assert!((mean(&[1.0, 2.0, 3.0, 4.0, 5.0]) - 3.0).abs() < 1e-12);
1506    }
1507
1508    #[test]
1509    fn test_variance_zero() {
1510        assert_eq!(variance(&[5.0, 5.0, 5.0]), 0.0);
1511    }
1512
1513    #[test]
1514    fn test_variance_known() {
1515        let data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0];
1516        let v = variance(&data);
1517        assert!((v - 4.0).abs() < 0.1, "variance={v}");
1518    }
1519
1520    #[test]
1521    fn test_std_dev_nonneg() {
1522        let data: Vec<f64> = (1..=100).map(|i| i as f64).collect();
1523        assert!(std_dev(&data) > 0.0);
1524    }
1525
1526    // ── probit / normal_cdf ───────────────────────────────────────────────────
1527
1528    #[test]
1529    fn test_probit_symmetry() {
1530        assert!((probit(0.5)).abs() < 1e-3);
1531    }
1532
1533    #[test]
1534    fn test_normal_cdf_symmetry() {
1535        assert!((normal_cdf(0.0) - 0.5).abs() < 1e-6);
1536    }
1537
1538    #[test]
1539    fn test_probit_normal_cdf_inverse() {
1540        for p in [0.1, 0.25, 0.5, 0.75, 0.9] {
1541            let x = probit(p);
1542            let p2 = normal_cdf(x);
1543            assert!((p2 - p).abs() < 1e-3, "p={p} p2={p2}");
1544        }
1545    }
1546
1547    // ── percentile ────────────────────────────────────────────────────────────
1548
1549    #[test]
1550    fn test_percentile_median() {
1551        let mut data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
1552        assert!((percentile(&mut data, 50.0) - 3.0).abs() < 1e-10);
1553    }
1554
1555    #[test]
1556    fn test_percentile_min_max() {
1557        let mut data = vec![10.0, 20.0, 30.0];
1558        assert!((percentile(&mut data, 0.0) - 10.0).abs() < 1e-10);
1559        let mut data2 = vec![10.0, 20.0, 30.0];
1560        assert!((percentile(&mut data2, 100.0) - 30.0).abs() < 1e-10);
1561    }
1562
1563    // ── hermite_poly ─────────────────────────────────────────────────────────
1564
1565    #[test]
1566    fn test_hermite_poly_degree0() {
1567        assert_eq!(hermite_poly(0, 3.5), 1.0);
1568    }
1569
1570    #[test]
1571    fn test_hermite_poly_degree1() {
1572        assert!((hermite_poly(1, 2.7) - 2.7).abs() < 1e-12);
1573    }
1574
1575    #[test]
1576    fn test_hermite_poly_degree2() {
1577        // He_2(x) = x² - 1
1578        let x = 2.0;
1579        assert!((hermite_poly(2, x) - (x * x - 1.0)).abs() < 1e-12);
1580    }
1581
1582    #[test]
1583    fn test_hermite_poly_degree3() {
1584        // He_3(x) = x³ - 3x
1585        let x = 1.5;
1586        assert!((hermite_poly(3, x) - (x.powi(3) - 3.0 * x)).abs() < 1e-12);
1587    }
1588
1589    // ── LHS ──────────────────────────────────────────────────────────────────
1590
1591    #[test]
1592    fn test_lhs_shape() {
1593        let lhs = latin_hypercube_sample(10, 3, 42);
1594        assert_eq!(lhs.n_samples, 10);
1595        assert_eq!(lhs.n_dims, 3);
1596        assert_eq!(lhs.data.len(), 30);
1597    }
1598
1599    #[test]
1600    fn test_lhs_bounds() {
1601        let lhs = latin_hypercube_sample(50, 4, 7);
1602        for v in &lhs.data {
1603            assert!(*v >= 0.0 && *v <= 1.0, "value out of [0,1]: {v}");
1604        }
1605    }
1606
1607    #[test]
1608    fn test_lhs_stratification_1d() {
1609        let n = 20;
1610        let lhs = latin_hypercube_sample(n, 1, 123);
1611        let mut bins = vec![false; n];
1612        let step = 1.0 / n as f64;
1613        for i in 0..n {
1614            let v = lhs.get(i, 0);
1615            let b = (v / step).floor() as usize;
1616            let b = b.min(n - 1);
1617            bins[b] = true;
1618        }
1619        assert!(bins.iter().all(|&b| b), "LHS should cover all strata");
1620    }
1621
1622    #[test]
1623    fn test_scale_lhs() {
1624        let lhs = latin_hypercube_sample(5, 2, 0);
1625        let bounds = [(0.0, 10.0), (-5.0, 5.0)];
1626        let scaled = scale_lhs(&lhs, &bounds);
1627        for row in &scaled {
1628            assert!(row[0] >= 0.0 && row[0] <= 10.0);
1629            assert!(row[1] >= -5.0 && row[1] <= 5.0);
1630        }
1631    }

    // ── Monte Carlo propagation ───────────────────────────────────────────────

    #[test]
    fn test_mc_propagation_mean() {
        // Y = X1 + X2, X1 ~ N(2,0.1), X2 ~ N(3,0.1) => E[Y] ≈ 5
        let params = vec![
            UncertainParameter::new(2.0, 0.1),
            UncertainParameter::new(3.0, 0.1),
        ];
        let result = monte_carlo_propagation(&params, 5000, &|x| x[0] + x[1], 1);
        assert!((result.mean - 5.0).abs() < 0.1, "mean={}", result.mean);
    }

    #[test]
    fn test_mc_propagation_std() {
        // Y = X, X ~ N(0, 1) => σ_Y ≈ 1
        let params = vec![UncertainParameter::new(0.0, 1.0)];
        let result = monte_carlo_propagation(&params, 10_000, &|x| x[0], 99);
        assert!((result.std - 1.0).abs() < 0.1, "std={}", result.std);
    }

    #[test]
    fn test_mc_propagation_percentiles() {
        let params = vec![UncertainParameter::new(0.0, 1.0)];
        let result = monte_carlo_propagation(&params, 10_000, &|x| x[0], 7);
        // For N(0,1): p05 ≈ -1.645 and p95 ≈ +1.645
        assert!((result.p05 + 1.645).abs() < 0.15, "p05={}", result.p05);
        assert!((result.p95 - 1.645).abs() < 0.15, "p95={}", result.p95);
    }
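
    // A sketch complementing the mean test above: for independent inputs the
    // variances add, so with X1 ~ N(2, 0.1) and X2 ~ N(3, 0.1) we expect
    // σ_Y = sqrt(0.01 + 0.01) ≈ 0.1414. The tolerance is deliberately loose
    // to absorb sampling noise.
    #[test]
    fn test_mc_propagation_std_of_sum_sketch() {
        let params = vec![
            UncertainParameter::new(2.0, 0.1),
            UncertainParameter::new(3.0, 0.1),
        ];
        let result = monte_carlo_propagation(&params, 10_000, &|x| x[0] + x[1], 11);
        assert!(
            (result.std - 0.02_f64.sqrt()).abs() < 0.02,
            "std={}",
            result.std
        );
    }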

    // ── Morris sensitivity ────────────────────────────────────────────────────

    #[test]
    fn test_morris_sensitivity_shape() {
        let bounds = vec![(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)];
        let result = morris_sensitivity(3, 20, 0.1, &linear_model, &bounds, 5);
        assert_eq!(result.mu_star.len(), 3);
        assert_eq!(result.sigma.len(), 3);
    }

    #[test]
    fn test_morris_sensitivity_rank_order() {
        // linear_model = 1*x0 + 2*x1 + 3*x2, so the mu* ranking should follow
        // the coefficients: mu_star[2] >= mu_star[1] >= mu_star[0].
        let bounds = vec![(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)];
        let result = morris_sensitivity(3, 50, 0.1, &linear_model, &bounds, 13);
        assert!(
            result.mu_star[2] >= result.mu_star[1],
            "mu*[2]={} mu*[1]={}",
            result.mu_star[2],
            result.mu_star[1]
        );
        assert!(
            result.mu_star[1] >= result.mu_star[0],
            "mu*[1]={} mu*[0]={}",
            result.mu_star[1],
            result.mu_star[0]
        );
    }
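
    // A sketch of another Morris property: every input of linear_model has a
    // nonzero coefficient, so each μ* should be strictly positive, and for a
    // purely linear model the elementary effects barely vary, so the spread
    // σ_i is expected to stay below μ*_i. The cutoff is an assumption, not a
    // sharp bound.
    #[test]
    fn test_morris_linear_model_effects_sketch() {
        let bounds = vec![(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)];
        let result = morris_sensitivity(3, 30, 0.1, &linear_model, &bounds, 21);
        for i in 0..3 {
            assert!(result.mu_star[i] > 0.0, "mu*[{i}] should be positive");
            assert!(
                result.sigma[i] < result.mu_star[i] + 1e-9,
                "sigma[{i}]={} mu*[{i}]={}",
                result.sigma[i],
                result.mu_star[i]
            );
        }
    }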

    // ── Sobol indices ─────────────────────────────────────────────────────────

    #[test]
    fn test_sobol_shape() {
        let bounds = vec![(0.0, 1.0), (0.0, 1.0)];
        let indices = sobol_indices(2, 100, &|x| x[0] + x[1], &bounds, 42);
        assert_eq!(indices.first_order.len(), 2);
        assert_eq!(indices.total_order.len(), 2);
    }

    #[test]
    fn test_sobol_additive_model() {
        // For Y = X1 + X2 (independent uniform), S1 ≈ S2 ≈ 0.5
        let bounds = vec![(0.0, 1.0), (0.0, 1.0)];
        let indices = sobol_indices(2, 500, &|x| x[0] + x[1], &bounds, 99);
        assert!(
            (indices.first_order[0] - indices.first_order[1]).abs() < 0.2,
            "S1={} S2={}",
            indices.first_order[0],
            indices.first_order[1]
        );
    }
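
    // A sketch of the no-interaction case: Y = X1 + X2 has no interaction
    // terms, so each total-order index should roughly match the corresponding
    // first-order index. The 0.3 tolerance is an assumption sized for the
    // small sample budget of the Monte Carlo estimator.
    #[test]
    fn test_sobol_additive_total_matches_first_sketch() {
        let bounds = vec![(0.0, 1.0), (0.0, 1.0)];
        let indices = sobol_indices(2, 1000, &|x| x[0] + x[1], &bounds, 7);
        for i in 0..2 {
            assert!(
                (indices.total_order[i] - indices.first_order[i]).abs() < 0.3,
                "i={i} S={} ST={}",
                indices.first_order[i],
                indices.total_order[i]
            );
        }
    }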

    // ── Stochastic collocation ────────────────────────────────────────────────

    #[test]
    fn test_collocation_constant_model() {
        let (m, v) = stochastic_collocation(1, 3, &|_| 5.0);
        assert!((m - 5.0).abs() < 1e-6, "mean={m}");
        assert!(v < 1e-10, "var={v}");
    }

    #[test]
    fn test_collocation_linear_model_mean() {
        // Y = X, X ~ N(0,1) => E[Y] = 0
        let (m, _v) = stochastic_collocation(1, 5, &|x| x[0]);
        assert!(m.abs() < 0.1, "mean={m}");
    }
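
    // A sketch of the second moment: for Y = X with X ~ N(0,1) the variance
    // is exactly 1, and Gauss-Hermite quadrature integrates the quadratic
    // moment exactly even at low order. The loose tolerance hedges against
    // normalization details of the collocation rule.
    #[test]
    fn test_collocation_linear_model_variance_sketch() {
        let (_m, v) = stochastic_collocation(1, 5, &|x| x[0]);
        assert!((v - 1.0).abs() < 0.2, "var={v}");
    }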

    // ── GP regression ─────────────────────────────────────────────────────────

    #[test]
    fn test_gp_fit_predict_interpolation() {
        let x_train: Vec<Vec<f64>> = (0..5).map(|i| vec![i as f64]).collect();
        let y_train: Vec<f64> = x_train.iter().map(|x| x[0] * x[0]).collect();
        let gp = fit_gp(x_train.clone(), y_train.clone(), 2.0, 1.5, 0.01);
        // Predict at training points: the posterior mean should stay close to
        // the training values.
        for (i, xi) in x_train.iter().enumerate() {
            let (pred_m, _pred_v) = gp.predict(xi);
            assert!(
                (pred_m - y_train[i]).abs() < 2.0,
                "i={i} pred={pred_m} true={}",
                y_train[i]
            );
        }
    }

    #[test]
    fn test_rbf_kernel_symmetry() {
        let x = vec![1.0, 2.0];
        let y = vec![3.0, 1.0];
        let k1 = rbf_kernel(&x, &y, 1.0, 1.0);
        let k2 = rbf_kernel(&y, &x, 1.0, 1.0);
        assert!((k1 - k2).abs() < 1e-12);
    }

    #[test]
    fn test_rbf_kernel_self_similarity() {
        let x = vec![1.0, 2.0, 3.0];
        let k = rbf_kernel(&x, &x, 2.0, 1.0);
        assert!((k - 4.0).abs() < 1e-12); // sigma_f^2 = 4
    }
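
    // A sketch of kernel decay: the RBF kernel falls off monotonically with
    // distance, so k(x, x) must dominate k(x, y) for any y != x, and nearer
    // points must score higher than farther ones.
    #[test]
    fn test_rbf_kernel_decay_sketch() {
        let x = vec![0.0, 0.0];
        let near = vec![0.1, 0.0];
        let far = vec![3.0, 0.0];
        let k_self = rbf_kernel(&x, &x, 1.0, 1.0);
        let k_near = rbf_kernel(&x, &near, 1.0, 1.0);
        let k_far = rbf_kernel(&x, &far, 1.0, 1.0);
        assert!(
            k_self > k_near && k_near > k_far,
            "k_self={k_self} k_near={k_near} k_far={k_far}"
        );
    }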

    // ── FORM ──────────────────────────────────────────────────────────────────

    #[test]
    fn test_form_simple_limit_state() {
        // g(u) = β_target - u1 (limit state is u1 = β_target)
        let beta_target = 2.0;
        let result = form_analysis(1, &|u| beta_target - u[0], 50, 1e-6, 1e-5);
        assert!(
            (result.beta - beta_target).abs() < 0.1,
            "beta={}",
            result.beta
        );
    }

    #[test]
    fn test_form_pf_decreases_with_beta() {
        let r1 = form_analysis(2, &|u| 3.0 - u[0] - u[1], 50, 1e-6, 1e-5);
        let r2 = form_analysis(2, &|u| 1.0 - u[0] - u[1], 50, 1e-6, 1e-5);
        assert!(
            r1.pf_form < r2.pf_form,
            "pf1={} pf2={}",
            r1.pf_form,
            r2.pf_form
        );
    }
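
    // A sketch of internal consistency: FORM reports p_f = Φ(-β) by
    // definition, so the two result fields should satisfy that identity.
    // Assumes `pf_form` is derived from `beta` via the same standard normal
    // CDF exposed as `normal_cdf`; the tolerance hedges solver error.
    #[test]
    fn test_form_pf_beta_consistency_sketch() {
        let result = form_analysis(1, &|u| 2.5 - u[0], 50, 1e-6, 1e-5);
        assert!(
            (result.pf_form - normal_cdf(-result.beta)).abs() < 1e-3,
            "pf={} beta={}",
            result.pf_form,
            result.beta
        );
    }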

    // ── SORM ──────────────────────────────────────────────────────────────────

    #[test]
    fn test_sorm_zero_curvature_equals_form() {
        let beta = 2.0;
        let pf_form = normal_cdf(-beta);
        let pf_sorm = sorm_breitung(beta, &[]);
        assert!((pf_sorm - pf_form).abs() < 1e-12);
    }

    #[test]
    fn test_sorm_positive_curvature_reduces_pf() {
        let beta = 2.0;
        let pf_form = normal_cdf(-beta);
        let pf_sorm = sorm_breitung(beta, &[0.5]);
        assert!(pf_sorm < pf_form, "pf_sorm={pf_sorm} pf_form={pf_form}");
    }
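
    // A sketch of the opposite case: Breitung's correction multiplies Φ(-β)
    // by ∏ (1 + β·κ_i)^(-1/2), so a negative principal curvature (with
    // 1 + β·κ still positive) should raise the failure probability above the
    // FORM value. The sign convention is inferred from the test above.
    #[test]
    fn test_sorm_negative_curvature_raises_pf_sketch() {
        let beta = 2.0;
        let pf_form = normal_cdf(-beta);
        let pf_sorm = sorm_breitung(beta, &[-0.2]);
        assert!(pf_sorm > pf_form, "pf_sorm={pf_sorm} pf_form={pf_form}");
    }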

    // ── Bootstrap CI ─────────────────────────────────────────────────────────

    #[test]
    fn test_bootstrap_ci_contains_true_mean() {
        let data: Vec<f64> = (0..100).map(|i| i as f64 / 100.0).collect();
        let (lo, hi) = bootstrap_ci(&data, &|s| mean(s), 1000, 0.05, 42);
        let true_mean = 0.495;
        assert!(
            lo <= true_mean && true_mean <= hi,
            "CI=[{lo},{hi}] true_mean={true_mean}"
        );
    }

    #[test]
    fn test_bootstrap_ci_ordering() {
        let data: Vec<f64> = (1..=50).map(|i| i as f64).collect();
        let (lo, hi) = bootstrap_ci(&data, &|s| mean(s), 500, 0.1, 7);
        assert!(lo <= hi, "lo={lo} hi={hi}");
    }
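
    // A sketch of interval nesting: assuming the same seed reproduces the
    // same bootstrap replicates (the RNG here is deterministic), a 95%
    // interval (alpha = 0.05) should contain the 50% interval (alpha = 0.5)
    // built from those same resamples.
    #[test]
    fn test_bootstrap_ci_width_grows_with_confidence_sketch() {
        let data: Vec<f64> = (0..80).map(|i| (i as f64).sin()).collect();
        let (lo95, hi95) = bootstrap_ci(&data, &|s| mean(s), 1000, 0.05, 9);
        let (lo50, hi50) = bootstrap_ci(&data, &|s| mean(s), 1000, 0.5, 9);
        assert!(
            lo95 <= lo50 && hi50 <= hi95,
            "[{lo95},{hi95}] should contain [{lo50},{hi50}]"
        );
    }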

    // ── Metropolis-Hastings ────────────────────────────────────────────────────

    #[test]
    fn test_mh_samples_count() {
        let config = MhConfig {
            n_samples: 200,
            burn_in: 50,
            step_size: 0.5,
            seed: 1,
        };
        let log_post = |theta: &[f64]| -0.5 * theta[0] * theta[0];
        let result = metropolis_hastings(&log_post, &[0.0], &config);
        assert_eq!(result.samples.len(), 200);
    }

    #[test]
    fn test_mh_normal_target_mean() {
        // Target: N(3, 1), log p ∝ -0.5(x-3)^2
        let config = MhConfig {
            n_samples: 5000,
            burn_in: 500,
            step_size: 1.0,
            seed: 42,
        };
        let log_post = |theta: &[f64]| -0.5 * (theta[0] - 3.0).powi(2);
        let result = metropolis_hastings(&log_post, &[0.0], &config);
        let vals: Vec<f64> = result.samples.iter().map(|s| s[0]).collect();
        let m = mean(&vals);
        assert!((m - 3.0).abs() < 0.3, "mean={m}");
    }

    #[test]
    fn test_mh_acceptance_rate_reasonable() {
        let config = MhConfig {
            n_samples: 1000,
            burn_in: 100,
            step_size: 0.5,
            seed: 77,
        };
        let log_post = |theta: &[f64]| -0.5 * theta[0] * theta[0];
        let result = metropolis_hastings(&log_post, &[0.0], &config);
        // Acceptance rate should be between 5% and 95%
        assert!(
            result.acceptance_rate > 0.05 && result.acceptance_rate < 0.95,
            "acceptance_rate={}",
            result.acceptance_rate
        );
    }
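
    // A sketch extending the N(3, 1) target check to the second moment: the
    // post-burn-in samples should also reproduce a unit standard deviation.
    // Starting at the mode skips the transient; the tolerance is generous
    // because MCMC draws are autocorrelated.
    #[test]
    fn test_mh_normal_target_std_sketch() {
        let config = MhConfig {
            n_samples: 5000,
            burn_in: 500,
            step_size: 1.0,
            seed: 314,
        };
        let log_post = |theta: &[f64]| -0.5 * (theta[0] - 3.0).powi(2);
        let result = metropolis_hastings(&log_post, &[3.0], &config);
        let vals: Vec<f64> = result.samples.iter().map(|s| s[0]).collect();
        let s = std_dev(&vals);
        assert!((s - 1.0).abs() < 0.3, "std={s}");
    }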

    // ── Probability of failure ─────────────────────────────────────────────────

    #[test]
    fn test_pof_mc_certain_failure() {
        // g(x) = -1 always, so every sample should fail
        let params = vec![UncertainParameter::new(0.0, 1.0)];
        let (pf, _cov) = probability_of_failure_mc(&params, &|_| -1.0, 1000, 0);
        assert!((pf - 1.0).abs() < 1e-10, "pf={pf}");
    }

    #[test]
    fn test_pof_mc_certain_safe() {
        // g(x) = 1 always, so no sample should fail
        let params = vec![UncertainParameter::new(0.0, 1.0)];
        let (pf, _cov) = probability_of_failure_mc(&params, &|_| 1.0, 1000, 0);
        assert_eq!(pf, 0.0);
    }

    #[test]
    fn test_pof_mc_reasonable_estimate() {
        // g(x) = 2 - x, X ~ N(0,1): failure (g <= 0) means x >= 2,
        // so P_f = 1 - Φ(2) ≈ 0.0228
        let params = vec![UncertainParameter::new(0.0, 1.0)];
        let (pf, _cov) = probability_of_failure_mc(&params, &|x| 2.0 - x[0], 50_000, 5);
        assert!((pf - 0.0228).abs() < 0.01, "pf={pf}, expected≈0.0228");
    }
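
    // A sketch of monotonicity in the safety margin: widening the margin can
    // only shrink the failure set, so P_f for g = 3 - x should fall well
    // below P_f for g = 1 - x (exact values ≈ 0.00135 and ≈ 0.159 for
    // X ~ N(0,1)). Reusing the seed keeps the comparison paired.
    #[test]
    fn test_pof_mc_monotone_in_margin_sketch() {
        let params = vec![UncertainParameter::new(0.0, 1.0)];
        let (pf_wide, _) = probability_of_failure_mc(&params, &|x| 3.0 - x[0], 50_000, 3);
        let (pf_narrow, _) = probability_of_failure_mc(&params, &|x| 1.0 - x[0], 50_000, 3);
        assert!(
            pf_wide < pf_narrow,
            "pf_wide={pf_wide} pf_narrow={pf_narrow}"
        );
    }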

    // ── Statistics helpers ─────────────────────────────────────────────────────

    #[test]
    fn test_cov_finite() {
        let data = vec![1.0, 2.0, 3.0];
        let cov = coefficient_of_variation(&data);
        assert!(cov.is_finite() && cov > 0.0);
    }

    #[test]
    fn test_skewness_symmetric() {
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        assert!(skewness(&data).abs() < 0.1);
    }

    #[test]
    fn test_excess_kurtosis_normal() {
        // Generate many N(0,1) samples via the internal RNG
        let mut rng = UqRng::new(123);
        let data: Vec<f64> = (0..5000).map(|_| rng.next_normal()).collect();
        let k = excess_kurtosis(&data);
        assert!(k.abs() < 0.5, "excess_kurtosis={k}");
    }
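
    // A sketch of the asymmetric case: squaring standard normals gives a
    // χ²(1) sample whose skewness is √8 ≈ 2.83, so the estimate should come
    // out clearly positive. The cutoff of 1.0 is loose on purpose.
    #[test]
    fn test_skewness_right_skewed_sketch() {
        let mut rng = UqRng::new(55);
        let data: Vec<f64> = (0..5000).map(|_| rng.next_normal().powi(2)).collect();
        let s = skewness(&data);
        assert!(s > 1.0, "skewness={s}");
    }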

    // ── P-box ─────────────────────────────────────────────────────────────────

    #[test]
    fn test_probability_box_bounds() {
        let s1 = vec![1.0, 2.0, 3.0, 4.0];
        let s2 = vec![0.5, 1.5, 3.5, 5.0];
        let (lower, upper) = probability_box(&[s1, s2], &[2.0, 3.0]);
        assert!(lower[0] <= upper[0]);
        assert!(lower[1] <= upper[1]);
    }
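
    // A sketch of threshold monotonicity: both p-box envelopes are CDF
    // bounds, so each should be nondecreasing in the threshold. Assumes
    // `probability_box` evaluates the envelopes at the thresholds in order,
    // as the shape test above suggests.
    #[test]
    fn test_probability_box_monotone_sketch() {
        let s1 = vec![1.0, 2.0, 3.0, 4.0];
        let s2 = vec![0.5, 1.5, 3.5, 5.0];
        let (lower, upper) = probability_box(&[s1, s2], &[1.0, 2.0, 4.0]);
        for w in [&lower, &upper] {
            assert!(w[0] <= w[1] && w[1] <= w[2], "envelope not monotone: {w:?}");
        }
    }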

    // ── Reliability index ──────────────────────────────────────────────────────

    #[test]
    fn test_reliability_index_known() {
        // P_f = Φ(-β) => β = -Φ⁻¹(P_f)
        let pf = normal_cdf(-2.0);
        let beta = reliability_index(pf);
        assert!((beta - 2.0).abs() < 0.05, "beta={beta}");
    }
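
    // A sketch of monotonicity: β = -Φ⁻¹(P_f) is strictly decreasing in P_f,
    // so a rarer failure must map to a larger reliability index.
    #[test]
    fn test_reliability_index_monotone_sketch() {
        let beta_rare = reliability_index(1e-4);
        let beta_common = reliability_index(1e-1);
        assert!(
            beta_rare > beta_common,
            "beta_rare={beta_rare} beta_common={beta_common}"
        );
    }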

    // ── PCE mean and variance ──────────────────────────────────────────────────

    #[test]
    fn test_pce_mean_constant_model() {
        let pce = fit_pce(1, 1, 10, &|_| 7.0, 42);
        assert!(
            (pce.pce_mean() - 7.0).abs() < 0.5,
            "mean={}",
            pce.pce_mean()
        );
    }

    #[test]
    fn test_pce_evaluate_consistency() {
        let pce = fit_pce(2, 2, 30, &|x| x[0] + x[1], 3);
        // E[X1 + X2] = 0 for standard normal inputs, so the PCE evaluated at
        // the germ origin should be near zero.
        let pred = pce.evaluate(&[0.0, 0.0]);
        assert!(pred.abs() < 2.0, "pred={pred}");
    }
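
    // A sketch strengthening the constant-model check: a PCE fitted to a
    // constant should evaluate to that constant anywhere in germ space, not
    // just reproduce the mean. The tolerance matches the loose fit check
    // above.
    #[test]
    fn test_pce_constant_model_evaluate_sketch() {
        let pce = fit_pce(1, 2, 20, &|_| 7.0, 8);
        for x in [-1.0, 0.0, 1.5] {
            let pred = pce.evaluate(&[x]);
            assert!((pred - 7.0).abs() < 0.5, "x={x} pred={pred}");
        }
    }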

    // ── Gauss-Hermite nodes ────────────────────────────────────────────────────

    #[test]
    fn test_gauss_hermite_order1_weight() {
        let nodes = gauss_hermite_nodes(1);
        assert_eq!(nodes.len(), 1);
        assert!((nodes[0].weight - PI.sqrt()).abs() < 1e-6);
    }

    #[test]
    fn test_gauss_hermite_order3_symmetry() {
        let nodes = gauss_hermite_nodes(3);
        assert_eq!(nodes.len(), 3);
        // Nodes should be symmetric: nodes[0].point ≈ -nodes[2].point
        assert!((nodes[0].point + nodes[2].point).abs() < 1e-8);
    }
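
    // A sketch generalizing the order-1 weight test: an n-point Gauss-Hermite
    // rule integrates the constant 1 against e^(-x²) exactly, so the weights
    // should sum to √π at every order. Assumes the physicists' normalization,
    // which the order-1 case above already exhibits.
    #[test]
    fn test_gauss_hermite_weights_sum_sketch() {
        for order in [2, 3, 4] {
            let total: f64 = gauss_hermite_nodes(order).iter().map(|n| n.weight).sum();
            assert!((total - PI.sqrt()).abs() < 1e-6, "order={order} sum={total}");
        }
    }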
}