1#![allow(clippy::if_same_then_else, clippy::ptr_arg)]
2#![allow(dead_code)]
20#![allow(clippy::too_many_arguments)]
21
22use std::f64::consts::{FRAC_1_SQRT_2, PI};
23
/// Minimal linear-congruential PRNG giving reproducible, dependency-free
/// random streams for all sampling routines in this module.
struct UqRng {
    // Current LCG state; the constructor guarantees it is nonzero.
    state: u64,
}
32
33impl UqRng {
34 fn new(seed: u64) -> Self {
35 Self { state: seed.max(1) }
36 }
37
38 fn next_u64(&mut self) -> u64 {
39 self.state = self
40 .state
41 .wrapping_mul(6_364_136_223_846_793_005)
42 .wrapping_add(1_442_695_040_888_963_407);
43 self.state
44 }
45
46 fn next_f64(&mut self) -> f64 {
47 (self.next_u64() >> 11) as f64 * (1.0 / (1u64 << 53) as f64)
48 }
49
50 fn next_normal(&mut self) -> f64 {
52 loop {
53 let u1 = self.next_f64();
54 let u2 = self.next_f64();
55 if u1 > 1e-15 {
56 return (-2.0 * u1.ln()).sqrt() * (2.0 * PI * u2).cos();
57 }
58 }
59 }
60
61 fn next_gaussian(&mut self, mean: f64, std: f64) -> f64 {
63 mean + std * self.next_normal()
64 }
65}
66
/// Arithmetic mean of `data`; defined as 0.0 for an empty slice
/// (instead of NaN) so downstream statistics stay finite.
pub fn mean(data: &[f64]) -> f64 {
    match data.len() {
        0 => 0.0,
        n => data.iter().sum::<f64>() / n as f64,
    }
}
80
81pub fn variance(data: &[f64]) -> f64 {
85 if data.len() < 2 {
86 return 0.0;
87 }
88 let m = mean(data);
89 data.iter().map(|x| (x - m).powi(2)).sum::<f64>() / data.len() as f64
90}
91
92pub fn std_dev(data: &[f64]) -> f64 {
94 variance(data).sqrt()
95}
96
/// Linearly interpolated percentile of `data` for `p` in [0, 100].
///
/// Sorts `data` in place (NaNs compare as equal so the sort stays total),
/// then interpolates between the two surrounding order statistics.
/// `p` is clamped to [0, 100]: the original indexed out of bounds (and
/// panicked) for p > 100 or p < 0. Returns 0.0 for an empty slice.
///
/// The parameter is now `&mut [f64]` (idiomatic slice instead of
/// `&mut Vec<f64>`); `&mut Vec<f64>` call sites still compile via deref
/// coercion.
pub fn percentile(data: &mut [f64], p: f64) -> f64 {
    if data.is_empty() {
        return 0.0;
    }
    let p = p.clamp(0.0, 100.0);
    data.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    let idx = (p / 100.0) * (data.len() - 1) as f64;
    let lo = idx.floor() as usize;
    let hi = (lo + 1).min(data.len() - 1);
    let frac = idx - lo as f64;
    data[lo] * (1.0 - frac) + data[hi] * frac
}
111
/// Inverse standard-normal CDF (probit) via the Abramowitz & Stegun 26.2.23
/// rational approximation, |error| < 4.5e-4. Input is clamped away from 0
/// and 1 to keep the result finite.
pub fn probit(p: f64) -> f64 {
    let p = p.clamp(1e-15, 1.0 - 1e-15);
    // Work on the lower tail and mirror the sign at the end.
    let tail = if p < 0.5 { p } else { 1.0 - p };
    let t = (-2.0 * tail.ln()).sqrt();
    let c0 = 2.515_517;
    let c1 = 0.802_853;
    let c2 = 0.010_328;
    let d1 = 1.432_788;
    let d2 = 0.189_269;
    let d3 = 0.001_308;
    let numerator = c0 + c1 * t + c2 * t * t;
    let denominator = 1.0 + d1 * t + d2 * t * t + d3 * t * t * t;
    let magnitude = t - numerator / denominator;
    if p >= 0.5 { magnitude } else { -magnitude }
}
130
131pub fn normal_cdf(x: f64) -> f64 {
135 0.5 * (1.0 + erf(x / 2.0_f64.sqrt()))
136}
137
/// Error function via the Abramowitz & Stegun 7.1.26 polynomial
/// approximation, |error| <= 1.5e-7. erf is odd, so compute on |x|
/// and restore the sign.
pub fn erf(x: f64) -> f64 {
    const A1: f64 = 0.254_829_592;
    const A2: f64 = -0.284_496_736;
    const A3: f64 = 1.421_413_741;
    const A4: f64 = -1.453_152_027;
    const A5: f64 = 1.061_405_429;
    const P: f64 = 0.3275911;
    let sign = if x < 0.0 { -1.0 } else { 1.0 };
    let x = x.abs();
    let t = 1.0 / (1.0 + P * x);
    let poly = t * (A1 + t * (A2 + t * (A3 + t * (A4 + t * A5))));
    sign * (1.0 - poly * (-x * x).exp())
}
150
/// An independent, normally distributed uncertain input.
#[derive(Debug, Clone)]
pub struct UncertainParameter {
    /// Mean of the Gaussian distribution.
    pub mean: f64,
    /// Standard deviation of the Gaussian distribution.
    pub std: f64,
}
163
164impl UncertainParameter {
165 pub fn new(mean: f64, std: f64) -> Self {
167 Self { mean, std }
168 }
169}
170
/// Summary of a Monte Carlo uncertainty-propagation run.
#[derive(Debug, Clone)]
pub struct McUqResult {
    /// Raw model outputs, one per Monte Carlo sample.
    pub samples: Vec<f64>,
    /// Sample mean of the outputs.
    pub mean: f64,
    /// Sample standard deviation (population convention, divides by n).
    pub std: f64,
    /// 5th percentile of the outputs.
    pub p05: f64,
    /// 95th percentile of the outputs.
    pub p95: f64,
}
185
186pub fn monte_carlo_propagation(
197 params: &[UncertainParameter],
198 n_samples: usize,
199 model: &dyn Fn(&[f64]) -> f64,
200 seed: u64,
201) -> McUqResult {
202 let mut rng = UqRng::new(seed);
203 let mut outputs = Vec::with_capacity(n_samples);
204 let mut inputs = vec![0.0_f64; params.len()];
205 for _ in 0..n_samples {
206 for (i, p) in params.iter().enumerate() {
207 inputs[i] = rng.next_gaussian(p.mean, p.std);
208 }
209 outputs.push(model(&inputs));
210 }
211 let m = mean(&outputs);
212 let s = std_dev(&outputs);
213 let mut sorted = outputs.clone();
214 let p05 = percentile(&mut sorted, 5.0);
215 let mut sorted2 = outputs.clone();
216 let p95 = percentile(&mut sorted2, 95.0);
217 McUqResult {
218 samples: outputs,
219 mean: m,
220 std: s,
221 p05,
222 p95,
223 }
224}
225
/// A Latin hypercube design on the unit hypercube [0, 1]^n_dims.
#[derive(Debug, Clone)]
pub struct LatinHypercubeSample {
    /// Number of sample points (rows).
    pub n_samples: usize,
    /// Number of dimensions (columns).
    pub n_dims: usize,
    /// Row-major storage: sample i, dimension j lives at i * n_dims + j.
    pub data: Vec<f64>,
}
241
242impl LatinHypercubeSample {
243 pub fn get(&self, i: usize, j: usize) -> f64 {
245 self.data[i * self.n_dims + j]
246 }
247}
248
249pub fn latin_hypercube_sample(n_samples: usize, n_dims: usize, seed: u64) -> LatinHypercubeSample {
257 let mut rng = UqRng::new(seed);
258 let mut data = vec![0.0_f64; n_samples * n_dims];
259 let step = 1.0 / n_samples as f64;
260
261 for j in 0..n_dims {
262 let mut col: Vec<f64> = (0..n_samples)
264 .map(|i| (i as f64 + rng.next_f64()) * step)
265 .collect();
266 for k in (1..n_samples).rev() {
268 let idx = (rng.next_u64() as usize) % (k + 1);
269 col.swap(k, idx);
270 }
271 for i in 0..n_samples {
272 data[i * n_dims + j] = col[i];
273 }
274 }
275 LatinHypercubeSample {
276 n_samples,
277 n_dims,
278 data,
279 }
280}
281
282pub fn scale_lhs(lhs: &LatinHypercubeSample, bounds: &[(f64, f64)]) -> Vec<Vec<f64>> {
286 assert_eq!(bounds.len(), lhs.n_dims);
287 (0..lhs.n_samples)
288 .map(|i| {
289 (0..lhs.n_dims)
290 .map(|j| {
291 let (lo, hi) = bounds[j];
292 lo + lhs.get(i, j) * (hi - lo)
293 })
294 .collect()
295 })
296 .collect()
297}
298
/// Morris screening measures, one entry per input dimension.
#[derive(Debug, Clone)]
pub struct MorrisResult {
    /// Mean absolute elementary effect (overall importance).
    pub mu_star: Vec<f64>,
    /// Standard deviation of the elementary effects
    /// (nonlinearity / interaction indicator).
    pub sigma: Vec<f64>,
}
313
314pub fn morris_sensitivity(
327 n_dims: usize,
328 r: usize,
329 delta: f64,
330 model: &dyn Fn(&[f64]) -> f64,
331 bounds: &[(f64, f64)],
332 seed: u64,
333) -> MorrisResult {
334 let mut rng = UqRng::new(seed);
335 let mut all_effects: Vec<Vec<f64>> = vec![Vec::new(); n_dims];
336
337 for _ in 0..r {
338 let mut x: Vec<f64> = bounds
340 .iter()
341 .map(|(lo, hi)| lo + rng.next_f64() * (hi - lo))
342 .collect();
343 let mut perm: Vec<usize> = (0..n_dims).collect();
345 for k in (1..n_dims).rev() {
346 let idx = (rng.next_u64() as usize) % (k + 1);
347 perm.swap(k, idx);
348 }
349 let mut y0 = model(&x);
350 for &dim in &perm {
351 let (lo, hi) = bounds[dim];
352 let step = delta * (hi - lo);
353 let new_val = if x[dim] + step <= hi {
355 x[dim] + step
356 } else {
357 x[dim] - step
358 };
359 let old_val = x[dim];
360 x[dim] = new_val;
361 let y1 = model(&x);
362 let ee = (y1 - y0) / (new_val - old_val);
363 all_effects[dim].push(ee);
364 y0 = y1;
365 }
366 }
367
368 let mu_star: Vec<f64> = all_effects
369 .iter()
370 .map(|effects| {
371 if effects.is_empty() {
372 0.0
373 } else {
374 effects.iter().map(|e| e.abs()).sum::<f64>() / effects.len() as f64
375 }
376 })
377 .collect();
378
379 let sigma: Vec<f64> = all_effects
380 .iter()
381 .map(|effects| {
382 if effects.len() < 2 {
383 return 0.0;
384 }
385 let m = effects.iter().sum::<f64>() / effects.len() as f64;
386 let v =
387 effects.iter().map(|e| (e - m).powi(2)).sum::<f64>() / (effects.len() - 1) as f64;
388 v.sqrt()
389 })
390 .collect();
391
392 MorrisResult { mu_star, sigma }
393}
394
/// Variance-based sensitivity indices, one entry per input dimension.
#[derive(Debug, Clone)]
pub struct SobolIndices {
    /// First-order (main-effect) indices S_i.
    pub first_order: Vec<f64>,
    /// Total-order indices S_Ti (main effect plus all interactions).
    pub total_order: Vec<f64>,
}
405
406pub fn sobol_indices(
418 n_dims: usize,
419 n_base: usize,
420 model: &dyn Fn(&[f64]) -> f64,
421 bounds: &[(f64, f64)],
422 seed: u64,
423) -> SobolIndices {
424 let lhs_a = latin_hypercube_sample(n_base, n_dims, seed);
426 let lhs_b = latin_hypercube_sample(n_base, n_dims, seed.wrapping_add(0xDEAD_BEEF));
427
428 let a_mat = scale_lhs(&lhs_a, bounds);
429 let b_mat = scale_lhs(&lhs_b, bounds);
430
431 let fa: Vec<f64> = a_mat.iter().map(|x| model(x)).collect();
433 let fb: Vec<f64> = b_mat.iter().map(|x| model(x)).collect();
434
435 let var_y = {
436 let all: Vec<f64> = fa.iter().chain(fb.iter()).copied().collect();
437 variance(&all)
438 };
439
440 let mut first_order = vec![0.0_f64; n_dims];
441 let mut total_order = vec![0.0_f64; n_dims];
442
443 let n = n_base as f64;
444
445 for dim in 0..n_dims {
446 let ab_i: Vec<Vec<f64>> = (0..n_base)
448 .map(|k| {
449 let mut row = a_mat[k].clone();
450 row[dim] = b_mat[k][dim];
451 row
452 })
453 .collect();
454
455 let fab_i: Vec<f64> = ab_i.iter().map(|x| model(x)).collect();
456
457 let s1_num: f64 = (0..n_base).map(|k| fb[k] * (fab_i[k] - fa[k])).sum::<f64>() / n;
459
460 let st_num: f64 = (0..n_base).map(|k| (fa[k] - fab_i[k]).powi(2)).sum::<f64>() / (2.0 * n);
462
463 first_order[dim] = if var_y > 1e-15 { s1_num / var_y } else { 0.0 };
464 total_order[dim] = if var_y > 1e-15 { st_num / var_y } else { 0.0 };
465 }
466
467 SobolIndices {
468 first_order,
469 total_order,
470 }
471}
472
/// Probabilists' Hermite polynomial He_n(x), computed by the three-term
/// recurrence He_{k+1}(x) = x He_k(x) - k He_{k-1}(x).
pub fn hermite_poly(n: usize, x: f64) -> f64 {
    if n == 0 {
        return 1.0;
    }
    let (mut prev, mut curr) = (1.0, x);
    for k in 1..n {
        let next = x * curr - k as f64 * prev;
        prev = curr;
        curr = next;
    }
    curr
}
496
/// A truncated polynomial chaos expansion in probabilists' Hermite
/// polynomials over standard-normal inputs.
#[derive(Debug, Clone)]
pub struct PolynomialChaosExpansion {
    /// One coefficient per basis term, aligned with `multi_indices`.
    pub coeffs: Vec<f64>,
    /// Per-term polynomial degrees, one degree per input dimension.
    pub multi_indices: Vec<Vec<usize>>,
    /// Number of (standard-normal) input dimensions.
    pub n_dims: usize,
}
510
511impl PolynomialChaosExpansion {
512 pub fn evaluate(&self, x: &[f64]) -> f64 {
516 assert_eq!(x.len(), self.n_dims);
517 self.coeffs
518 .iter()
519 .zip(self.multi_indices.iter())
520 .map(|(c, idx)| {
521 let basis: f64 = idx
522 .iter()
523 .zip(x.iter())
524 .map(|(°, &xi)| hermite_poly(deg, xi))
525 .product();
526 c * basis
527 })
528 .sum()
529 }
530
531 pub fn pce_mean(&self) -> f64 {
533 self.coeffs
535 .iter()
536 .zip(self.multi_indices.iter())
537 .find(|(_, idx)| idx.iter().all(|&d| d == 0))
538 .map(|(c, _)| *c)
539 .unwrap_or(0.0)
540 }
541
542 pub fn pce_variance(&self) -> f64 {
546 self.coeffs
547 .iter()
548 .zip(self.multi_indices.iter())
549 .filter(|(_, idx)| !idx.iter().all(|&d| d == 0))
550 .map(|(c, idx)| {
551 let norm_sq: f64 = idx.iter().map(|&d| factorial(d) as f64).product();
552 c * c * norm_sq
553 })
554 .sum()
555 }
556}
557
/// n! as u64; the empty fold yields 1 for n = 0 and n = 1.
fn factorial(n: usize) -> u64 {
    (2..=n as u64).fold(1, |acc, k| acc * k)
}
561
562pub fn pce_multi_indices(n_dims: usize, max_degree: usize) -> Vec<Vec<usize>> {
564 let mut result = Vec::new();
565 pce_multi_indices_rec(n_dims, max_degree, 0, &mut Vec::new(), &mut result);
566 result
567}
568
/// Recursive helper for [`pce_multi_indices`]: extends `current` with every
/// admissible degree for dimension `dim`, where `remaining` is the total
/// degree still available.
///
/// Fix: the original computed the per-dimension cap with
/// `if dim == n_dims - 1 { remaining } else { remaining }` — both branches
/// identical — so the dead conditional is removed.
fn pce_multi_indices_rec(
    n_dims: usize,
    remaining: usize,
    dim: usize,
    current: &mut Vec<usize>,
    result: &mut Vec<Vec<usize>>,
) {
    if dim == n_dims {
        // Only reachable when n_dims == 0: emit the single empty index.
        result.push(current.clone());
        return;
    }
    // Any degree up to the remaining total-degree budget is admissible.
    for d in 0..=remaining {
        current.push(d);
        if dim + 1 < n_dims {
            pce_multi_indices_rec(n_dims, remaining - d, dim + 1, current, result);
        } else {
            result.push(current.clone());
        }
        current.pop();
    }
}
595
596pub fn fit_pce(
607 n_dims: usize,
608 max_degree: usize,
609 n_train: usize,
610 model: &dyn Fn(&[f64]) -> f64,
611 seed: u64,
612) -> PolynomialChaosExpansion {
613 let multi_indices = pce_multi_indices(n_dims, max_degree);
614 let n_basis = multi_indices.len();
615
616 let lhs = latin_hypercube_sample(n_train, n_dims, seed);
618
619 let mut x_train: Vec<Vec<f64>> = (0..n_train)
620 .map(|i| {
621 (0..n_dims)
622 .map(|j| probit(lhs.get(i, j).clamp(1e-6, 1.0 - 1e-6)))
623 .collect()
624 })
625 .collect();
626
627 let y_train: Vec<f64> = x_train.iter_mut().map(|x| model(x)).collect();
628
629 let mut psi = vec![0.0_f64; n_train * n_basis];
631 for (i, xi) in x_train.iter().enumerate() {
632 for (j, idx) in multi_indices.iter().enumerate() {
633 let basis: f64 = idx
634 .iter()
635 .zip(xi.iter())
636 .map(|(°, &x)| hermite_poly(deg, x))
637 .product();
638 psi[i * n_basis + j] = basis;
639 }
640 }
641
642 let coeffs = normal_equations_solve(&psi, &y_train, n_train, n_basis);
644
645 PolynomialChaosExpansion {
646 coeffs,
647 multi_indices,
648 n_dims,
649 }
650}
651
/// Solves the least-squares system A x ~= b (A is m x n, row-major) via the
/// normal equations (A'A + eps*I) x = A'b, using Gaussian elimination with
/// partial pivoting. The tiny ridge term keeps A'A invertible when basis
/// columns are nearly collinear.
fn normal_equations_solve(a: &[f64], b: &[f64], m: usize, n: usize) -> Vec<f64> {
    // Accumulate A'A and A'b in a single pass over the rows of A.
    let mut ata = vec![0.0_f64; n * n];
    let mut atb = vec![0.0_f64; n];
    for i in 0..m {
        for j in 0..n {
            atb[j] += a[i * n + j] * b[i];
            for k in 0..n {
                ata[j * n + k] += a[i * n + j] * a[i * n + k];
            }
        }
    }
    // Ridge regularization on the diagonal.
    for j in 0..n {
        ata[j * n + j] += 1e-10;
    }
    // Forward elimination; `x` carries the right-hand side along.
    let mut x = atb.clone();
    let mut mat = ata;
    for col in 0..n {
        // Partial pivoting: pick the largest-magnitude entry in this column.
        let mut max_val = mat[col * n + col].abs();
        let mut max_row = col;
        for row in (col + 1)..n {
            if mat[row * n + col].abs() > max_val {
                max_val = mat[row * n + col].abs();
                max_row = row;
            }
        }
        if max_row != col {
            for k in 0..n {
                mat.swap(col * n + k, max_row * n + k);
            }
            x.swap(col, max_row);
        }
        let pivot = mat[col * n + col];
        // A (near-)zero pivot leaves the column uneliminated; the ridge term
        // above makes this effectively unreachable in practice.
        if pivot.abs() < 1e-18 {
            continue;
        }
        for row in (col + 1)..n {
            let factor = mat[row * n + col] / pivot;
            for k in col..n {
                let tmp = mat[col * n + k] * factor;
                mat[row * n + k] -= tmp;
            }
            x[row] -= x[col] * factor;
        }
    }
    // Back substitution.
    for col in (0..n).rev() {
        let pivot = mat[col * n + col];
        if pivot.abs() < 1e-18 {
            continue;
        }
        x[col] /= pivot;
        for row in 0..col {
            let tmp = mat[row * n + col] * x[col];
            x[row] -= tmp;
        }
    }
    x
}
715
/// One Gauss-Hermite quadrature node (physicists' convention, weight
/// function exp(-x^2)).
#[derive(Debug, Clone, Copy)]
pub struct CollocationNode {
    /// Quadrature abscissa.
    pub point: f64,
    /// Quadrature weight.
    pub weight: f64,
}
728
/// Hard-coded physicists' Gauss-Hermite nodes and weights for orders 1-5
/// (exact for polynomials up to degree 2*order - 1 against exp(-x^2)).
///
/// NOTE(review): any order outside 1-5 silently falls back to the 3-point
/// rule — confirm callers never rely on higher orders.
pub fn gauss_hermite_nodes(order: usize) -> Vec<CollocationNode> {
    match order {
        // Single node at the origin; weight sqrt(pi).
        1 => vec![CollocationNode {
            point: 0.0,
            weight: 1.772_453_851_f64,
        }],
        2 => vec![
            CollocationNode {
                point: -FRAC_1_SQRT_2,
                weight: 0.886_226_925,
            },
            CollocationNode {
                point: FRAC_1_SQRT_2,
                weight: 0.886_226_925,
            },
        ],
        3 => vec![
            CollocationNode {
                point: -1.224_744_871,
                weight: 0.295_408_975,
            },
            CollocationNode {
                point: 0.0,
                weight: 1.181_635_901,
            },
            CollocationNode {
                point: 1.224_744_871,
                weight: 0.295_408_975,
            },
        ],
        4 => vec![
            CollocationNode {
                point: -1.650_680_123,
                weight: 0.081_312_835,
            },
            CollocationNode {
                point: -0.524_647_623,
                weight: 0.804_914_090,
            },
            CollocationNode {
                point: 0.524_647_623,
                weight: 0.804_914_090,
            },
            CollocationNode {
                point: 1.650_680_123,
                weight: 0.081_312_835,
            },
        ],
        5 => vec![
            CollocationNode {
                point: -2.020_182_470,
                weight: 0.019_953_242,
            },
            CollocationNode {
                point: -0.958_572_464,
                weight: 0.394_424_314,
            },
            CollocationNode {
                point: 0.0,
                weight: 0.945_308_722,
            },
            CollocationNode {
                point: 0.958_572_464,
                weight: 0.394_424_314,
            },
            CollocationNode {
                point: 2.020_182_470,
                weight: 0.019_953_242,
            },
        ],
        // Unsupported order: degrade to the 3-point rule.
        _ => gauss_hermite_nodes(3),
    }
}
809
810pub fn stochastic_collocation(
819 n_dims: usize,
820 order: usize,
821 model: &dyn Fn(&[f64]) -> f64,
822) -> (f64, f64) {
823 let nodes_1d = gauss_hermite_nodes(order);
824 let _n_pts_1d = nodes_1d.len();
825
826 let pi_n_half = PI.powf(n_dims as f64 / 2.0);
828
829 let mut mean_acc = 0.0_f64;
831 let mut sq_acc = 0.0_f64;
832 let mut x = vec![0.0_f64; n_dims];
833 collocation_recurse(
834 &nodes_1d,
835 n_dims,
836 0,
837 1.0,
838 &mut x,
839 model,
840 &mut mean_acc,
841 &mut sq_acc,
842 );
843
844 mean_acc /= pi_n_half;
845 sq_acc /= pi_n_half;
846 let var = (sq_acc - mean_acc * mean_acc).max(0.0);
847 (mean_acc, var)
848}
849
/// Recursive tensor-product walk over the 1-D collocation nodes: fixes one
/// dimension per level, accumulating the product weight, and evaluates the
/// model once all dimensions are set.
#[allow(clippy::too_many_arguments)]
fn collocation_recurse(
    nodes: &[CollocationNode],
    n_dims: usize,
    dim: usize,
    w_acc: f64,
    x: &mut Vec<f64>,
    model: &dyn Fn(&[f64]) -> f64,
    mean_acc: &mut f64,
    sq_acc: &mut f64,
) {
    // All dimensions fixed: accumulate the weighted first and second moments.
    if dim == n_dims {
        let y = model(x);
        *mean_acc += w_acc * y;
        *sq_acc += w_acc * y * y;
        return;
    }
    for node in nodes {
        // Scale physicists' abscissae by sqrt(2) into standard-normal space.
        x[dim] = node.point * 2.0_f64.sqrt();
        collocation_recurse(
            nodes,
            n_dims,
            dim + 1,
            w_acc * node.weight,
            x,
            model,
            mean_acc,
            sq_acc,
        );
    }
}
882
/// Squared-exponential (RBF) kernel:
/// sigma_f^2 * exp(-||x - x'||^2 / (2 * length_scale^2)).
pub fn rbf_kernel(x: &[f64], xp: &[f64], sigma_f: f64, length_scale: f64) -> f64 {
    let mut sq_dist = 0.0_f64;
    for (a, b) in x.iter().zip(xp.iter()) {
        sq_dist += (a - b).powi(2);
    }
    sigma_f * sigma_f * (-sq_dist / (2.0 * length_scale * length_scale)).exp()
}
898
/// A Gaussian-process surrogate with an RBF (squared-exponential) kernel.
#[derive(Debug, Clone)]
pub struct GaussianProcessSurrogate {
    /// Training inputs.
    pub x_train: Vec<Vec<f64>>,
    /// Training targets.
    pub y_train: Vec<f64>,
    /// Precomputed weights alpha = (K + noise_std^2 I)^{-1} y.
    pub alpha: Vec<f64>,
    /// Kernel signal amplitude.
    pub sigma_f: f64,
    /// Kernel length scale.
    pub length_scale: f64,
    /// Observation-noise standard deviation.
    pub noise_std: f64,
}
915
impl GaussianProcessSurrogate {
    /// Predicts (mean, variance) at `x_star`.
    ///
    /// The mean is the standard GP posterior mean k*' alpha. The variance,
    /// however, is a cheap heuristic (k** minus a scaled norm of k*), NOT
    /// the exact GP posterior variance k** - k*' K^{-1} k*, since the
    /// factorized K is not stored — treat it as a rough confidence
    /// indicator only.
    pub fn predict(&self, x_star: &[f64]) -> (f64, f64) {
        // Cross-covariances between x_star and each training point.
        let k_star: Vec<f64> = self
            .x_train
            .iter()
            .map(|xi| rbf_kernel(xi, x_star, self.sigma_f, self.length_scale))
            .collect();

        // Posterior mean: k*' alpha.
        let mean_pred: f64 = k_star
            .iter()
            .zip(self.alpha.iter())
            .map(|(k, a)| k * a)
            .sum();

        let k_ss = rbf_kernel(x_star, x_star, self.sigma_f, self.length_scale);
        // Heuristic variance, clamped to be nonnegative.
        let var_pred = (k_ss
            - k_star.iter().map(|k| k * k).sum::<f64>() / (self.sigma_f * self.sigma_f + 1e-8))
            .max(0.0);

        (mean_pred, var_pred)
    }
}
942
943pub fn fit_gp(
954 x_train: Vec<Vec<f64>>,
955 y_train: Vec<f64>,
956 sigma_f: f64,
957 length_scale: f64,
958 noise_std: f64,
959) -> GaussianProcessSurrogate {
960 let n = x_train.len();
961 let mut k = vec![0.0_f64; n * n];
963 for i in 0..n {
964 for j in 0..n {
965 k[i * n + j] = rbf_kernel(&x_train[i], &x_train[j], sigma_f, length_scale);
966 }
967 k[i * n + i] += noise_std * noise_std;
968 }
969 let alpha = gaussian_eliminate(&k, &y_train, n);
971 GaussianProcessSurrogate {
972 x_train,
973 y_train,
974 alpha,
975 sigma_f,
976 length_scale,
977 noise_std,
978 }
979}
980
/// Solves the dense linear system A x = b (A is n x n, row-major) by
/// Gaussian elimination with partial pivoting and back substitution.
fn gaussian_eliminate(a: &[f64], b: &[f64], n: usize) -> Vec<f64> {
    let mut mat = a.to_vec();
    let mut rhs = b.to_vec();
    for col in 0..n {
        // Partial pivoting: pick the largest-magnitude entry in this column.
        let mut max_val = mat[col * n + col].abs();
        let mut max_row = col;
        for row in (col + 1)..n {
            if mat[row * n + col].abs() > max_val {
                max_val = mat[row * n + col].abs();
                max_row = row;
            }
        }
        if max_row != col {
            for k in 0..n {
                mat.swap(col * n + k, max_row * n + k);
            }
            rhs.swap(col, max_row);
        }
        let pivot = mat[col * n + col];
        // Skip (near-)singular pivots; that column simply stays uneliminated.
        if pivot.abs() < 1e-18 {
            continue;
        }
        // Eliminate this column from all rows below the pivot.
        for row in (col + 1)..n {
            let factor = mat[row * n + col] / pivot;
            for k in col..n {
                let tmp = mat[col * n + k] * factor;
                mat[row * n + k] -= tmp;
            }
            rhs[row] -= rhs[col] * factor;
        }
    }
    // Back substitution on the upper-triangular system.
    for col in (0..n).rev() {
        let pivot = mat[col * n + col];
        if pivot.abs() < 1e-18 {
            continue;
        }
        rhs[col] /= pivot;
        for row in 0..col {
            let tmp = mat[row * n + col] * rhs[col];
            rhs[row] -= tmp;
        }
    }
    rhs
}
1026
/// Output of a FORM (first-order reliability method) analysis.
#[derive(Debug, Clone)]
pub struct FormResult {
    /// Most probable failure point in standard-normal (u) space.
    pub beta_u: Vec<f64>,
    /// Hasofer-Lind reliability index (distance from origin to the MPP).
    pub beta: f64,
    /// First-order failure probability estimate Phi(-beta).
    pub pf_form: f64,
}
1041
1042pub fn form_analysis(
1054 n_dims: usize,
1055 g: &dyn Fn(&[f64]) -> f64,
1056 max_iter: usize,
1057 tol: f64,
1058 h: f64,
1059) -> FormResult {
1060 let mut u = vec![0.0_f64; n_dims]; let mut beta = 0.0_f64;
1062
1063 for _ in 0..max_iter {
1064 let g0 = g(&u);
1065 let mut grad = vec![0.0_f64; n_dims];
1067 for k in 0..n_dims {
1068 let mut u_plus = u.clone();
1069 u_plus[k] += h;
1070 grad[k] = (g(&u_plus) - g0) / h;
1071 }
1072 let grad_norm: f64 = grad.iter().map(|x| x * x).sum::<f64>().sqrt();
1073 if grad_norm < 1e-15 {
1074 break;
1075 }
1076 let dot_grad_u: f64 = grad.iter().zip(u.iter()).map(|(g, uk)| g * uk).sum();
1078 let factor = (dot_grad_u - g0) / (grad_norm * grad_norm);
1079 let u_new: Vec<f64> = grad.iter().map(|gi| factor * gi).collect();
1080 let new_beta: f64 = u_new.iter().map(|x| x * x).sum::<f64>().sqrt();
1081 if (new_beta - beta).abs() < tol {
1082 u = u_new;
1083 beta = new_beta;
1084 break;
1085 }
1086 beta = new_beta;
1087 u = u_new;
1088 }
1089
1090 let pf = normal_cdf(-beta);
1091 FormResult {
1092 beta_u: u,
1093 beta,
1094 pf_form: pf,
1095 }
1096}
1097
1098pub fn sorm_breitung(beta: f64, curvatures: &[f64]) -> f64 {
1105 let correction: f64 = curvatures
1106 .iter()
1107 .map(|kappa| {
1108 let denom = (1.0 + beta * kappa).max(1e-10);
1109 1.0 / denom.sqrt()
1110 })
1111 .product();
1112 normal_cdf(-beta) * correction
1113}
1114
1115pub fn bootstrap_ci(
1133 data: &[f64],
1134 statistic: &dyn Fn(&[f64]) -> f64,
1135 n_boot: usize,
1136 alpha: f64,
1137 seed: u64,
1138) -> (f64, f64) {
1139 let n = data.len();
1140 let mut rng = UqRng::new(seed);
1141 let mut boot_stats: Vec<f64> = Vec::with_capacity(n_boot);
1142 let mut sample = vec![0.0_f64; n];
1143 for _ in 0..n_boot {
1144 for s in sample.iter_mut() {
1145 *s = data[(rng.next_u64() as usize) % n];
1146 }
1147 boot_stats.push(statistic(&sample));
1148 }
1149 let lo = percentile(&mut boot_stats.clone(), alpha / 2.0 * 100.0);
1150 let hi = percentile(&mut boot_stats, (1.0 - alpha / 2.0) * 100.0);
1151 (lo, hi)
1152}
1153
/// Tuning knobs for the random-walk Metropolis-Hastings sampler.
#[derive(Debug, Clone)]
pub struct MhConfig {
    /// Number of samples kept after burn-in.
    pub n_samples: usize,
    /// Number of initial iterations discarded.
    pub burn_in: usize,
    /// Standard deviation of the Gaussian proposal.
    pub step_size: f64,
    /// RNG seed for reproducibility.
    pub seed: u64,
}
1170
1171impl Default for MhConfig {
1172 fn default() -> Self {
1173 Self {
1174 n_samples: 5000,
1175 burn_in: 500,
1176 step_size: 0.1,
1177 seed: 42,
1178 }
1179 }
1180}
1181
/// Output of a Metropolis-Hastings run.
#[derive(Debug, Clone)]
pub struct MhResult {
    /// Post-burn-in chain states, one vector per kept iteration.
    pub samples: Vec<Vec<f64>>,
    /// Fraction of post-burn-in proposals that were accepted.
    pub acceptance_rate: f64,
}
1190
1191pub fn metropolis_hastings(
1200 log_posterior: &dyn Fn(&[f64]) -> f64,
1201 initial: &[f64],
1202 config: &MhConfig,
1203) -> MhResult {
1204 let _n_dims = initial.len();
1205 let mut rng = UqRng::new(config.seed);
1206 let mut current = initial.to_vec();
1207 let mut log_p_current = log_posterior(¤t);
1208 let total = config.n_samples + config.burn_in;
1209 let mut samples = Vec::with_capacity(config.n_samples);
1210 let mut n_accepted = 0_usize;
1211
1212 for step in 0..total {
1213 let proposal: Vec<f64> = current
1215 .iter()
1216 .map(|x| x + rng.next_normal() * config.step_size)
1217 .collect();
1218 let log_p_proposal = log_posterior(&proposal);
1219 let log_accept = log_p_proposal - log_p_current;
1220 let u = rng.next_f64();
1221 if log_accept >= 0.0 || u < log_accept.exp() {
1222 current = proposal;
1223 log_p_current = log_p_proposal;
1224 if step >= config.burn_in {
1225 n_accepted += 1;
1226 }
1227 }
1228 if step >= config.burn_in {
1229 samples.push(current.clone());
1230 }
1231 }
1232 let acceptance_rate = n_accepted as f64 / config.n_samples as f64;
1233 MhResult {
1234 samples,
1235 acceptance_rate,
1236 }
1237}
1238
1239pub fn probability_of_failure_mc(
1253 params: &[UncertainParameter],
1254 limit_state: &dyn Fn(&[f64]) -> f64,
1255 n_samples: usize,
1256 seed: u64,
1257) -> (f64, f64) {
1258 let mut rng = UqRng::new(seed);
1259 let mut n_fail = 0_usize;
1260 let mut inputs = vec![0.0_f64; params.len()];
1261 for _ in 0..n_samples {
1262 for (i, p) in params.iter().enumerate() {
1263 inputs[i] = rng.next_gaussian(p.mean, p.std);
1264 }
1265 if limit_state(&inputs) <= 0.0 {
1266 n_fail += 1;
1267 }
1268 }
1269 let pf = n_fail as f64 / n_samples as f64;
1270 let cov = if pf > 0.0 {
1271 ((1.0 - pf) / (pf * n_samples as f64)).sqrt()
1272 } else {
1273 f64::INFINITY
1274 };
1275 (pf, cov)
1276}
1277
1278pub fn importance_sampling_pf(
1290 n_dims: usize,
1291 limit_state_std: &dyn Fn(&[f64]) -> f64,
1292 u_star: &[f64],
1293 n_samples: usize,
1294 seed: u64,
1295) -> f64 {
1296 let mut rng = UqRng::new(seed);
1297 let mut sum_w = 0.0_f64;
1298 let mut sum_iw = 0.0_f64;
1299 let mut u = vec![0.0_f64; n_dims];
1300 for _ in 0..n_samples {
1301 for k in 0..n_dims {
1303 u[k] = u_star[k] + rng.next_normal();
1304 }
1305 let norm_u_sq: f64 = u.iter().map(|x| x * x).sum();
1307 let norm_diff_sq: f64 = u
1308 .iter()
1309 .zip(u_star.iter())
1310 .map(|(ui, ui_star)| (ui - ui_star).powi(2))
1311 .sum();
1312 let log_w = -0.5 * norm_u_sq + 0.5 * norm_diff_sq;
1313 let w = log_w.exp();
1314 let indicator = if limit_state_std(&u) <= 0.0 { 1.0 } else { 0.0 };
1315 sum_iw += indicator * w;
1316 sum_w += w;
1317 }
1318 if sum_w < 1e-30 { 0.0 } else { sum_iw / sum_w }
1319}
1320
1321pub fn reliability_index(pf: f64) -> f64 {
1325 -probit(pf.clamp(1e-15, 1.0 - 1e-15))
1326}
1327
/// Borgonovo-style moment-independent (delta) sensitivity of the output
/// samples `y_samples` to input dimension `dim`, estimated by histograms:
/// delta = 0.5 * E_x[ integral |f_Y(y) - f_{Y|X}(y)| dy ], with `n_bins`
/// output bins and ~sqrt(n) conditioning slices over x. Returns 0.0 for
/// empty input or zero bins.
pub fn delta_sensitivity(
    x_samples: &[Vec<f64>],
    y_samples: &[f64],
    dim: usize,
    n_bins: usize,
) -> f64 {
    let n = y_samples.len();
    if n == 0 || n_bins == 0 {
        return 0.0;
    }

    // Output histogram range; the floor guards against constant y.
    let y_min = y_samples.iter().cloned().fold(f64::INFINITY, f64::min);
    let y_max = y_samples.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
    let range = (y_max - y_min).max(1e-15);
    let bin_w = range / n_bins as f64;

    // Unconditional density estimate f_Y (normalized histogram).
    let mut f_y = vec![0.0_f64; n_bins];
    for &yi in y_samples {
        let bin = ((yi - y_min) / bin_w).floor() as usize;
        f_y[bin.min(n_bins - 1)] += 1.0 / (n as f64 * bin_w);
    }

    // Condition on x_dim via ~sqrt(n) equal-width slices.
    let x_dim: Vec<f64> = x_samples.iter().map(|row| row[dim]).collect();
    let x_min = x_dim.iter().cloned().fold(f64::INFINITY, f64::min);
    let x_max = x_dim.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
    let x_range = (x_max - x_min).max(1e-15);
    let n_x_bins = (n.isqrt()).max(2);
    let x_bin_w = x_range / n_x_bins as f64;

    let mut delta = 0.0_f64;
    for xi_bin in 0..n_x_bins {
        let x_lo = x_min + xi_bin as f64 * x_bin_w;
        let x_hi = x_lo + x_bin_w;
        // Outputs whose x falls in this slice. NOTE(review): the half-open
        // test means the sample at exactly x == x_max lands in no slice.
        let sub_y: Vec<f64> = x_dim
            .iter()
            .zip(y_samples.iter())
            .filter(|&(&xi, _)| xi >= x_lo && xi < x_hi)
            .map(|(_, &yi)| yi)
            .collect();
        if sub_y.is_empty() {
            continue;
        }
        let p_x = sub_y.len() as f64 / n as f64;
        // Conditional density f_{Y | X in slice}.
        let mut f_y_given_x = vec![0.0_f64; n_bins];
        for &yi in &sub_y {
            let bin = ((yi - y_min) / bin_w).floor() as usize;
            f_y_given_x[bin.min(n_bins - 1)] += 1.0 / (sub_y.len() as f64 * bin_w);
        }
        // L1 distance between the two histogram densities.
        let tv: f64 = f_y
            .iter()
            .zip(f_y_given_x.iter())
            .map(|(a, b)| (a - b).abs() * bin_w)
            .sum();
        delta += p_x * tv;
    }
    delta * 0.5
}
1400
1401pub fn coefficient_of_variation(data: &[f64]) -> f64 {
1409 let m = mean(data);
1410 if m.abs() < 1e-15 {
1411 return f64::INFINITY;
1412 }
1413 std_dev(data) / m.abs()
1414}
1415
1416pub fn skewness(data: &[f64]) -> f64 {
1418 if data.len() < 3 {
1419 return 0.0;
1420 }
1421 let m = mean(data);
1422 let s = std_dev(data);
1423 if s < 1e-15 {
1424 return 0.0;
1425 }
1426 let n = data.len() as f64;
1427 data.iter().map(|x| ((x - m) / s).powi(3)).sum::<f64>() / n
1428}
1429
1430pub fn excess_kurtosis(data: &[f64]) -> f64 {
1432 if data.len() < 4 {
1433 return 0.0;
1434 }
1435 let m = mean(data);
1436 let s = std_dev(data);
1437 if s < 1e-15 {
1438 return 0.0;
1439 }
1440 let n = data.len() as f64;
1441 data.iter().map(|x| ((x - m) / s).powi(4)).sum::<f64>() / n - 3.0
1442}
1443
/// Probability box (p-box): the pointwise lower/upper envelope of the
/// empirical CDFs of each scenario, evaluated at `eval_points`.
/// With no scenarios, the bounds stay at (+inf, -inf).
pub fn probability_box(scenarios: &[Vec<f64>], eval_points: &[f64]) -> (Vec<f64>, Vec<f64>) {
    let n_pts = eval_points.len();
    let mut lower = vec![f64::INFINITY; n_pts];
    let mut upper = vec![f64::NEG_INFINITY; n_pts];

    for scenario in scenarios {
        let count = scenario.len() as f64;
        for (k, &threshold) in eval_points.iter().enumerate() {
            // Empirical CDF of this scenario at the evaluation point.
            let cdf = scenario.iter().filter(|&&v| v <= threshold).count() as f64 / count;
            lower[k] = lower[k].min(cdf);
            upper[k] = upper[k].max(cdf);
        }
    }
    (lower, upper)
}
1473
1474#[cfg(test)]
1479mod tests {
1480 use super::*;
1481
    // Test model: weighted sum y = sum((i + 1) * x_i). Each dimension gets
    // a distinct weight, which makes sensitivity rankings checkable.
    fn linear_model(x: &[f64]) -> f64 {
        x.iter()
            .enumerate()
            .map(|(i, xi)| (i + 1) as f64 * xi)
            .sum()
    }
1490
    // --- mean / variance / std_dev ---

    #[test]
    fn test_mean_empty() {
        // Empty input is defined to yield 0.0 rather than NaN.
        assert_eq!(mean(&[]), 0.0);
    }

    #[test]
    fn test_mean_single() {
        assert!((mean(&[42.0]) - 42.0).abs() < 1e-12);
    }

    #[test]
    fn test_mean_basic() {
        assert!((mean(&[1.0, 2.0, 3.0, 4.0, 5.0]) - 3.0).abs() < 1e-12);
    }

    #[test]
    fn test_variance_zero() {
        assert_eq!(variance(&[5.0, 5.0, 5.0]), 0.0);
    }

    #[test]
    fn test_variance_known() {
        // Population variance of this classic data set is exactly 4.0.
        let data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0];
        let v = variance(&data);
        assert!((v - 4.0).abs() < 0.1, "variance={v}");
    }

    #[test]
    fn test_std_dev_nonneg() {
        let data: Vec<f64> = (1..=100).map(|i| i as f64).collect();
        assert!(std_dev(&data) > 0.0);
    }
1525
    // --- probit / normal_cdf ---

    #[test]
    fn test_probit_symmetry() {
        // The standard-normal median is 0 (within the approximation error).
        assert!((probit(0.5)).abs() < 1e-3);
    }

    #[test]
    fn test_normal_cdf_symmetry() {
        assert!((normal_cdf(0.0) - 0.5).abs() < 1e-6);
    }

    #[test]
    fn test_probit_normal_cdf_inverse() {
        // probit and normal_cdf should invert each other within the combined
        // error of both rational approximations.
        for p in [0.1, 0.25, 0.5, 0.75, 0.9] {
            let x = probit(p);
            let p2 = normal_cdf(x);
            assert!((p2 - p).abs() < 1e-3, "p={p} p2={p2}");
        }
    }
1546
    // --- percentile ---

    #[test]
    fn test_percentile_median() {
        let mut data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        assert!((percentile(&mut data, 50.0) - 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_percentile_min_max() {
        // The 0th / 100th percentiles coincide with the min / max.
        let mut data = vec![10.0, 20.0, 30.0];
        assert!((percentile(&mut data, 0.0) - 10.0).abs() < 1e-10);
        let mut data2 = vec![10.0, 20.0, 30.0];
        assert!((percentile(&mut data2, 100.0) - 30.0).abs() < 1e-10);
    }
1562
    // --- probabilists' Hermite polynomials ---

    #[test]
    fn test_hermite_poly_degree0() {
        assert_eq!(hermite_poly(0, 3.5), 1.0);
    }

    #[test]
    fn test_hermite_poly_degree1() {
        assert!((hermite_poly(1, 2.7) - 2.7).abs() < 1e-12);
    }

    #[test]
    fn test_hermite_poly_degree2() {
        // He_2(x) = x^2 - 1
        let x = 2.0;
        assert!((hermite_poly(2, x) - (x * x - 1.0)).abs() < 1e-12);
    }

    #[test]
    fn test_hermite_poly_degree3() {
        // He_3(x) = x^3 - 3x
        let x = 1.5;
        assert!((hermite_poly(3, x) - (x.powi(3) - 3.0 * x)).abs() < 1e-12);
    }
1588
    // --- Latin hypercube sampling ---

    #[test]
    fn test_lhs_shape() {
        let lhs = latin_hypercube_sample(10, 3, 42);
        assert_eq!(lhs.n_samples, 10);
        assert_eq!(lhs.n_dims, 3);
        assert_eq!(lhs.data.len(), 30);
    }

    #[test]
    fn test_lhs_bounds() {
        let lhs = latin_hypercube_sample(50, 4, 7);
        for v in &lhs.data {
            assert!(*v >= 0.0 && *v <= 1.0, "value out of [0,1]: {v}");
        }
    }

    #[test]
    fn test_lhs_stratification_1d() {
        // Each of the n equal-width strata must contain exactly one sample.
        let n = 20;
        let lhs = latin_hypercube_sample(n, 1, 123);
        let mut bins = vec![false; n];
        let step = 1.0 / n as f64;
        for i in 0..n {
            let v = lhs.get(i, 0);
            let b = (v / step).floor() as usize;
            let b = b.min(n - 1);
            bins[b] = true;
        }
        assert!(bins.iter().all(|&b| b), "LHS should cover all strata");
    }

    #[test]
    fn test_scale_lhs() {
        let lhs = latin_hypercube_sample(5, 2, 0);
        let bounds = [(0.0, 10.0), (-5.0, 5.0)];
        let scaled = scale_lhs(&lhs, &bounds);
        for row in &scaled {
            assert!(row[0] >= 0.0 && row[0] <= 10.0);
            assert!(row[1] >= -5.0 && row[1] <= 5.0);
        }
    }
1632
1633 #[test]
1636 fn test_mc_propagation_mean() {
1637 let params = vec![
1639 UncertainParameter::new(2.0, 0.1),
1640 UncertainParameter::new(3.0, 0.1),
1641 ];
1642 let result = monte_carlo_propagation(¶ms, 5000, &|x| x[0] + x[1], 1);
1643 assert!((result.mean - 5.0).abs() < 0.1, "mean={}", result.mean);
1644 }
1645
1646 #[test]
1647 fn test_mc_propagation_std() {
1648 let params = vec![UncertainParameter::new(0.0, 1.0)];
1650 let result = monte_carlo_propagation(¶ms, 10_000, &|x| x[0], 99);
1651 assert!((result.std - 1.0).abs() < 0.1, "std={}", result.std);
1652 }
1653
1654 #[test]
1655 fn test_mc_propagation_percentiles() {
1656 let params = vec![UncertainParameter::new(0.0, 1.0)];
1657 let result = monte_carlo_propagation(¶ms, 10_000, &|x| x[0], 7);
1658 assert!(result.p05 < 0.0 && result.p95 > 0.0);
1660 }
1661
    // --- Morris elementary-effects screening ---

    #[test]
    fn test_morris_sensitivity_shape() {
        let bounds = vec![(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)];
        let result = morris_sensitivity(3, 20, 0.1, &linear_model, &bounds, 5);
        assert_eq!(result.mu_star.len(), 3);
        assert_eq!(result.sigma.len(), 3);
    }

    #[test]
    fn test_morris_sensitivity_rank_order() {
        // linear_model weights dims by 1, 2, 3 — mu* must rank accordingly.
        let bounds = vec![(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)];
        let result = morris_sensitivity(3, 50, 0.1, &linear_model, &bounds, 13);
        assert!(
            result.mu_star[2] >= result.mu_star[1],
            "mu*[2]={} mu*[1]={}",
            result.mu_star[2],
            result.mu_star[1]
        );
        assert!(
            result.mu_star[1] >= result.mu_star[0],
            "mu*[1]={} mu*[0]={}",
            result.mu_star[1],
            result.mu_star[0]
        );
    }
1690
1691 #[test]
1694 fn test_sobol_shape() {
1695 let bounds = vec![(0.0, 1.0), (0.0, 1.0)];
1696 let indices = sobol_indices(2, 100, &|x| x[0] + x[1], &bounds, 42);
1697 assert_eq!(indices.first_order.len(), 2);
1698 assert_eq!(indices.total_order.len(), 2);
1699 }
1700
1701 #[test]
1702 fn test_sobol_additive_model() {
1703 let bounds = vec![(0.0, 1.0), (0.0, 1.0)];
1705 let indices = sobol_indices(2, 500, &|x| x[0] + x[1], &bounds, 99);
1706 assert!(
1707 (indices.first_order[0] - indices.first_order[1]).abs() < 0.2,
1708 "S1={} S2={}",
1709 indices.first_order[0],
1710 indices.first_order[1]
1711 );
1712 }
1713
1714 #[test]
1717 fn test_collocation_constant_model() {
1718 let (m, v) = stochastic_collocation(1, 3, &|_| 5.0);
1719 assert!((m - 5.0).abs() < 1e-6, "mean={m}");
1720 assert!(v < 1e-10, "var={v}");
1721 }
1722
1723 #[test]
1724 fn test_collocation_linear_model_mean() {
1725 let (m, _v) = stochastic_collocation(1, 5, &|x| x[0]);
1727 assert!(m.abs() < 0.1, "mean={m}");
1728 }
1729
1730 #[test]
1733 fn test_gp_fit_predict_interpolation() {
1734 let x_train: Vec<Vec<f64>> = (0..5).map(|i| vec![i as f64]).collect();
1735 let y_train: Vec<f64> = x_train.iter().map(|x| x[0] * x[0]).collect();
1736 let gp = fit_gp(x_train.clone(), y_train.clone(), 2.0, 1.5, 0.01);
1737 for (i, xi) in x_train.iter().enumerate() {
1739 let (pred_m, _pred_v) = gp.predict(xi);
1740 assert!(
1741 (pred_m - y_train[i]).abs() < 2.0,
1742 "i={i} pred={pred_m} true={}",
1743 y_train[i]
1744 );
1745 }
1746 }
1747
1748 #[test]
1749 fn test_rbf_kernel_symmetry() {
1750 let x = vec![1.0, 2.0];
1751 let y = vec![3.0, 1.0];
1752 let k1 = rbf_kernel(&x, &y, 1.0, 1.0);
1753 let k2 = rbf_kernel(&y, &x, 1.0, 1.0);
1754 assert!((k1 - k2).abs() < 1e-12);
1755 }
1756
1757 #[test]
1758 fn test_rbf_kernel_self_similarity() {
1759 let x = vec![1.0, 2.0, 3.0];
1760 let k = rbf_kernel(&x, &x, 2.0, 1.0);
1761 assert!((k - 4.0).abs() < 1e-12); }
1763
1764 #[test]
1767 fn test_form_simple_limit_state() {
1768 let beta_target = 2.0;
1770 let result = form_analysis(1, &|u| beta_target - u[0], 50, 1e-6, 1e-5);
1771 assert!(
1772 (result.beta - beta_target).abs() < 0.1,
1773 "beta={}",
1774 result.beta
1775 );
1776 }
1777
1778 #[test]
1779 fn test_form_pf_decreases_with_beta() {
1780 let r1 = form_analysis(2, &|u| 3.0 - u[0] - u[1], 50, 1e-6, 1e-5);
1781 let r2 = form_analysis(2, &|u| 1.0 - u[0] - u[1], 50, 1e-6, 1e-5);
1782 assert!(
1783 r1.pf_form < r2.pf_form,
1784 "pf1={} pf2={}",
1785 r1.pf_form,
1786 r2.pf_form
1787 );
1788 }
1789
1790 #[test]
1793 fn test_sorm_zero_curvature_equals_form() {
1794 let beta = 2.0;
1795 let pf_form = normal_cdf(-beta);
1796 let pf_sorm = sorm_breitung(beta, &[]);
1797 assert!((pf_sorm - pf_form).abs() < 1e-12);
1798 }
1799
1800 #[test]
1801 fn test_sorm_positive_curvature_reduces_pf() {
1802 let beta = 2.0;
1803 let pf_form = normal_cdf(-beta);
1804 let pf_sorm = sorm_breitung(beta, &[0.5]);
1805 assert!(pf_sorm < pf_form, "pf_sorm={pf_sorm} pf_form={pf_form}");
1806 }
1807
1808 #[test]
1811 fn test_bootstrap_ci_contains_true_mean() {
1812 let data: Vec<f64> = (0..100).map(|i| i as f64 / 100.0).collect();
1813 let (lo, hi) = bootstrap_ci(&data, &|s| mean(s), 1000, 0.05, 42);
1814 let true_mean = 0.495;
1815 assert!(
1816 lo <= true_mean && true_mean <= hi,
1817 "CI=[{lo},{hi}] true_mean={true_mean}"
1818 );
1819 }
1820
1821 #[test]
1822 fn test_bootstrap_ci_ordering() {
1823 let data: Vec<f64> = (1..=50).map(|i| i as f64).collect();
1824 let (lo, hi) = bootstrap_ci(&data, &|s| mean(s), 500, 0.1, 7);
1825 assert!(lo <= hi, "lo={lo} hi={hi}");
1826 }
1827
1828 #[test]
1831 fn test_mh_samples_count() {
1832 let config = MhConfig {
1833 n_samples: 200,
1834 burn_in: 50,
1835 step_size: 0.5,
1836 seed: 1,
1837 };
1838 let log_post = |theta: &[f64]| -0.5 * theta[0] * theta[0];
1839 let result = metropolis_hastings(&log_post, &[0.0], &config);
1840 assert_eq!(result.samples.len(), 200);
1841 }
1842
1843 #[test]
1844 fn test_mh_normal_target_mean() {
1845 let config = MhConfig {
1847 n_samples: 5000,
1848 burn_in: 500,
1849 step_size: 1.0,
1850 seed: 42,
1851 };
1852 let log_post = |theta: &[f64]| -0.5 * (theta[0] - 3.0).powi(2);
1853 let result = metropolis_hastings(&log_post, &[0.0], &config);
1854 let vals: Vec<f64> = result.samples.iter().map(|s| s[0]).collect();
1855 let m = mean(&vals);
1856 assert!((m - 3.0).abs() < 0.3, "mean={m}");
1857 }
1858
1859 #[test]
1860 fn test_mh_acceptance_rate_reasonable() {
1861 let config = MhConfig {
1862 n_samples: 1000,
1863 burn_in: 100,
1864 step_size: 0.5,
1865 seed: 77,
1866 };
1867 let log_post = |theta: &[f64]| -0.5 * theta[0] * theta[0];
1868 let result = metropolis_hastings(&log_post, &[0.0], &config);
1869 assert!(
1871 result.acceptance_rate > 0.05 && result.acceptance_rate < 0.95,
1872 "acceptance_rate={}",
1873 result.acceptance_rate
1874 );
1875 }
1876
1877 #[test]
1880 fn test_pof_mc_certain_failure() {
1881 let params = vec![UncertainParameter::new(0.0, 1.0)];
1883 let (pf, _cov) = probability_of_failure_mc(¶ms, &|_| -1.0, 1000, 0);
1884 assert!((pf - 1.0).abs() < 1e-10, "pf={pf}");
1885 }
1886
1887 #[test]
1888 fn test_pof_mc_certain_safe() {
1889 let params = vec![UncertainParameter::new(0.0, 1.0)];
1891 let (pf, _cov) = probability_of_failure_mc(¶ms, &|_| 1.0, 1000, 0);
1892 assert_eq!(pf, 0.0);
1893 }
1894
1895 #[test]
1896 fn test_pof_mc_reasonable_estimate() {
1897 let params = vec![UncertainParameter::new(0.0, 1.0)];
1899 let (pf, _cov) = probability_of_failure_mc(¶ms, &|x| 2.0 - x[0], 50_000, 5);
1900 assert!((pf - 0.0228).abs() < 0.01, "pf={pf}, expected≈0.0228");
1901 }
1902
1903 #[test]
1906 fn test_cov_finite() {
1907 let data = vec![1.0, 2.0, 3.0];
1908 let cov = coefficient_of_variation(&data);
1909 assert!(cov.is_finite() && cov > 0.0);
1910 }
1911
1912 #[test]
1913 fn test_skewness_symmetric() {
1914 let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
1915 assert!(skewness(&data).abs() < 0.1);
1916 }
1917
1918 #[test]
1919 fn test_excess_kurtosis_normal() {
1920 let mut rng = UqRng::new(123);
1922 let data: Vec<f64> = (0..5000).map(|_| rng.next_normal()).collect();
1923 let k = excess_kurtosis(&data);
1924 assert!(k.abs() < 0.5, "excess_kurtosis={k}");
1925 }
1926
1927 #[test]
1930 fn test_probability_box_bounds() {
1931 let s1 = vec![1.0, 2.0, 3.0, 4.0];
1932 let s2 = vec![0.5, 1.5, 3.5, 5.0];
1933 let (lower, upper) = probability_box(&[s1, s2], &[2.0, 3.0]);
1934 assert!(lower[0] <= upper[0]);
1935 assert!(lower[1] <= upper[1]);
1936 }
1937
1938 #[test]
1941 fn test_reliability_index_known() {
1942 let pf = normal_cdf(-2.0);
1944 let beta = reliability_index(pf);
1945 assert!((beta - 2.0).abs() < 0.05, "beta={beta}");
1946 }
1947
1948 #[test]
1951 fn test_pce_mean_constant_model() {
1952 let pce = fit_pce(1, 1, 10, &|_| 7.0, 42);
1953 assert!(
1954 (pce.pce_mean() - 7.0).abs() < 0.5,
1955 "mean={}",
1956 pce.pce_mean()
1957 );
1958 }
1959
1960 #[test]
1961 fn test_pce_evaluate_consistency() {
1962 let pce = fit_pce(2, 2, 30, &|x| x[0] + x[1], 3);
1963 let pred = pce.evaluate(&[0.0, 0.0]);
1965 assert!(pred.abs() < 2.0, "pred={pred}");
1966 }
1967
1968 #[test]
1971 fn test_gauss_hermite_order1_weight() {
1972 let nodes = gauss_hermite_nodes(1);
1973 assert_eq!(nodes.len(), 1);
1974 assert!((nodes[0].weight - PI.sqrt()).abs() < 1e-6);
1975 }
1976
1977 #[test]
1978 fn test_gauss_hermite_order3_symmetry() {
1979 let nodes = gauss_hermite_nodes(3);
1980 assert_eq!(nodes.len(), 3);
1981 assert!((nodes[0].point + nodes[2].point).abs() < 1e-8);
1983 }
1984}