1use scirs2_core::random::{rngs::StdRng, Rng, SeedableRng};
18use scirs2_core::RngExt;
19
/// Errors produced by the surrogate-modeling and simulation utilities.
#[derive(Debug, Clone)]
pub enum SimError {
    /// An input had a different length/dimension than required.
    DimensionMismatch { expected: usize, got: usize },
    /// A prediction was requested before any data was fitted.
    NoTrainingData,
    /// The kernel matrix was not positive-definite.
    CholeskyFailed,
    /// A configuration parameter was invalid.
    InvalidConfig(String),
    /// A numerical routine failed.
    NumericalError(String),
}

impl std::fmt::Display for SimError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Messages are kept stable: the test-suite asserts on their text.
        match self {
            Self::DimensionMismatch { expected, got } => {
                write!(f, "dimension mismatch: expected {expected}, got {got}")
            }
            Self::NoTrainingData => write!(f, "surrogate has no training data"),
            Self::CholeskyFailed => write!(
                f,
                "Cholesky decomposition failed: matrix not positive-definite"
            ),
            Self::InvalidConfig(msg) => write!(f, "invalid config: {msg}"),
            Self::NumericalError(msg) => write!(f, "numerical error: {msg}"),
        }
    }
}

impl std::error::Error for SimError {}

/// Crate-local result alias over [`SimError`].
type Result<T> = std::result::Result<T, SimError>;
61
/// Nonlinearity applied between the hidden layers of a neural surrogate.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SurrogateActivation {
    ReLU,
    Tanh,
    GELU,
    Swish,
}

impl SurrogateActivation {
    /// Evaluates the activation at `x`.
    fn apply(self, x: f32) -> f32 {
        match self {
            Self::ReLU => x.max(0.0),
            Self::Tanh => x.tanh(),
            // tanh-based GELU approximation; 0.797_884_6 ≈ sqrt(2/π).
            Self::GELU => {
                let inner = 0.797_884_6_f32 * (x + 0.044715 * x * x * x);
                0.5 * x * (1.0 + inner.tanh())
            }
            // Swish / SiLU: x · sigmoid(x).
            Self::Swish => x * (1.0 / (1.0 + (-x).exp())),
        }
    }
}
90
/// Architecture hyper-parameters for a neural surrogate network.
#[derive(Debug, Clone)]
pub struct SurrogateConfig {
    /// Number of input features.
    pub input_dim: usize,
    /// Width of each hidden layer, in order.
    pub hidden_dims: Vec<usize>,
    /// Number of outputs.
    pub output_dim: usize,
    /// Activation for the hidden layers (the final layer is linear).
    pub activation: SurrogateActivation,
}
99
/// A fully-connected layer: `y = W·x + b`, with `weights` stored row-major
/// so `weights[o * in_dim + i]` maps input `i` to output `o`.
///
/// Reformatted: the original crammed three fields onto a single line,
/// unlike every other struct in this file.
#[derive(Debug, Clone)]
pub struct DenseLayer {
    /// Row-major weight matrix of length `in_dim * out_dim`.
    pub weights: Vec<f32>,
    /// Per-output bias of length `out_dim`.
    pub bias: Vec<f32>,
    /// Input width.
    pub in_dim: usize,
    /// Output width.
    pub out_dim: usize,
}
109
110impl DenseLayer {
111 fn new(in_dim: usize, out_dim: usize, rng: &mut StdRng) -> Self {
113 let limit = (6.0_f32 / (in_dim + out_dim) as f32).sqrt();
114 let weights: Vec<f32> = (0..in_dim * out_dim)
115 .map(|_| {
116 let u: f32 = rng.random::<f32>();
117 -limit + 2.0 * limit * u
118 })
119 .collect();
120 let bias = vec![0.0_f32; out_dim];
121 DenseLayer {
122 weights,
123 bias,
124 in_dim,
125 out_dim,
126 }
127 }
128
129 fn forward(&self, x: &[f32]) -> Vec<f32> {
131 let mut out = self.bias.clone();
132 for o in 0..self.out_dim {
133 for i in 0..self.in_dim {
134 out[o] += self.weights[o * self.in_dim + i] * x[i];
135 }
136 }
137 out
138 }
139}
140
/// A small fully-connected network used as a trainable surrogate model.
#[derive(Debug, Clone)]
pub struct NeuralSurrogate {
    /// Layers in forward order: input -> hidden… -> output.
    pub layers: Vec<DenseLayer>,
    /// Activation applied after every layer except the last.
    pub activation: SurrogateActivation,
    /// The configuration this network was built from.
    pub config: SurrogateConfig,
}
151
152impl NeuralSurrogate {
153 pub fn new(config: SurrogateConfig, seed: u64) -> Self {
155 let mut rng = StdRng::seed_from_u64(seed);
156 let mut dims: Vec<usize> = vec![config.input_dim];
157 dims.extend_from_slice(&config.hidden_dims);
158 dims.push(config.output_dim);
159
160 let layers: Vec<DenseLayer> = dims
161 .windows(2)
162 .map(|w| DenseLayer::new(w[0], w[1], &mut rng))
163 .collect();
164
165 NeuralSurrogate {
166 layers,
167 activation: config.activation,
168 config,
169 }
170 }
171
172 pub fn forward(&self, x: &[f32]) -> Vec<f32> {
174 let n_layers = self.layers.len();
175 let mut h: Vec<f32> = x.to_vec();
176 for (idx, layer) in self.layers.iter().enumerate() {
177 let pre = layer.forward(&h);
178 h = if idx < n_layers - 1 {
180 pre.iter().map(|&v| self.activation.apply(v)).collect()
181 } else {
182 pre
183 };
184 }
185 h
186 }
187
188 fn mse_loss(&self, x: &[f32], y: &[f32]) -> f32 {
190 let pred = self.forward(x);
191 pred.iter()
192 .zip(y.iter())
193 .map(|(p, t)| (p - t) * (p - t))
194 .sum::<f32>()
195 / y.len() as f32
196 }
197
198 pub fn train_step(&mut self, x: &[f32], y: &[f32], lr: f32) -> f32 {
201 let eps = 1e-4_f32;
202 let base_loss = self.mse_loss(x, y);
203
204 for layer_idx in 0..self.layers.len() {
206 let n_w = self.layers[layer_idx].weights.len();
208 let mut w_grads = vec![0.0_f32; n_w];
209 for wi in 0..n_w {
210 let orig = self.layers[layer_idx].weights[wi];
211 self.layers[layer_idx].weights[wi] = orig + eps;
212 let loss_plus = self.mse_loss(x, y);
213 self.layers[layer_idx].weights[wi] = orig;
214 w_grads[wi] = (loss_plus - base_loss) / eps;
215 }
216 for wi in 0..n_w {
218 self.layers[layer_idx].weights[wi] -= lr * w_grads[wi];
219 }
220
221 let n_b = self.layers[layer_idx].bias.len();
223 let mut b_grads = vec![0.0_f32; n_b];
224 for bi in 0..n_b {
225 let orig = self.layers[layer_idx].bias[bi];
226 self.layers[layer_idx].bias[bi] = orig + eps;
227 let loss_plus = self.mse_loss(x, y);
228 self.layers[layer_idx].bias[bi] = orig;
229 b_grads[bi] = (loss_plus - base_loss) / eps;
230 }
231 for bi in 0..n_b {
232 self.layers[layer_idx].bias[bi] -= lr * b_grads[bi];
233 }
234 }
235
236 base_loss
237 }
238}
239
/// A deep ensemble: independently-initialized networks whose prediction
/// spread provides an uncertainty estimate.
#[derive(Debug, Clone)]
pub struct EnsembleSurrogate {
    /// Member networks; all share the same architecture.
    pub models: Vec<NeuralSurrogate>,
}
252
253impl EnsembleSurrogate {
254 pub fn new(config: SurrogateConfig, n_models: usize, base_seed: u64) -> Self {
256 let models = (0..n_models)
257 .map(|i| NeuralSurrogate::new(config.clone(), base_seed + i as u64))
258 .collect();
259 EnsembleSurrogate { models }
260 }
261
262 pub fn predict_with_uncertainty(&self, x: &[f32]) -> (Vec<f32>, Vec<f32>) {
264 let preds: Vec<Vec<f32>> = self.models.iter().map(|m| m.forward(x)).collect();
265 if preds.is_empty() {
266 return (vec![], vec![]);
267 }
268 let out_dim = preds[0].len();
269 let n = preds.len() as f32;
270
271 let mean: Vec<f32> = (0..out_dim)
272 .map(|j| preds.iter().map(|p| p[j]).sum::<f32>() / n)
273 .collect();
274
275 let variance: Vec<f32> = (0..out_dim)
276 .map(|j| {
277 let m = mean[j];
278 preds.iter().map(|p| (p[j] - m) * (p[j] - m)).sum::<f32>() / n
279 })
280 .collect();
281
282 (mean, variance)
283 }
284
285 pub fn active_learning_score(&self, x: &[f32]) -> f32 {
287 let (_, var) = self.predict_with_uncertainty(x);
288 if var.is_empty() {
289 return 0.0;
290 }
291 var.iter().sum::<f32>() / var.len() as f32
292 }
293
294 pub fn train_step_all(&mut self, x: &[f32], y: &[f32], lr: f32) -> Vec<f32> {
296 self.models
297 .iter_mut()
298 .map(|m| m.train_step(x, y, lr))
299 .collect()
300 }
301}
302
/// A set of linear physics constraints `A·pred = rhs` used to penalize
/// surrogate outputs that violate known relations.
#[derive(Debug, Clone)]
pub struct PhysicsResidual {
    /// Row-major `n_constraints x output_dim` constraint matrix.
    pub constraint_matrix: Vec<f32>,
    /// Right-hand side, one entry per constraint.
    pub rhs: Vec<f32>,
    pub n_constraints: usize,
    pub output_dim: usize,
}

impl PhysicsResidual {
    /// Wraps the raw constraint data; no validation is performed.
    pub fn new(
        constraint_matrix: Vec<f32>,
        rhs: Vec<f32>,
        n_constraints: usize,
        output_dim: usize,
    ) -> Self {
        PhysicsResidual {
            constraint_matrix,
            rhs,
            n_constraints,
            output_dim,
        }
    }

    /// Mean squared residual `‖A·pred − rhs‖² / n_constraints`
    /// (0.0 when there are no constraints). Prediction entries beyond
    /// `pred.len()` are ignored.
    pub fn physics_loss(&self, pred: &[f32]) -> f32 {
        let cols = self.output_dim.min(pred.len());
        let mut total = 0.0_f32;
        for row in 0..self.n_constraints {
            let mut dot = 0.0_f32;
            for col in 0..cols {
                dot += self.constraint_matrix[row * self.output_dim + col] * pred[col];
            }
            let residual = dot - self.rhs[row];
            total += residual * residual;
        }
        if self.n_constraints > 0 {
            total / self.n_constraints as f32
        } else {
            0.0
        }
    }
}
353
/// Physics-informed surrogate: a neural surrogate whose training loss adds
/// a weighted physics-residual penalty to the data MSE.
#[derive(Debug, Clone)]
pub struct PiSurrogate {
    /// Underlying neural network.
    pub base: NeuralSurrogate,
    /// Linear physics constraints penalized during training.
    pub constraints: Vec<PhysicsResidual>,
    /// Weight of the physics penalty relative to the data loss.
    pub lambda: f32,
}
364
impl PiSurrogate {
    /// Builds a physics-informed surrogate around a freshly-initialized
    /// network; `lambda` scales the physics penalty in `total_loss`.
    pub fn new(
        config: SurrogateConfig,
        constraints: Vec<PhysicsResidual>,
        lambda: f32,
        seed: u64,
    ) -> Self {
        PiSurrogate {
            base: NeuralSurrogate::new(config, seed),
            constraints,
            lambda,
        }
    }

    /// Forward pass of the underlying network.
    pub fn forward(&self, x: &[f32]) -> Vec<f32> {
        self.base.forward(x)
    }

    /// Combined loss: data MSE plus `lambda` times the summed physics
    /// residuals of the prediction.
    pub fn total_loss(&self, x: &[f32], y: &[f32]) -> f32 {
        let pred = self.base.forward(x);

        // max(1) guards the division when `y` is empty.
        let data_loss = pred
            .iter()
            .zip(y.iter())
            .map(|(p, t)| (p - t) * (p - t))
            .sum::<f32>()
            / y.len().max(1) as f32;

        let phys_loss: f32 = self
            .constraints
            .iter()
            .map(|c| c.physics_loss(&pred))
            .sum::<f32>();

        data_loss + self.lambda * phys_loss
    }

    /// One SGD step on the combined loss using forward-difference
    /// numerical gradients (one extra forward pass per parameter).
    /// Returns the pre-step loss.
    pub fn train_step(&mut self, x: &[f32], y: &[f32], lr: f32) -> f32 {
        let eps = 1e-4_f32;

        let base_loss = self.total_loss(x, y);

        for layer_idx in 0..self.base.layers.len() {
            // Perturb each weight, re-evaluate the loss, restore; then
            // apply the accumulated gradients in a second pass so every
            // gradient is measured against the same base parameters.
            let n_w = self.base.layers[layer_idx].weights.len();
            let mut w_grads = vec![0.0_f32; n_w];
            for wi in 0..n_w {
                let orig = self.base.layers[layer_idx].weights[wi];
                self.base.layers[layer_idx].weights[wi] = orig + eps;
                let loss_plus = self.total_loss(x, y);
                self.base.layers[layer_idx].weights[wi] = orig;
                w_grads[wi] = (loss_plus - base_loss) / eps;
            }
            for wi in 0..n_w {
                self.base.layers[layer_idx].weights[wi] -= lr * w_grads[wi];
            }

            // Same finite-difference scheme for the biases.
            let n_b = self.base.layers[layer_idx].bias.len();
            let mut b_grads = vec![0.0_f32; n_b];
            for bi in 0..n_b {
                let orig = self.base.layers[layer_idx].bias[bi];
                self.base.layers[layer_idx].bias[bi] = orig + eps;
                let loss_plus = self.total_loss(x, y);
                self.base.layers[layer_idx].bias[bi] = orig;
                b_grads[bi] = (loss_plus - base_loss) / eps;
            }
            for bi in 0..n_b {
                self.base.layers[layer_idx].bias[bi] -= lr * b_grads[bi];
            }
        }

        base_loss
    }
}
447
/// Squared-exponential (RBF) covariance with additive observation noise.
#[derive(Debug, Clone)]
pub struct RbfKernel {
    /// Correlation length: larger values give smoother functions.
    pub length_scale: f32,
    /// Prior variance of the latent function, i.e. `k(x, x)`.
    pub signal_var: f32,
    /// Observation-noise variance added on the diagonal during fitting.
    pub noise_var: f32,
}

impl RbfKernel {
    /// Creates the kernel; hyper-parameters are not validated.
    pub fn new(length_scale: f32, signal_var: f32, noise_var: f32) -> Self {
        RbfKernel {
            length_scale,
            signal_var,
            noise_var,
        }
    }

    /// Evaluates `σ² · exp(−‖x1 − x2‖² / (2ℓ²))`.
    pub fn kernel(&self, x1: &[f32], x2: &[f32]) -> f32 {
        let sq_dist = x1
            .iter()
            .zip(x2.iter())
            .fold(0.0_f32, |acc, (a, b)| acc + (a - b) * (a - b));
        let denom = 2.0 * self.length_scale * self.length_scale;
        self.signal_var * (-sq_dist / denom).exp()
    }
}
483
/// Exact Gaussian-process regressor with an RBF kernel.
#[derive(Debug, Clone)]
pub struct GpSurrogate {
    /// Training inputs, one point per entry.
    pub x_train: Vec<Vec<f32>>,
    /// Training targets (one scalar per point).
    pub y_train: Vec<f32>,
    pub kernel: RbfKernel,
    /// Precomputed `K⁻¹·y`, used for mean prediction.
    pub alpha: Vec<f32>,
    // Lower-triangular Cholesky factor of the noisy kernel matrix
    // (row-major, n x n).
    chol: Vec<f32>,
    // Number of fitted training points (0 = unfitted).
    n: usize,
}
498
499impl GpSurrogate {
500 pub fn new(kernel: RbfKernel) -> Self {
502 GpSurrogate {
503 x_train: vec![],
504 y_train: vec![],
505 kernel,
506 alpha: vec![],
507 chol: vec![],
508 n: 0,
509 }
510 }
511
512 pub fn fit(&mut self, x_train: Vec<Vec<f32>>, y_train: Vec<f32>) -> Result<()> {
514 if x_train.len() != y_train.len() {
515 return Err(SimError::DimensionMismatch {
516 expected: x_train.len(),
517 got: y_train.len(),
518 });
519 }
520 self.n = x_train.len();
521 self.x_train = x_train;
522 self.y_train = y_train.clone();
523
524 let n = self.n;
526 let mut k = vec![0.0_f32; n * n];
527 for i in 0..n {
528 for j in 0..n {
529 k[i * n + j] = self.kernel.kernel(&self.x_train[i], &self.x_train[j]);
530 }
531 k[i * n + i] += self.kernel.noise_var;
532 }
533
534 let l = cholesky_decompose(&k, n)?;
536
537 let ly = forward_substitution(&l, &y_train, n);
539 self.alpha = back_substitution_transpose(&l, &ly, n);
540 self.chol = l;
541 Ok(())
542 }
543
544 pub fn predict(&self, x: &[f32]) -> Result<(f32, f32)> {
546 if self.n == 0 {
547 return Err(SimError::NoTrainingData);
548 }
549 let k_star: Vec<f32> = self
551 .x_train
552 .iter()
553 .map(|xi| self.kernel.kernel(x, xi))
554 .collect();
555
556 let mean: f32 = k_star
558 .iter()
559 .zip(self.alpha.iter())
560 .map(|(a, b)| a * b)
561 .sum();
562
563 let k_self = self.kernel.signal_var; let v = forward_substitution(&self.chol, &k_star, self.n);
567 let v_sq: f32 = v.iter().map(|vi| vi * vi).sum();
568 let variance = (k_self - v_sq).max(0.0);
569
570 Ok((mean, variance))
571 }
572}
573
/// Cholesky factorization of the symmetric row-major `n x n` matrix `a`:
/// returns lower-triangular `L` (row-major) with `a = L·Lᵀ`.
///
/// # Errors
/// `CholeskyFailed` if a non-positive diagonal pivot appears (matrix not
/// positive-definite) or an off-diagonal step hits a numerically zero pivot.
fn cholesky_decompose(a: &[f32], n: usize) -> Result<Vec<f32>> {
    let mut l = vec![0.0_f32; n * n];
    for i in 0..n {
        for j in 0..=i {
            // Dot product of the already-computed prefixes of rows i and j.
            let mut sum = 0.0_f32;
            for k in 0..j {
                sum += l[i * n + k] * l[j * n + k];
            }
            if i == j {
                let diag = a[i * n + i] - sum;
                if diag <= 0.0 {
                    return Err(SimError::CholeskyFailed);
                }
                l[i * n + j] = diag.sqrt();
            } else {
                if l[j * n + j].abs() < 1e-12 {
                    return Err(SimError::CholeskyFailed);
                }
                l[i * n + j] = (a[i * n + j] - sum) / l[j * n + j];
            }
        }
    }
    Ok(l)
}
604
/// Solves `L·y = b` for lower-triangular row-major `L` (`n x n`).
/// A numerically zero pivot yields a 0 component instead of dividing.
fn forward_substitution(l: &[f32], b: &[f32], n: usize) -> Vec<f32> {
    let mut y = vec![0.0_f32; n];
    for i in 0..n {
        // Subtract the already-solved components in order.
        let row = &l[i * n..i * n + i];
        let acc = row
            .iter()
            .zip(y[..i].iter())
            .fold(b[i], |s, (lij, yj)| s - lij * yj);
        let pivot = l[i * n + i];
        y[i] = if pivot.abs() > 1e-12 { acc / pivot } else { 0.0 };
    }
    y
}
618
/// Solves `Lᵀ·x = y` for lower-triangular row-major `L` (so `Lᵀ` is upper
/// triangular), reading the transposed entries directly from `L`.
/// A numerically zero pivot yields a 0 component instead of dividing.
fn back_substitution_transpose(l: &[f32], y: &[f32], n: usize) -> Vec<f32> {
    let mut x = vec![0.0_f32; n];
    for i in (0..n).rev() {
        let mut acc = y[i];
        for j in i + 1..n {
            // (Lᵀ)[i][j] == L[j][i]
            acc -= l[j * n + i] * x[j];
        }
        let pivot = l[i * n + i];
        x[i] = if pivot.abs() > 1e-12 { acc / pivot } else { 0.0 };
    }
    x
}
632
/// Latin-hypercube design generator: `n_samples` points, stratified along
/// each of `n_dims` dimensions.
#[derive(Debug, Clone)]
pub struct LatinHypercubeSampler {
    /// Number of design points per draw.
    pub n_samples: usize,
    /// Dimensionality of each point.
    pub n_dims: usize,
}
647
impl LatinHypercubeSampler {
    /// Creates a sampler.
    ///
    /// # Errors
    /// `InvalidConfig` if either count is zero.
    pub fn new(n_samples: usize, n_dims: usize) -> Result<Self> {
        if n_samples == 0 || n_dims == 0 {
            return Err(SimError::InvalidConfig(
                "n_samples and n_dims must be positive".to_string(),
            ));
        }
        Ok(LatinHypercubeSampler { n_samples, n_dims })
    }

    /// Draws one Latin-hypercube design inside `bounds` (one `(lo, hi)`
    /// pair per dimension): each dimension is split into `n_samples`
    /// equal strata and every sample lands in a distinct stratum.
    ///
    /// # Errors
    /// `DimensionMismatch` if `bounds.len() != n_dims`.
    pub fn sample(&self, bounds: &[(f32, f32)], rng: &mut StdRng) -> Result<Vec<Vec<f32>>> {
        if bounds.len() != self.n_dims {
            return Err(SimError::DimensionMismatch {
                expected: self.n_dims,
                got: bounds.len(),
            });
        }
        let n = self.n_samples;
        let mut design = vec![vec![0.0_f32; self.n_dims]; n];

        for d in 0..self.n_dims {
            // A random permutation assigns sample i to stratum perm[i].
            let mut perm: Vec<usize> = (0..n).collect();
            fisher_yates_shuffle(&mut perm, rng);

            let (lo, hi) = bounds[d];
            let range = hi - lo;
            for (i, &stratum) in perm.iter().enumerate() {
                // Uniform jitter inside the stratum, then rescale to bounds.
                let u: f32 = rng.random::<f32>();
                let unit = (stratum as f32 + u) / n as f32;
                design[i][d] = lo + unit * range;
            }
        }
        Ok(design)
    }

    /// Draws `n_candidates` designs and keeps the one whose minimum
    /// pairwise point distance is largest (maximin criterion).
    ///
    /// # Errors
    /// `InvalidConfig` if `n_candidates == 0`; otherwise whatever
    /// `sample` returns.
    pub fn maximin_lhs(
        &self,
        bounds: &[(f32, f32)],
        n_candidates: usize,
        rng: &mut StdRng,
    ) -> Result<Vec<Vec<f32>>> {
        if n_candidates == 0 {
            return Err(SimError::InvalidConfig(
                "n_candidates must be positive".to_string(),
            ));
        }
        let mut best: Vec<Vec<f32>> = vec![];
        let mut best_mindist = -1.0_f32;

        for _ in 0..n_candidates {
            let design = self.sample(bounds, rng)?;
            let md = min_pairwise_distance(&design);
            if md > best_mindist {
                best_mindist = md;
                best = design;
            }
        }
        Ok(best)
    }
}
717
/// In-place Fisher–Yates shuffle; uniform over permutations given a
/// uniform `random_range`.
fn fisher_yates_shuffle(v: &mut [usize], rng: &mut StdRng) {
    let n = v.len();
    for i in (1..n).rev() {
        // Pick j uniformly from 0..=i and swap into the settled suffix.
        let j = rng.random_range(0..=i);
        v.swap(i, j);
    }
}
726
/// Smallest Euclidean distance over all point pairs; `f32::INFINITY` when
/// fewer than two points are given.
fn min_pairwise_distance(points: &[Vec<f32>]) -> f32 {
    if points.len() < 2 {
        return f32::INFINITY;
    }
    let mut best = f32::INFINITY;
    for (i, p) in points.iter().enumerate() {
        for q in points.iter().skip(i + 1) {
            let dist = p
                .iter()
                .zip(q.iter())
                .map(|(a, b)| (a - b) * (a - b))
                .sum::<f32>()
                .sqrt();
            if dist < best {
                best = dist;
            }
        }
    }
    best
}
749
/// Standard normal CDF via the Abramowitz & Stegun 26.2.17 polynomial
/// approximation (absolute error below ~7.5e-8 in f64; f32 here).
fn standard_normal_cdf(z: f32) -> f32 {
    let t = 1.0 / (1.0 + 0.2316419 * z.abs());
    let poly = t
        * (0.319_381_54
            + t * (-0.356_563_78 + t * (1.781_477_9 + t * (-1.821_255_9 + t * 1.330_274_5))));
    let pdf = (-z * z / 2.0).exp() / (2.0 * std::f32::consts::PI).sqrt();
    // pdf * poly approximates the upper-tail mass for |z|; mirror for z < 0.
    if z >= 0.0 {
        1.0 - pdf * poly
    } else {
        pdf * poly
    }
}
769
/// Standard normal density φ(z) = exp(−z²/2) / √(2π).
fn standard_normal_pdf(z: f32) -> f32 {
    let norm = (2.0 * std::f32::consts::PI).sqrt();
    (-z * z / 2.0).exp() / norm
}
774
/// Expected-improvement acquisition built on a fitted [`GpSurrogate`].
#[derive(Debug, Clone)]
pub struct AdaptiveSampler {
    /// The GP used for mean/variance predictions.
    pub surrogate: GpSurrogate,
}
780
781impl AdaptiveSampler {
782 pub fn new(surrogate: GpSurrogate) -> Self {
784 AdaptiveSampler { surrogate }
785 }
786
787 pub fn expected_improvement(&self, x: &[f32], y_best: f32) -> f32 {
790 match self.surrogate.predict(x) {
791 Ok((mean, var)) => {
792 let std_dev = var.sqrt().max(1e-8);
793 let z = (mean - y_best) / std_dev;
794 let ei =
795 (mean - y_best) * standard_normal_cdf(z) + std_dev * standard_normal_pdf(z);
796 ei.max(0.0)
797 }
798 Err(_) => 0.0,
799 }
800 }
801
802 pub fn next_point(&self, candidates: &[Vec<f32>], y_best: f32) -> usize {
804 candidates
805 .iter()
806 .enumerate()
807 .map(|(i, x)| (i, self.expected_improvement(x, y_best)))
808 .max_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal))
809 .map(|(i, _)| i)
810 .unwrap_or(0)
811 }
812}
813
/// A 1-D linear (Hookean) spring connecting two point masses.
#[derive(Debug, Clone)]
pub struct Spring {
    /// Index of the first endpoint mass.
    pub node_i: usize,
    /// Index of the second endpoint mass.
    pub node_j: usize,
    /// Spring constant `k` (force per unit extension).
    pub stiffness: f32,
    /// Natural (unstretched) length.
    pub rest_length: f32,
}
827
/// A 1-D collection of point masses coupled by linear springs.
#[derive(Debug, Clone)]
pub struct SpringMassSystem {
    /// Mass of each node (any consistent unit).
    pub masses: Vec<f32>,
    /// Springs between node pairs; springs with out-of-range node indices
    /// are silently skipped during force/energy computation.
    pub springs: Vec<Spring>,
}
836
837impl SpringMassSystem {
838 pub fn new(masses: Vec<f32>, springs: Vec<(usize, usize, f32, f32)>) -> Result<Self> {
840 if masses.is_empty() {
841 return Err(SimError::InvalidConfig(
842 "masses must be non-empty".to_string(),
843 ));
844 }
845 let springs = springs
846 .into_iter()
847 .map(|(i, j, k, r)| Spring {
848 node_i: i,
849 node_j: j,
850 stiffness: k,
851 rest_length: r,
852 })
853 .collect();
854 Ok(SpringMassSystem { masses, springs })
855 }
856
857 fn compute_forces(&self, positions: &[f32]) -> Vec<f32> {
859 let n = self.masses.len();
860 let mut forces = vec![0.0_f32; n];
861 for s in &self.springs {
862 if s.node_i >= n || s.node_j >= n {
863 continue;
864 }
865 let dx = positions[s.node_j] - positions[s.node_i];
866 let current_len = dx.abs();
867 let extension = current_len - s.rest_length;
868 let force_mag = s.stiffness * extension;
869 let dir = if dx >= 0.0 { 1.0_f32 } else { -1.0_f32 };
871 forces[s.node_i] += force_mag * dir;
872 forces[s.node_j] -= force_mag * dir;
873 }
874 forces
875 }
876
877 pub fn simulate_step(
880 &self,
881 positions: &[f32],
882 velocities: &[f32],
883 dt: f32,
884 ) -> Result<(Vec<f32>, Vec<f32>)> {
885 let n = self.masses.len();
886 if positions.len() != n || velocities.len() != n {
887 return Err(SimError::DimensionMismatch {
888 expected: n,
889 got: positions.len(),
890 });
891 }
892
893 let forces = self.compute_forces(positions);
894
895 let mut acc = vec![0.0_f32; n];
897 for i in 0..n {
898 acc[i] = forces[i] / self.masses[i].max(1e-12);
899 }
900
901 let new_pos: Vec<f32> = (0..n)
902 .map(|i| positions[i] + velocities[i] * dt + 0.5 * acc[i] * dt * dt)
903 .collect();
904
905 let forces2 = self.compute_forces(&new_pos);
906 let new_vel: Vec<f32> = (0..n)
907 .map(|i| {
908 let acc2 = forces2[i] / self.masses[i].max(1e-12);
909 velocities[i] + 0.5 * (acc[i] + acc2) * dt
910 })
911 .collect();
912
913 Ok((new_pos, new_vel))
914 }
915
916 pub fn kinetic_energy(&self, velocities: &[f32]) -> f32 {
918 self.masses
919 .iter()
920 .zip(velocities.iter())
921 .map(|(m, v)| 0.5 * m * v * v)
922 .sum()
923 }
924
925 pub fn potential_energy(&self, positions: &[f32]) -> f32 {
927 let n = positions.len();
928 self.springs
929 .iter()
930 .filter(|s| s.node_i < n && s.node_j < n)
931 .map(|s| {
932 let dx = (positions[s.node_j] - positions[s.node_i]).abs();
933 let ext = dx - s.rest_length;
934 0.5 * s.stiffness * ext * ext
935 })
936 .sum()
937 }
938}
939
/// Symmetric 3×3 strain-rate / Reynolds-stress tensor stored by its six
/// independent components.
#[derive(Debug, Clone)]
pub struct ReynoldsStressTensor {
    pub s11: f32,
    pub s12: f32,
    pub s13: f32,
    pub s22: f32,
    pub s23: f32,
    pub s33: f32,
}

impl ReynoldsStressTensor {
    /// Magnitude `sqrt(2 · S_ij S_ij)` — the quantity consumed by
    /// eddy-viscosity closures in this file.
    pub fn frobenius_norm(&self) -> f32 {
        // S_ij S_ij: diagonal terms once, off-diagonal terms twice
        // (symmetry). Renamed from the misleading `trace_sq`: this is a
        // sum of squares, not a squared trace.
        let sum_sq = self.s11 * self.s11
            + self.s22 * self.s22
            + self.s33 * self.s33
            + 2.0 * (self.s12 * self.s12 + self.s13 * self.s13 + self.s23 * self.s23);
        (2.0 * sum_sq).sqrt()
    }
}
965
/// Smagorinsky sub-grid-scale eddy-viscosity closure.
#[derive(Debug, Clone)]
pub struct SmagorinskyModel {
    /// Smagorinsky constant Cs.
    pub cs: f32,
    /// Filter width Δ.
    pub delta: f32,
}

impl SmagorinskyModel {
    /// Creates a model with the given constant and filter width.
    pub fn new(cs: f32, delta: f32) -> Self {
        SmagorinskyModel { cs, delta }
    }

    /// Eddy viscosity `ν_t = (Cs·Δ)² · |S|`, where `|S|` is the
    /// strain-rate magnitude from `frobenius_norm`.
    pub fn eddy_viscosity(&self, strain_rate: &ReynoldsStressTensor) -> f32 {
        let cs_delta = self.cs * self.delta;
        cs_delta * cs_delta * strain_rate.frobenius_norm()
    }
}
989
/// Linear correction head applied on top of a RANS solver's output:
/// `corrected = rans + W·features + b`.
#[derive(Debug, Clone)]
pub struct MlCorrectionModel {
    /// Row-major `output_dim x feature_dim` weight matrix.
    pub weights: Vec<f32>,
    /// Per-output bias.
    pub bias: Vec<f32>,
    pub feature_dim: usize,
    pub output_dim: usize,
}

impl MlCorrectionModel {
    /// Identity correction: all-zero weights and biases.
    pub fn new_zeros(feature_dim: usize, output_dim: usize) -> Self {
        MlCorrectionModel {
            weights: vec![0.0_f32; output_dim * feature_dim],
            bias: vec![0.0_f32; output_dim],
            feature_dim,
            output_dim,
        }
    }

    /// Adds the learned linear correction to `rans_output`; output and
    /// feature counts are clamped to the shorter of the model's and the
    /// inputs' lengths.
    pub fn apply_correction(&self, rans_output: &[f32], features: &[f32]) -> Vec<f32> {
        let out_dim = self.output_dim.min(rans_output.len());
        let feat_dim = self.feature_dim.min(features.len());
        (0..out_dim)
            .map(|o| {
                let corr = (0..feat_dim)
                    .map(|fi| self.weights[o * self.feature_dim + fi] * features[fi])
                    .sum::<f32>()
                    + self.bias[o];
                rans_output[o] + corr
            })
            .collect()
    }
}
1027
/// Stateless helpers that expand a simulation dataset with perturbed,
/// symmetry-mapped, or interpolated variants of existing samples.
pub struct SimulationDataAugmenter;
1034
1035impl SimulationDataAugmenter {
1036 pub fn noise_injection(x: &[f32], noise_std: f32, rng: &mut StdRng) -> Vec<f32> {
1038 x.iter()
1039 .map(|&v| {
1040 let noise = sample_standard_normal_f32(rng) * noise_std;
1041 v + noise
1042 })
1043 .collect()
1044 }
1045
1046 pub fn symmetry_augment(x: &[f32], symmetry_matrix: &[f32], n: usize) -> Vec<Vec<f32>> {
1051 let dim = x.len();
1052 if dim == 0 || n == 0 {
1053 return vec![];
1054 }
1055 let mat_dim = (symmetry_matrix.len() as f32).sqrt().round() as usize;
1056 if mat_dim * mat_dim != symmetry_matrix.len() || mat_dim != dim {
1057 return (0..n).map(|_| x.to_vec()).collect();
1059 }
1060
1061 let mut results = Vec::with_capacity(n);
1062 let mut current = x.to_vec();
1063 for _ in 0..n {
1064 current = mat_vec_mul(symmetry_matrix, ¤t, dim);
1065 results.push(current.clone());
1066 }
1067 results
1068 }
1069
1070 pub fn interpolate_between(x1: &[f32], x2: &[f32], n_interp: usize) -> Vec<Vec<f32>> {
1073 if n_interp == 0 {
1074 return vec![];
1075 }
1076 let len = x1.len().min(x2.len());
1077 (1..=n_interp)
1078 .map(|k| {
1079 let t = k as f32 / (n_interp + 1) as f32;
1080 (0..len).map(|i| x1[i] + t * (x2[i] - x1[i])).collect()
1081 })
1082 .collect()
1083 }
1084}
1085
/// Draws one standard-normal sample via the Box–Muller transform.
fn sample_standard_normal_f32(rng: &mut StdRng) -> f32 {
    // Clamp u1 away from zero so ln(u1) stays finite.
    let u1: f32 = rng.random::<f32>().max(1e-37);
    let u2: f32 = rng.random::<f32>();
    (-2.0 * u1.ln()).sqrt() * (2.0 * std::f32::consts::PI * u2).cos()
}
1092
/// Multiplies the row-major `n x n` matrix `a` by `x`; missing entries of
/// `x` (when `x.len() < n`) are treated as zero.
fn mat_vec_mul(a: &[f32], x: &[f32], n: usize) -> Vec<f32> {
    let mut out = Vec::with_capacity(n);
    for row in 0..n {
        let mut acc = 0.0_f32;
        for col in 0..n {
            acc += a[row * n + col] * x.get(col).copied().unwrap_or(0.0);
        }
        out.push(acc);
    }
    out
}
1103
/// Stateless accuracy/uncertainty metrics for comparing surrogate output
/// against reference simulation data. All methods truncate to the common
/// prefix of their inputs.
pub struct SimulationMetrics;

impl SimulationMetrics {
    /// Relative L2 error ‖pred − true‖ / ‖true‖; falls back to the
    /// absolute L2 error when ‖true‖ is (near) zero. Empty input gives 0.
    pub fn relative_l2_error(pred: &[f32], true_vals: &[f32]) -> f32 {
        let n = pred.len().min(true_vals.len());
        if n == 0 {
            return 0.0;
        }
        let num = pred[..n]
            .iter()
            .zip(&true_vals[..n])
            .map(|(p, t)| (p - t).powi(2))
            .sum::<f32>()
            .sqrt();
        let den = true_vals[..n].iter().map(|t| t.powi(2)).sum::<f32>().sqrt();
        if den < 1e-12 {
            num
        } else {
            num / den
        }
    }

    /// Largest absolute componentwise error (0 for empty input).
    pub fn max_absolute_error(pred: &[f32], true_vals: &[f32]) -> f32 {
        let mut worst = 0.0_f32;
        for (p, t) in pred.iter().zip(true_vals.iter()) {
            worst = worst.max((p - t).abs());
        }
        worst
    }

    /// Coefficient of determination (Q²/R²): 1 is perfect. Degenerate
    /// constant targets give 1.0 when matched and 0.0 otherwise.
    pub fn q2_score(pred: &[f32], true_vals: &[f32]) -> f32 {
        let n = pred.len().min(true_vals.len());
        if n == 0 {
            return 0.0;
        }
        let mean = true_vals[..n].iter().sum::<f32>() / n as f32;
        let ss_res: f32 = pred[..n]
            .iter()
            .zip(&true_vals[..n])
            .map(|(p, t)| (p - t).powi(2))
            .sum();
        let ss_tot: f32 = true_vals[..n].iter().map(|t| (t - mean).powi(2)).sum();
        if ss_tot < 1e-12 {
            if ss_res < 1e-12 {
                1.0
            } else {
                0.0
            }
        } else {
            1.0 - ss_res / ss_tot
        }
    }

    /// Fraction of true values falling inside the symmetric `z`-sigma
    /// band around the predicted mean.
    pub fn coverage_probability(
        pred_mean: &[f32],
        pred_std: &[f32],
        true_vals: &[f32],
        z: f32,
    ) -> f32 {
        let n = pred_mean.len().min(pred_std.len()).min(true_vals.len());
        if n == 0 {
            return 0.0;
        }
        let mut covered = 0usize;
        for i in 0..n {
            let half_width = z * pred_std[i];
            let t = true_vals[i];
            if t >= pred_mean[i] - half_width && t <= pred_mean[i] + half_width {
                covered += 1;
            }
        }
        covered as f32 / n as f32
    }
}
1182
1183#[cfg(test)]
1188mod tests {
1189 use super::*;
1190
1191 #[test]
1194 fn test_activation_relu_positive() {
1195 assert!((SurrogateActivation::ReLU.apply(2.0) - 2.0).abs() < 1e-6);
1196 }
1197
1198 #[test]
1199 fn test_activation_relu_negative() {
1200 assert!((SurrogateActivation::ReLU.apply(-1.5)).abs() < 1e-6);
1201 }
1202
1203 #[test]
1204 fn test_activation_tanh() {
1205 let v = SurrogateActivation::Tanh.apply(0.0);
1206 assert!(v.abs() < 1e-6);
1207 }
1208
1209 #[test]
1210 fn test_activation_gelu_zero() {
1211 let v = SurrogateActivation::GELU.apply(0.0);
1212 assert!(v.abs() < 1e-5);
1213 }
1214
1215 #[test]
1216 fn test_activation_swish_positive() {
1217 let v = SurrogateActivation::Swish.apply(1.0);
1218 assert!(v > 0.0);
1219 }
1220
1221 #[test]
1224 fn test_neural_surrogate_forward_shape() {
1225 let config = SurrogateConfig {
1226 input_dim: 4,
1227 hidden_dims: vec![8, 8],
1228 output_dim: 2,
1229 activation: SurrogateActivation::ReLU,
1230 };
1231 let model = NeuralSurrogate::new(config, 42);
1232 let x = vec![1.0_f32; 4];
1233 let y = model.forward(&x);
1234 assert_eq!(y.len(), 2);
1235 }
1236
1237 #[test]
1238 fn test_neural_surrogate_train_step_returns_finite_loss() {
1239 let config = SurrogateConfig {
1240 input_dim: 2,
1241 hidden_dims: vec![4],
1242 output_dim: 1,
1243 activation: SurrogateActivation::Tanh,
1244 };
1245 let mut model = NeuralSurrogate::new(config, 7);
1246 let x = vec![0.5_f32, -0.3];
1247 let y = vec![1.0_f32];
1248 let initial_loss = model.mse_loss(&x, &y);
1250 let mut final_loss = initial_loss;
1251 for _ in 0..10 {
1252 final_loss = model.train_step(&x, &y, 0.001);
1253 }
1254 assert!(initial_loss.is_finite());
1255 assert!(final_loss.is_finite());
1256 assert!(initial_loss >= 0.0);
1257 }
1258
1259 #[test]
1260 fn test_neural_surrogate_gelu_activation() {
1261 let config = SurrogateConfig {
1262 input_dim: 3,
1263 hidden_dims: vec![6],
1264 output_dim: 2,
1265 activation: SurrogateActivation::GELU,
1266 };
1267 let model = NeuralSurrogate::new(config, 99);
1268 let x = vec![0.1, 0.2, 0.3];
1269 let y = model.forward(&x);
1270 assert_eq!(y.len(), 2);
1271 }
1272
1273 #[test]
1274 fn test_neural_surrogate_swish_activation() {
1275 let config = SurrogateConfig {
1276 input_dim: 2,
1277 hidden_dims: vec![4, 4],
1278 output_dim: 1,
1279 activation: SurrogateActivation::Swish,
1280 };
1281 let model = NeuralSurrogate::new(config, 13);
1282 let out = model.forward(&[1.0, -1.0]);
1283 assert_eq!(out.len(), 1);
1284 }
1285
1286 #[test]
1289 fn test_ensemble_predict_mean_shape() {
1290 let config = SurrogateConfig {
1291 input_dim: 3,
1292 hidden_dims: vec![4],
1293 output_dim: 2,
1294 activation: SurrogateActivation::ReLU,
1295 };
1296 let ens = EnsembleSurrogate::new(config, 5, 0);
1297 let (mean, var) = ens.predict_with_uncertainty(&[0.1, 0.2, 0.3]);
1298 assert_eq!(mean.len(), 2);
1299 assert_eq!(var.len(), 2);
1300 }
1301
1302 #[test]
1303 fn test_ensemble_variance_non_negative() {
1304 let config = SurrogateConfig {
1305 input_dim: 2,
1306 hidden_dims: vec![4],
1307 output_dim: 1,
1308 activation: SurrogateActivation::Tanh,
1309 };
1310 let ens = EnsembleSurrogate::new(config, 3, 42);
1311 let (_, var) = ens.predict_with_uncertainty(&[0.5, -0.5]);
1312 assert!(var.iter().all(|&v| v >= 0.0));
1313 }
1314
1315 #[test]
1316 fn test_ensemble_active_learning_score() {
1317 let config = SurrogateConfig {
1318 input_dim: 2,
1319 hidden_dims: vec![4],
1320 output_dim: 1,
1321 activation: SurrogateActivation::ReLU,
1322 };
1323 let ens = EnsembleSurrogate::new(config, 4, 1);
1324 let score = ens.active_learning_score(&[0.0, 1.0]);
1325 assert!(score >= 0.0);
1326 }
1327
1328 #[test]
1329 fn test_ensemble_train_step_all() {
1330 let config = SurrogateConfig {
1331 input_dim: 2,
1332 hidden_dims: vec![4],
1333 output_dim: 1,
1334 activation: SurrogateActivation::ReLU,
1335 };
1336 let mut ens = EnsembleSurrogate::new(config, 3, 5);
1337 let losses = ens.train_step_all(&[0.1, 0.2], &[1.0], 0.01);
1338 assert_eq!(losses.len(), 3);
1339 }
1340
1341 #[test]
1344 fn test_physics_residual_zero_loss_when_satisfied() {
1345 let a = vec![1.0_f32, 0.0, 0.0, 1.0];
1347 let b = vec![1.0_f32, 1.0];
1348 let pr = PhysicsResidual::new(a, b, 2, 2);
1349 let loss = pr.physics_loss(&[1.0, 1.0]);
1350 assert!(loss.abs() < 1e-5);
1351 }
1352
1353 #[test]
1354 fn test_physics_residual_nonzero_loss() {
1355 let a = vec![1.0_f32, 0.0, 0.0, 1.0];
1356 let b = vec![0.0_f32, 0.0];
1357 let pr = PhysicsResidual::new(a, b, 2, 2);
1358 let loss = pr.physics_loss(&[1.0, 1.0]);
1359 assert!(loss > 0.0);
1360 }
1361
1362 #[test]
1363 fn test_pi_surrogate_total_loss_non_negative() {
1364 let config = SurrogateConfig {
1365 input_dim: 2,
1366 hidden_dims: vec![4],
1367 output_dim: 2,
1368 activation: SurrogateActivation::ReLU,
1369 };
1370 let pr = PhysicsResidual::new(vec![1.0, 0.0, 0.0, 1.0], vec![0.0, 0.0], 2, 2);
1371 let mut pi = PiSurrogate::new(config, vec![pr], 0.1, 77);
1372 let loss = pi.total_loss(&[0.1, 0.2], &[0.5, 0.5]);
1373 assert!(loss >= 0.0);
1374 let tl = pi.train_step(&[0.1, 0.2], &[0.5, 0.5], 0.001);
1376 assert!(tl.is_finite());
1377 }
1378
1379 #[test]
1382 fn test_rbf_kernel_self_distance() {
1383 let k = RbfKernel::new(1.0, 1.0, 0.01);
1384 let x = vec![1.0_f32, 2.0, 3.0];
1385 let val = k.kernel(&x, &x);
1386 assert!((val - 1.0).abs() < 1e-5);
1388 }
1389
1390 #[test]
1391 fn test_rbf_kernel_decreases_with_distance() {
1392 let k = RbfKernel::new(1.0, 1.0, 0.01);
1393 let x1 = vec![0.0_f32];
1394 let x2 = vec![1.0_f32];
1395 let x3 = vec![5.0_f32];
1396 let k12 = k.kernel(&x1, &x2);
1397 let k13 = k.kernel(&x1, &x3);
1398 assert!(k12 > k13);
1399 }
1400
1401 #[test]
1404 fn test_gp_surrogate_fit_and_predict() {
1405 let kernel = RbfKernel::new(1.0, 1.0, 0.1);
1406 let mut gp = GpSurrogate::new(kernel);
1407 let x_train: Vec<Vec<f32>> = vec![vec![0.0], vec![1.0], vec![2.0]];
1408 let y_train = vec![0.0_f32, 1.0, 0.0];
1409 gp.fit(x_train, y_train).expect("GP fit should succeed");
1410
1411 let (mean, var) = gp.predict(&[1.0]).expect("predict should succeed");
1412 assert!((mean - 1.0).abs() < 0.5);
1414 assert!(var >= 0.0);
1415 }
1416
1417 #[test]
1418 fn test_gp_surrogate_variance_zero_at_training_points() {
1419 let kernel = RbfKernel::new(1.0, 1.0, 1e-6);
1420 let mut gp = GpSurrogate::new(kernel);
1421 let x_train: Vec<Vec<f32>> = vec![vec![0.0], vec![2.0]];
1422 let y_train = vec![1.0_f32, -1.0];
1423 gp.fit(x_train, y_train).expect("fit");
1424 let (_m, v) = gp.predict(&[0.0]).expect("predict");
1425 assert!(v < 0.1);
1427 }
1428
1429 #[test]
1430 fn test_gp_surrogate_no_training_data_error() {
1431 let kernel = RbfKernel::new(1.0, 1.0, 0.1);
1432 let gp = GpSurrogate::new(kernel);
1433 let result = gp.predict(&[0.0]);
1434 assert!(result.is_err());
1435 }
1436
1437 #[test]
1440 fn test_lhs_sample_shape() {
1441 let mut rng = StdRng::seed_from_u64(42);
1442 let lhs = LatinHypercubeSampler::new(10, 3).expect("new");
1443 let bounds = vec![(0.0_f32, 1.0), (0.0, 2.0), (-1.0, 1.0)];
1444 let design = lhs.sample(&bounds, &mut rng).expect("sample");
1445 assert_eq!(design.len(), 10);
1446 assert_eq!(design[0].len(), 3);
1447 }
1448
1449 #[test]
1450 fn test_lhs_sample_within_bounds() {
1451 let mut rng = StdRng::seed_from_u64(7);
1452 let lhs = LatinHypercubeSampler::new(20, 2).expect("new");
1453 let bounds = vec![(1.0_f32, 3.0), (-2.0_f32, -0.5)];
1454 let design = lhs.sample(&bounds, &mut rng).expect("sample");
1455 for p in &design {
1456 assert!(p[0] >= 1.0 && p[0] <= 3.0);
1457 assert!(p[1] >= -2.0 && p[1] <= -0.5);
1458 }
1459 }
1460
1461 #[test]
1462 fn test_lhs_invalid_config() {
1463 assert!(LatinHypercubeSampler::new(0, 2).is_err());
1464 assert!(LatinHypercubeSampler::new(5, 0).is_err());
1465 }
1466
1467 #[test]
1468 fn test_maximin_lhs_returns_design() {
1469 let mut rng = StdRng::seed_from_u64(99);
1470 let lhs = LatinHypercubeSampler::new(5, 2).expect("new");
1471 let bounds = vec![(0.0_f32, 1.0), (0.0_f32, 1.0)];
1472 let best = lhs.maximin_lhs(&bounds, 5, &mut rng).expect("maximin");
1473 assert_eq!(best.len(), 5);
1474 }
1475
1476 #[test]
1479 fn test_adaptive_sampler_ei_positive() {
1480 let kernel = RbfKernel::new(1.0, 1.0, 0.1);
1481 let mut gp = GpSurrogate::new(kernel);
1482 let xt = vec![vec![0.0_f32], vec![1.0], vec![2.0]];
1483 let yt = vec![0.0_f32, 1.0, 0.5];
1484 gp.fit(xt, yt).expect("fit");
1485 let sampler = AdaptiveSampler::new(gp);
1486 let ei = sampler.expected_improvement(&[1.5], 0.5);
1487 assert!(ei >= 0.0);
1488 }
1489
1490 #[test]
1491 fn test_adaptive_sampler_next_point() {
1492 let kernel = RbfKernel::new(1.0, 1.0, 0.1);
1493 let mut gp = GpSurrogate::new(kernel);
1494 let xt = vec![vec![0.0_f32], vec![2.0]];
1495 let yt = vec![0.5_f32, 1.0];
1496 gp.fit(xt, yt).expect("fit");
1497 let sampler = AdaptiveSampler::new(gp);
1498 let candidates = vec![vec![0.5_f32], vec![1.0], vec![1.5], vec![2.5]];
1499 let idx = sampler.next_point(&candidates, 0.5);
1500 assert!(idx < candidates.len());
1501 }
1502
1503 #[test]
1506 fn test_spring_mass_simulate_step() {
1507 let masses = vec![1.0_f32, 1.0];
1508 let springs = vec![(0usize, 1usize, 10.0_f32, 1.0_f32)];
1509 let sys = SpringMassSystem::new(masses, springs).expect("new");
1510 let pos = vec![0.0_f32, 2.0];
1511 let vel = vec![0.0_f32, 0.0];
1512 let (new_pos, new_vel) = sys.simulate_step(&pos, &vel, 0.01).expect("step");
1513 assert_eq!(new_pos.len(), 2);
1514 assert_eq!(new_vel.len(), 2);
1515 }
1516
1517 #[test]
1518 fn test_spring_mass_kinetic_energy() {
1519 let masses = vec![2.0_f32, 1.0];
1520 let springs = vec![];
1521 let sys = SpringMassSystem::new(masses, springs).expect("new");
1522 let vel = vec![1.0_f32, 2.0];
1523 let ke = sys.kinetic_energy(&vel);
1524 assert!((ke - 3.0).abs() < 1e-5);
1526 }
1527
1528 #[test]
1529 fn test_spring_mass_potential_energy_at_rest() {
1530 let masses = vec![1.0_f32, 1.0];
1531 let springs = vec![(0usize, 1usize, 5.0_f32, 1.0_f32)];
1532 let sys = SpringMassSystem::new(masses, springs).expect("new");
1533 let pos = vec![0.0_f32, 1.0];
1535 let pe = sys.potential_energy(&pos);
1536 assert!(pe.abs() < 1e-5);
1537 }
1538
1539 #[test]
1540 fn test_spring_mass_energy_conservation_approx() {
1541 let masses = vec![1.0_f32, 1.0];
1542 let springs = vec![(0usize, 1usize, 50.0_f32, 1.0_f32)];
1543 let sys = SpringMassSystem::new(masses, springs).expect("new");
1544 let mut pos = vec![0.0_f32, 2.0]; let mut vel = vec![0.0_f32, 0.0];
1546 let e0 = sys.kinetic_energy(&vel) + sys.potential_energy(&pos);
1547 for _ in 0..200 {
1548 let (np, nv) = sys.simulate_step(&pos, &vel, 0.001).expect("step");
1549 pos = np;
1550 vel = nv;
1551 }
1552 let e1 = sys.kinetic_energy(&vel) + sys.potential_energy(&pos);
1553 assert!((e1 - e0).abs() / (e0.abs() + 1e-6) < 0.05);
1555 }
1556
1557 #[test]
1560 fn test_reynolds_stress_frobenius_norm() {
1561 let rst = ReynoldsStressTensor {
1562 s11: 1.0,
1563 s12: 0.0,
1564 s13: 0.0,
1565 s22: 1.0,
1566 s23: 0.0,
1567 s33: 1.0,
1568 };
1569 let norm = rst.frobenius_norm();
1570 assert!((norm - (6.0_f32).sqrt()).abs() < 1e-4);
1574 }
1575
1576 #[test]
1577 fn test_smagorinsky_eddy_viscosity() {
1578 let model = SmagorinskyModel::new(0.1, 0.5);
1579 let rst = ReynoldsStressTensor {
1580 s11: 1.0,
1581 s12: 0.0,
1582 s13: 0.0,
1583 s22: 1.0,
1584 s23: 0.0,
1585 s33: 1.0,
1586 };
1587 let nu = model.eddy_viscosity(&rst);
1588 let expected = 0.0025 * (6.0_f32).sqrt();
1590 assert!((nu - expected).abs() < 1e-5);
1591 }
1592
1593 #[test]
1594 fn test_ml_correction_model_zero_weights() {
1595 let model = MlCorrectionModel::new_zeros(3, 2);
1596 let rans = vec![1.0_f32, 2.0];
1597 let features = vec![0.5_f32, -0.5, 1.0];
1598 let corrected = model.apply_correction(&rans, &features);
1599 assert!((corrected[0] - 1.0).abs() < 1e-6);
1601 assert!((corrected[1] - 2.0).abs() < 1e-6);
1602 }
1603
1604 #[test]
1607 fn test_noise_injection_shape() {
1608 let mut rng = StdRng::seed_from_u64(1);
1609 let x = vec![1.0_f32, 2.0, 3.0];
1610 let noisy = SimulationDataAugmenter::noise_injection(&x, 0.1, &mut rng);
1611 assert_eq!(noisy.len(), 3);
1612 }
1613
1614 #[test]
1615 fn test_noise_injection_zero_noise() {
1616 let mut rng = StdRng::seed_from_u64(0);
1617 let x = vec![5.0_f32, -3.0, 0.0];
1618 let noisy = SimulationDataAugmenter::noise_injection(&x, 0.0, &mut rng);
1620 for (a, b) in x.iter().zip(noisy.iter()) {
1621 assert!((a - b).abs() < 1e-6);
1622 }
1623 }
1624
1625 #[test]
1626 fn test_symmetry_augment_count() {
1627 let x = vec![1.0_f32, 0.0];
1628 let mat = vec![0.0_f32, -1.0, 1.0, 0.0];
1630 let copies = SimulationDataAugmenter::symmetry_augment(&x, &mat, 4);
1631 assert_eq!(copies.len(), 4);
1632 }
1633
1634 #[test]
1635 fn test_symmetry_augment_rotation_cycle() {
1636 let x = vec![1.0_f32, 0.0];
1637 let mat = vec![0.0_f32, -1.0, 1.0, 0.0];
1639 let copies = SimulationDataAugmenter::symmetry_augment(&x, &mat, 4);
1640 assert!((copies[3][0] - 1.0).abs() < 1e-4);
1642 assert!(copies[3][1].abs() < 1e-4);
1643 }
1644
1645 #[test]
1646 fn test_interpolate_between_count() {
1647 let x1 = vec![0.0_f32, 0.0];
1648 let x2 = vec![1.0_f32, 1.0];
1649 let interp = SimulationDataAugmenter::interpolate_between(&x1, &x2, 4);
1650 assert_eq!(interp.len(), 4);
1651 }
1652
1653 #[test]
1654 fn test_interpolate_between_values() {
1655 let x1 = vec![0.0_f32];
1656 let x2 = vec![10.0_f32];
1657 let interp = SimulationDataAugmenter::interpolate_between(&x1, &x2, 4);
1658 let expected = [2.0_f32, 4.0, 6.0, 8.0];
1660 for (v, e) in interp.iter().zip(expected.iter()) {
1661 assert!((v[0] - e).abs() < 1e-5);
1662 }
1663 }
1664
1665 #[test]
1668 fn test_relative_l2_error_perfect_prediction() {
1669 let pred = vec![1.0_f32, 2.0, 3.0];
1670 let true_vals = vec![1.0_f32, 2.0, 3.0];
1671 let err = SimulationMetrics::relative_l2_error(&pred, &true_vals);
1672 assert!(err.abs() < 1e-6);
1673 }
1674
1675 #[test]
1676 fn test_relative_l2_error_nonzero() {
1677 let pred = vec![1.1_f32, 2.0];
1678 let true_vals = vec![1.0_f32, 2.0];
1679 let err = SimulationMetrics::relative_l2_error(&pred, &true_vals);
1680 assert!(err > 0.0 && err < 1.0);
1681 }
1682
1683 #[test]
1684 fn test_max_absolute_error() {
1685 let pred = vec![1.0_f32, 3.0, 5.0];
1686 let true_vals = vec![1.5_f32, 2.5, 5.0];
1687 let err = SimulationMetrics::max_absolute_error(&pred, &true_vals);
1688 assert!((err - 0.5).abs() < 1e-5);
1689 }
1690
1691 #[test]
1692 fn test_q2_score_perfect() {
1693 let pred = vec![1.0_f32, 2.0, 3.0];
1694 let true_vals = vec![1.0_f32, 2.0, 3.0];
1695 let q2 = SimulationMetrics::q2_score(&pred, &true_vals);
1696 assert!((q2 - 1.0).abs() < 1e-5);
1697 }
1698
1699 #[test]
1700 fn test_q2_score_constant_prediction() {
1701 let n = 10;
1702 let true_vals: Vec<f32> = (0..n).map(|i| i as f32).collect();
1703 let mean = true_vals.iter().sum::<f32>() / n as f32;
1704 let pred = vec![mean; n];
1705 let q2 = SimulationMetrics::q2_score(&pred, &true_vals);
1706 assert!((q2 - 0.0).abs() < 1e-4);
1707 }
1708
1709 #[test]
1710 fn test_coverage_probability_full_coverage() {
1711 let means = vec![0.0_f32, 1.0, 2.0];
1712 let stds = vec![10.0_f32, 10.0, 10.0];
1713 let true_vals = vec![0.1_f32, 0.9, 2.1];
1714 let cov = SimulationMetrics::coverage_probability(&means, &stds, &true_vals, 1.0);
1715 assert!((cov - 1.0).abs() < 1e-5);
1716 }
1717
1718 #[test]
1719 fn test_coverage_probability_no_coverage() {
1720 let means = vec![0.0_f32];
1721 let stds = vec![0.0_f32];
1722 let true_vals = vec![100.0_f32];
1723 let cov = SimulationMetrics::coverage_probability(&means, &stds, &true_vals, 1.0);
1724 assert!(cov.abs() < 1e-5);
1725 }
1726
1727 #[test]
1730 fn test_cholesky_2x2() {
1731 let a = vec![4.0_f32, 2.0, 2.0, 3.0];
1733 let l = cholesky_decompose(&a, 2).expect("cholesky");
1734 assert!((l[0] - 2.0).abs() < 1e-5); assert!((l[2] - 1.0).abs() < 1e-5); assert!((l[3] - (2.0_f32).sqrt()).abs() < 1e-4); }
1738
1739 #[test]
1740 fn test_cholesky_non_pd_fails() {
1741 let a = vec![-1.0_f32, 0.0, 0.0, 1.0];
1743 let result = cholesky_decompose(&a, 2);
1744 assert!(result.is_err());
1745 }
1746}