use crate::random::{
    core::{seeded_rng, Random},
    distributions::MultivariateNormal,
    parallel::{ParallelRng, ThreadLocalRngPool},
};
use ::ndarray::{s, Array1, Array2, Array3, Axis};
use rand::Rng;
use rand_distr::{Distribution, Normal, Uniform};
use std::collections::VecDeque;

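/// Normalizing flow built from stacked affine coupling layers with
/// alternating binary masks (RealNVP-style). Data is mapped to a standard
/// multivariate normal base distribution while the log-determinant of the
/// Jacobian is accumulated, so exact log-densities can be evaluated.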
#[derive(Debug, Clone)]
pub struct NormalizingFlow {
    dimension: usize,
    num_layers: usize,
    flow_layers: Vec<FlowLayer>,
    base_distribution: MultivariateNormal,
    trained: bool,
}

#[derive(Debug, Clone)]
struct FlowLayer {
    mask: Array1<bool>,
    scale_network: MLP,
    translation_network: MLP,
}

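/// Minimal multilayer perceptron shared by the flow, diffusion, energy, and
/// posterior-estimation models. Weights are zero-initialized and
/// `update_parameters` is a no-op, so the networks act as placeholders
/// rather than trainable models.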
#[derive(Debug, Clone)]
struct MLP {
    weights: Vec<Array2<f64>>,
    biases: Vec<Array1<f64>>,
    hidden_dims: Vec<usize>,
}

impl NormalizingFlow {
    pub fn new(dimension: usize, num_layers: usize) -> Self {
        let mut flow_layers = Vec::new();

        for i in 0..num_layers {
            // Alternate the binary mask between layers so every dimension is transformed.
            let mut mask = Array1::from_elem(dimension, false);
            for j in 0..dimension {
                mask[j] = (j + i) % 2 == 0;
            }

            let hidden_dim = dimension.max(32);
            let scale_net = MLP::new(&[dimension / 2, hidden_dim, hidden_dim, dimension / 2]);
            let trans_net = MLP::new(&[dimension / 2, hidden_dim, hidden_dim, dimension / 2]);

            flow_layers.push(FlowLayer {
                mask,
                scale_network: scale_net,
                translation_network: trans_net,
            });
        }

        // Standard multivariate normal base distribution (zero mean, identity covariance).
        let mut cov_matrix = vec![vec![0.0; dimension]; dimension];
        for i in 0..dimension {
            cov_matrix[i][i] = 1.0;
        }

        let base_distribution =
            MultivariateNormal::new(vec![0.0; dimension], cov_matrix).expect("Operation failed");

        Self {
            dimension,
            num_layers,
            flow_layers,
            base_distribution,
            trained: false,
        }
    }

    pub fn train(&mut self, training_data: &Array2<f64>, num_epochs: usize) -> Result<(), String> {
        let learning_rate = 0.001;
        let batch_size = 32;

        for epoch in 0..num_epochs {
            let num_batches = training_data.nrows().div_ceil(batch_size);

            for batch_idx in 0..num_batches {
                let start_idx = batch_idx * batch_size;
                let end_idx = (start_idx + batch_size).min(training_data.nrows());

                let batch = training_data.slice(s![start_idx..end_idx, ..]);

                // Negative log-likelihood via the change of variables:
                // log p(x) = log p(z) + log|det J|. The value is accumulated but unused,
                // since the parameter updates below are placeholders.
                let mut _total_loss = 0.0;
                for i in 0..batch.nrows() {
                    let x = batch.row(i).to_owned();
                    let (z, log_det_jacobian) = self.forward(&x)?;

                    let log_prob_z = self.base_distribution.log_probability(&z.to_vec())?;
                    let log_prob_x = log_prob_z + log_det_jacobian;

                    _total_loss -= log_prob_x;
                }

                self.update_parameters(learning_rate, &batch)?;
            }

            if epoch % 100 == 0 {
                println!("Epoch {}: Training flow...", epoch);
            }
        }

        self.trained = true;
        Ok(())
    }

    fn forward(&self, x: &Array1<f64>) -> Result<(Array1<f64>, f64), String> {
        let mut z = x.clone();
        let mut log_det_jacobian = 0.0;

        for layer in &self.flow_layers {
            let (new_z, log_det) = layer.forward(&z)?;
            z = new_z;
            log_det_jacobian += log_det;
        }

        Ok((z, log_det_jacobian))
    }

    fn inverse(&self, z: &Array1<f64>) -> Result<Array1<f64>, String> {
        let mut x = z.clone();

        for layer in self.flow_layers.iter().rev() {
            x = layer.inverse(&x)?;
        }

        Ok(x)
    }

    pub fn sample(&self, num_samples: usize, seed: u64) -> Result<Array2<f64>, String> {
        if !self.trained {
            return Err("Flow must be trained before sampling".to_string());
        }

        let mut rng = seeded_rng(seed);
        let mut samples = Array2::zeros((num_samples, self.dimension));

        for i in 0..num_samples {
            let z = Array1::from_vec(self.base_distribution.sample(&mut rng));

            let x = self.inverse(&z)?;

            for j in 0..self.dimension {
                samples[[i, j]] = x[j];
            }
        }

        Ok(samples)
    }

    pub fn log_probability(&self, x: &Array1<f64>) -> Result<f64, String> {
        if !self.trained {
            return Err("Flow must be trained before computing probabilities".to_string());
        }

        let (z, log_det_jacobian) = self.forward(x)?;
        let log_prob_z = self.base_distribution.log_probability(&z.to_vec())?;
        Ok(log_prob_z + log_det_jacobian)
    }

    fn update_parameters(
        &mut self,
        learning_rate: f64,
        batch: &crate::ndarray::ArrayView2<f64>,
    ) -> Result<(), String> {
        // Delegates to each coupling layer; the per-layer updates are currently placeholders.
        for layer in &mut self.flow_layers {
            layer.update_parameters(learning_rate, batch)?;
        }
        Ok(())
    }
}

impl FlowLayer {
    fn forward(&self, x: &Array1<f64>) -> Result<(Array1<f64>, f64), String> {
        let mut y = x.clone();
        let mut log_det_jacobian = 0.0;

        // Dimensions where the mask is true pass through unchanged and condition the networks.
        let x_unchanged: Vec<f64> = x
            .iter()
            .enumerate()
            .filter(|(i, _)| self.mask[*i])
            .map(|(_, &val)| val)
            .collect();

        let x_to_transform: Vec<f64> = x
            .iter()
            .enumerate()
            .filter(|(i, _)| !self.mask[*i])
            .map(|(_, &val)| val)
            .collect();

        if !x_unchanged.is_empty() && !x_to_transform.is_empty() {
            let scale = self
                .scale_network
                .forward(&Array1::from_vec(x_unchanged.clone()))?;
            let translation = self
                .translation_network
                .forward(&Array1::from_vec(x_unchanged))?;

            // Affine coupling: y = x * exp(s) + t, contributing s to log|det J|.
            let mut transform_idx = 0;
            for (i, &masked) in self.mask.iter().enumerate() {
                if !masked && transform_idx < scale.len() && transform_idx < translation.len() {
                    let s = scale[transform_idx];
                    let t = translation[transform_idx];
                    y[i] = x_to_transform[transform_idx] * s.exp() + t;
                    log_det_jacobian += s;
                    transform_idx += 1;
                }
            }
        }

        Ok((y, log_det_jacobian))
    }

    fn inverse(&self, y: &Array1<f64>) -> Result<Array1<f64>, String> {
        let mut x = y.clone();

        let y_unchanged: Vec<f64> = y
            .iter()
            .enumerate()
            .filter(|(i, _)| self.mask[*i])
            .map(|(_, &val)| val)
            .collect();

        if !y_unchanged.is_empty() {
            let scale = self
                .scale_network
                .forward(&Array1::from_vec(y_unchanged.clone()))?;
            let translation = self
                .translation_network
                .forward(&Array1::from_vec(y_unchanged))?;

            // Invert the coupling: x = (y - t) * exp(-s).
            let mut transform_idx = 0;
            for (i, &masked) in self.mask.iter().enumerate() {
                if !masked && transform_idx < scale.len() && transform_idx < translation.len() {
                    let s = scale[transform_idx];
                    let t = translation[transform_idx];
                    x[i] = (y[i] - t) * (-s).exp();
                    transform_idx += 1;
                }
            }
        }

        Ok(x)
    }

    fn update_parameters(
        &mut self,
        learning_rate: f64,
        _batch: &crate::ndarray::ArrayView2<f64>,
    ) -> Result<(), String> {
        self.scale_network.update_parameters(learning_rate)?;
        self.translation_network.update_parameters(learning_rate)?;
        Ok(())
    }
}

impl MLP {
    fn new(layer_sizes: &[usize]) -> Self {
        let mut weights = Vec::new();
        let mut biases = Vec::new();

        for i in 0..layer_sizes.len() - 1 {
            let w = Array2::zeros((layer_sizes[i + 1], layer_sizes[i]));
            let b = Array1::zeros(layer_sizes[i + 1]);
            weights.push(w);
            biases.push(b);
        }

        Self {
            weights,
            biases,
            hidden_dims: layer_sizes[1..layer_sizes.len() - 1].to_vec(),
        }
    }

    fn forward(&self, input: &Array1<f64>) -> Result<Array1<f64>, String> {
        let mut x = input.clone();

        for (i, (weight, bias)) in self.weights.iter().zip(self.biases.iter()).enumerate() {
            let mut output = Array1::zeros(weight.nrows());
            for j in 0..weight.nrows() {
                let mut sum = bias[j];
                for k in 0..weight.ncols() {
                    if k < x.len() {
                        sum += weight[[j, k]] * x[k];
                    }
                }
                output[j] = sum;
            }

            // ReLU activation on all but the final layer.
            if i < self.weights.len() - 1 {
                for elem in output.iter_mut() {
                    *elem = elem.max(0.0);
                }
            }

            x = output;
        }

        Ok(x)
    }

    fn update_parameters(&mut self, _learning_rate: f64) -> Result<(), String> {
        // Placeholder: no gradients are computed or applied.
        Ok(())
    }
}

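/// Score-based (denoising diffusion) generative model with a DDPM-style
/// linear noise schedule. An MLP conditioned on a sinusoidal time embedding
/// predicts the noise added at each timestep; sampling runs the reverse
/// process starting from pure Gaussian noise.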
#[derive(Debug)]
pub struct ScoreBasedDiffusion {
    config: DiffusionConfig,
    score_network: ScoreNetwork,
    noise_schedule: NoiseSchedule,
    trained: bool,
}

#[derive(Debug, Clone)]
pub struct DiffusionConfig {
    pub dimension: usize,
    pub num_timesteps: usize,
    pub beta_start: f64,
    pub beta_end: f64,
    pub hidden_dims: Vec<usize>,
}

impl Default for DiffusionConfig {
    fn default() -> Self {
        Self {
            dimension: 10,
            num_timesteps: 1000,
            beta_start: 1e-4,
            beta_end: 0.02,
            hidden_dims: vec![128, 256, 128],
        }
    }
}

#[derive(Debug)]
struct ScoreNetwork {
    mlp: MLP,
    time_embedding_dim: usize,
}

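/// Precomputed DDPM noise schedule: per-step noise variances `betas`,
/// `alphas = 1 - betas`, and their running products `alpha_bars`.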
#[derive(Debug)]
struct NoiseSchedule {
    betas: Vec<f64>,
    alphas: Vec<f64>,
    alpha_bars: Vec<f64>,
}

impl ScoreBasedDiffusion {
    pub fn new(config: DiffusionConfig) -> Self {
        let time_embedding_dim = 64;
        let input_dim = config.dimension + time_embedding_dim;

        let mut network_dims = vec![input_dim];
        network_dims.extend_from_slice(&config.hidden_dims);
        network_dims.push(config.dimension);

        let score_network = ScoreNetwork {
            mlp: MLP::new(&network_dims),
            time_embedding_dim,
        };

        let noise_schedule =
            NoiseSchedule::new(config.num_timesteps, config.beta_start, config.beta_end);

        Self {
            config,
            score_network,
            noise_schedule,
            trained: false,
        }
    }

    pub fn train(&mut self, training_data: &Array2<f64>) -> Result<(), String> {
        let num_epochs = 1000;
        let batch_size = 32;

        for epoch in 0..num_epochs {
            // One RNG per epoch so successive batches draw fresh timesteps and noise.
            let mut rng = seeded_rng(42 + epoch as u64);

            for _ in 0..training_data.nrows().div_ceil(batch_size) {
                // Pick a random timestep, corrupt the data with the forward process,
                // and fit the score network to predict the injected noise.
                let t = rng
                    .sample(Uniform::new(0, self.config.num_timesteps).expect("Operation failed"));

                let noise = self.sample_noise(training_data.nrows(), &mut rng)?;
                let noisy_data = self.add_noise(training_data, &noise, t)?;

                self.score_network.train_step(&noisy_data, &noise, t)?;
            }

            if epoch % 100 == 0 {
                println!("Epoch {}: Training diffusion model...", epoch);
            }
        }

        self.trained = true;
        Ok(())
    }

    pub fn sample(
        &self,
        num_samples: usize,
        num_timesteps: usize,
        seed: u64,
    ) -> Result<Array2<f64>, String> {
        if !self.trained {
            return Err("Model must be trained before sampling".to_string());
        }

        let mut rng = seeded_rng(seed);
        let mut samples = Array2::zeros((num_samples, self.config.dimension));

        // Start the reverse process from pure Gaussian noise.
        for i in 0..num_samples {
            for j in 0..self.config.dimension {
                samples[[i, j]] = rng.sample(Normal::new(0.0, 1.0).expect("Operation failed"));
            }
        }

        // Walk the reverse chain on a coarse grid over the full schedule.
        let timestep_stride = self.config.num_timesteps / num_timesteps;

        for t in (0..num_timesteps).rev() {
            let actual_t = t * timestep_stride;

            let predicted_noise = self.score_network.predict(&samples, actual_t)?;

            samples = self.ddpm_update(&samples, &predicted_noise, actual_t, &mut rng)?;
        }

        Ok(samples)
    }

    fn sample_noise(
        &self,
        batch_size: usize,
        rng: &mut Random<rand::rngs::StdRng>,
    ) -> Result<Array2<f64>, String> {
        let mut noise = Array2::zeros((batch_size, self.config.dimension));
        for i in 0..batch_size {
            for j in 0..self.config.dimension {
                noise[[i, j]] = rng.sample(Normal::new(0.0, 1.0).expect("Operation failed"));
            }
        }
        Ok(noise)
    }

    fn add_noise(
        &self,
        data: &Array2<f64>,
        noise: &Array2<f64>,
        t: usize,
    ) -> Result<Array2<f64>, String> {
        let alpha_bar = self.noise_schedule.alpha_bars[t];
        let mut noisy_data = Array2::zeros(data.raw_dim());

        // Forward process: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.
        for i in 0..data.nrows() {
            for j in 0..data.ncols() {
                noisy_data[[i, j]] =
                    alpha_bar.sqrt() * data[[i, j]] + (1.0 - alpha_bar).sqrt() * noise[[i, j]];
            }
        }

        Ok(noisy_data)
    }

    fn ddpm_update(
        &self,
        x_t: &Array2<f64>,
        predicted_noise: &Array2<f64>,
        t: usize,
        rng: &mut Random<rand::rngs::StdRng>,
    ) -> Result<Array2<f64>, String> {
        let alpha = self.noise_schedule.alphas[t];
        let alpha_bar = self.noise_schedule.alpha_bars[t];
        let beta = self.noise_schedule.betas[t];

        let mut x_t_minus_1 = Array2::zeros(x_t.raw_dim());

        for i in 0..x_t.nrows() {
            for j in 0..x_t.ncols() {
                // Posterior mean: (x_t - beta / sqrt(1 - alpha_bar) * predicted_noise) / sqrt(alpha).
                let mean_coeff = 1.0 / alpha.sqrt();
                let noise_coeff = beta / (1.0 - alpha_bar).sqrt();
                let mean = mean_coeff * (x_t[[i, j]] - noise_coeff * predicted_noise[[i, j]]);

                // Inject fresh noise at every step except the final one (t == 0).
                let noise = if t > 0 {
                    rng.sample(Normal::new(0.0, beta.sqrt()).expect("Operation failed"))
                } else {
                    0.0
                };

                x_t_minus_1[[i, j]] = mean + noise;
            }
        }

        Ok(x_t_minus_1)
    }
}

impl NoiseSchedule {
    fn new(num_timesteps: usize, beta_start: f64, beta_end: f64) -> Self {
        let mut betas = Vec::with_capacity(num_timesteps);
        let mut alphas = Vec::with_capacity(num_timesteps);
        let mut alpha_bars = Vec::with_capacity(num_timesteps);

        for i in 0..num_timesteps {
            // Linear schedule from beta_start to beta_end.
            let beta =
                beta_start + (beta_end - beta_start) * (i as f64) / (num_timesteps as f64 - 1.0);
            let alpha = 1.0 - beta;

            betas.push(beta);
            alphas.push(alpha);

            // alpha_bar_t is the cumulative product of the alphas up to step t.
            let alpha_bar = if i == 0 {
                alpha
            } else {
                alpha_bars[i - 1] * alpha
            };
            alpha_bars.push(alpha_bar);
        }

        Self {
            betas,
            alphas,
            alpha_bars,
        }
    }
}

impl ScoreNetwork {
    fn train_step(
        &mut self,
        noisy_data: &Array2<f64>,
        _target_noise: &Array2<f64>,
        t: usize,
    ) -> Result<(), String> {
        // Placeholder training step: runs a forward pass per row but applies no gradient update.
        for i in 0..noisy_data.nrows() {
            let input = self.prepare_input(&noisy_data.row(i).to_owned(), t)?;
            let _predicted = self.mlp.forward(&input)?;
        }
        Ok(())
    }

    fn predict(&self, x: &Array2<f64>, t: usize) -> Result<Array2<f64>, String> {
        let mut predictions = Array2::zeros(x.raw_dim());

        for i in 0..x.nrows() {
            let input = self.prepare_input(&x.row(i).to_owned(), t)?;
            let pred = self.mlp.forward(&input)?;

            for j in 0..pred.len().min(x.ncols()) {
                predictions[[i, j]] = pred[j];
            }
        }

        Ok(predictions)
    }

    fn prepare_input(&self, x: &Array1<f64>, t: usize) -> Result<Array1<f64>, String> {
        // Sinusoidal time embedding, alternating sin and cos across the embedding dimensions.
        let mut time_emb = Array1::zeros(self.time_embedding_dim);
        for i in 0..self.time_embedding_dim {
            let freq = 2.0 * std::f64::consts::PI * (t as f64)
                / (10000.0_f64.powf(2.0 * (i as f64) / (self.time_embedding_dim as f64)));
            time_emb[i] = if i % 2 == 0 { freq.sin() } else { freq.cos() };
        }

        // Concatenate the data vector with the time embedding.
        let mut input = Array1::zeros(x.len() + time_emb.len());
        for i in 0..x.len() {
            input[i] = x[i];
        }
        for i in 0..time_emb.len() {
            input[x.len() + i] = time_emb[i];
        }

        Ok(input)
    }
}

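/// Energy-based model: an MLP assigns a scalar energy to each point, and
/// samples are drawn with Langevin-style MCMC over a finite-difference
/// energy gradient. Training follows a contrastive-divergence pattern, with
/// the actual parameter update left as a placeholder.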
#[derive(Debug)]
pub struct EnergyBasedModel {
    energy_network: MLP,
    dimension: usize,
    temperature: f64,
    mcmc_steps: usize,
}

impl EnergyBasedModel {
    pub fn new(dimension: usize, hidden_dims: &[usize]) -> Self {
        let mut network_dims = vec![dimension];
        network_dims.extend_from_slice(hidden_dims);
        // The network maps a point to a single scalar energy.
        network_dims.push(1);

        Self {
            energy_network: MLP::new(&network_dims),
            dimension,
            temperature: 1.0,
            mcmc_steps: 100,
        }
    }

    pub fn train(&mut self, training_data: &Array2<f64>, num_epochs: usize) -> Result<(), String> {
        for epoch in 0..num_epochs {
            for i in 0..training_data.nrows() {
                let positive_sample = training_data.row(i).to_owned();

                // Negative samples come from short MCMC chains started at the data point.
                let negative_sample = self.sample_mcmc(&positive_sample, self.mcmc_steps)?;

                self.contrastive_divergence_update(&positive_sample, &negative_sample)?;
            }

            if epoch % 100 == 0 {
                println!("Epoch {}: Training EBM...", epoch);
            }
        }

        Ok(())
    }

    pub fn sample(
        &self,
        num_samples: usize,
        num_steps: usize,
        seed: u64,
    ) -> Result<Array2<f64>, String> {
        let mut rng = seeded_rng(seed);
        let mut samples = Array2::zeros((num_samples, self.dimension));

        for i in 0..num_samples {
            let mut x = Array1::zeros(self.dimension);
            for j in 0..self.dimension {
                x[j] = rng.sample(Normal::new(0.0, 1.0).expect("Operation failed"));
            }

            x = self.sample_mcmc(&x, num_steps)?;

            for j in 0..self.dimension {
                samples[[i, j]] = x[j];
            }
        }

        Ok(samples)
    }

    fn sample_mcmc(&self, initial: &Array1<f64>, num_steps: usize) -> Result<Array1<f64>, String> {
        let mut x = initial.clone();
        let step_size = 0.01;
        // Fixed seed: every chain started from the same point reproduces the same trajectory.
        let mut rng = seeded_rng(42);

        for _ in 0..num_steps {
            let grad = self.energy_gradient(&x)?;

            // Unadjusted Langevin step: move down the energy gradient and add Gaussian noise.
            for i in 0..self.dimension {
                let noise = rng.sample(
                    Normal::new(0.0, (2.0_f64 * step_size).sqrt()).expect("Operation failed"),
                );
                x[i] += -step_size * grad[i] + noise;
            }
        }

        Ok(x)
    }

    fn energy_gradient(&self, x: &Array1<f64>) -> Result<Array1<f64>, String> {
        // Central finite-difference approximation of the energy gradient.
        let mut gradient = Array1::zeros(self.dimension);
        let epsilon = 1e-5;

        for i in 0..self.dimension {
            let mut x_plus = x.clone();
            let mut x_minus = x.clone();
            x_plus[i] += epsilon;
            x_minus[i] -= epsilon;

            let energy_plus = self.energy_network.forward(&x_plus)?[0];
            let energy_minus = self.energy_network.forward(&x_minus)?[0];

            gradient[i] = (energy_plus - energy_minus) / (2.0 * epsilon);
        }

        Ok(gradient)
    }

    fn contrastive_divergence_update(
        &mut self,
        positive: &Array1<f64>,
        negative: &Array1<f64>,
    ) -> Result<(), String> {
        // Placeholder: energies are evaluated but no parameter update is applied.
        let _pos_energy = self.energy_network.forward(positive)?;
        let _neg_energy = self.energy_network.forward(negative)?;

        Ok(())
    }
}

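/// Simulation-based inference via neural posterior estimation: parameters are
/// drawn from a standard normal prior, pushed through a user-supplied
/// simulator, and a network maps observations to a Gaussian approximation of
/// the posterior (a mean and a log-variance per parameter).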
#[derive(Debug)]
pub struct NeuralPosteriorEstimation {
    posterior_network: MLP,
    prior_network: MLP,
    observation_dim: usize,
    parameter_dim: usize,
    trained: bool,
}

impl NeuralPosteriorEstimation {
    pub fn new(observation_dim: usize, parameter_dim: usize, hidden_dims: &[usize]) -> Self {
        // The posterior network outputs a mean and a log-variance for each parameter.
        let mut posterior_dims = vec![observation_dim];
        posterior_dims.extend_from_slice(hidden_dims);
        posterior_dims.push(parameter_dim * 2);

        let mut prior_dims = vec![parameter_dim];
        prior_dims.extend_from_slice(hidden_dims);
        prior_dims.push(parameter_dim);

        Self {
            posterior_network: MLP::new(&posterior_dims),
            prior_network: MLP::new(&prior_dims),
            observation_dim,
            parameter_dim,
            trained: false,
        }
    }

    pub fn train(
        &mut self,
        simulator: impl Fn(&Array1<f64>) -> Array1<f64>,
        num_simulations: usize,
    ) -> Result<(), String> {
        let mut rng = seeded_rng(42);

        for epoch in 0..1000 {
            for _ in 0..num_simulations / 1000 {
                // Draw parameters from a standard normal prior and simulate an observation.
                let mut theta = Array1::zeros(self.parameter_dim);
                for i in 0..self.parameter_dim {
                    theta[i] = rng.sample(Normal::new(0.0, 1.0).expect("Operation failed"));
                }

                let x = simulator(&theta);

                self.train_posterior_step(&x, &theta)?;
            }

            if epoch % 100 == 0 {
                println!("Epoch {}: Training NPE...", epoch);
            }
        }

        self.trained = true;
        Ok(())
    }

    pub fn posterior(
        &self,
        observation: &Array1<f64>,
        num_samples: usize,
        seed: u64,
    ) -> Result<Array2<f64>, String> {
        if !self.trained {
            return Err("Model must be trained before inference".to_string());
        }

        let posterior_params = self.posterior_network.forward(observation)?;

        // The first `parameter_dim` outputs are means, the remainder are log-variances.
        let mean_start = 0;
        let var_start = self.parameter_dim;

        let mut rng = seeded_rng(seed);
        let mut samples = Array2::zeros((num_samples, self.parameter_dim));

        for i in 0..num_samples {
            for j in 0..self.parameter_dim {
                let mean = posterior_params[mean_start + j];
                let var = posterior_params[var_start + j].exp();
                samples[[i, j]] =
                    rng.sample(Normal::new(mean, var.sqrt()).expect("Operation failed"));
            }
        }

        Ok(samples)
    }

    fn train_posterior_step(
        &mut self,
        observation: &Array1<f64>,
        _true_parameter: &Array1<f64>,
    ) -> Result<(), String> {
        // Placeholder: the forward pass runs, but no loss or gradient update is applied.
        let _predicted_params = self.posterior_network.forward(observation)?;

        Ok(())
    }
}

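/// Log-density evaluation used by the flow's base distribution.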
trait LogProbability {
    fn log_probability(&self, x: &[f64]) -> Result<f64, String>;
}

// Simplified log-density: evaluates a standard normal and ignores the
// distribution's actual mean and covariance.
impl LogProbability for MultivariateNormal {
    fn log_probability(&self, x: &[f64]) -> Result<f64, String> {
        if x.len() != self.dimension() {
            return Err("Dimension mismatch".to_string());
        }

        let mut log_prob = 0.0;
        for &xi in x {
            log_prob += -0.5 * xi * xi;
        }
        log_prob += -0.5 * (x.len() as f64) * (2.0 * std::f64::consts::PI).ln();

        Ok(log_prob)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_normalizing_flow_creation() {
        let flow = NormalizingFlow::new(5, 3);
        assert_eq!(flow.dimension, 5);
        assert_eq!(flow.num_layers, 3);
        assert!(!flow.trained);
    }

    #[test]
    fn test_diffusion_model_creation() {
        let config = DiffusionConfig {
            dimension: 10,
            num_timesteps: 100,
            beta_start: 1e-4,
            beta_end: 0.02,
            hidden_dims: vec![32, 64, 32],
        };

        let diffusion = ScoreBasedDiffusion::new(config);
        assert_eq!(diffusion.config.dimension, 10);
        assert_eq!(diffusion.config.num_timesteps, 100);
    }

    #[test]
    fn test_energy_based_model() {
        let ebm = EnergyBasedModel::new(5, &[32, 32]);
        assert_eq!(ebm.dimension, 5);
        assert_eq!(ebm.mcmc_steps, 100);
    }

    #[test]
    fn test_neural_posterior_estimation() {
        let npe = NeuralPosteriorEstimation::new(10, 5, &[32, 32]);
        assert_eq!(npe.observation_dim, 10);
        assert_eq!(npe.parameter_dim, 5);
        assert!(!npe.trained);
    }

    #[test]
    fn test_mlp_forward() {
        let mlp = MLP::new(&[3, 5, 2]);
        let input = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let output = mlp.forward(&input).expect("Operation failed");
        assert_eq!(output.len(), 2);
    }

    #[test]
    fn test_noise_schedule() {
        let schedule = NoiseSchedule::new(100, 1e-4, 0.02);
        assert_eq!(schedule.betas.len(), 100);
        assert_eq!(schedule.alphas.len(), 100);
        assert_eq!(schedule.alpha_bars.len(), 100);

        // alpha_bar is a cumulative product of values in (0, 1), so it must be non-increasing.
        for i in 1..schedule.alpha_bars.len() {
            assert!(schedule.alpha_bars[i] <= schedule.alpha_bars[i - 1]);
        }
    }
}