use crate::kernels::{Kernel, RBF};
use crate::regression::VariationalOptimizer;
use scirs2_core::ndarray::{s, Array1, Array2, Array3};
use scirs2_core::random::{rngs::StdRng, Rng, SeedableRng};
use sklears_core::error::{Result, SklearsError};
use std::f64::consts::PI;

/// Likelihood models supported by the variational deep GP.
#[derive(Debug, Clone)]
pub enum VariationalLikelihood {
    /// Gaussian likelihood with a fixed noise variance.
    Gaussian { noise_variance: f64 },
    /// Gaussian likelihood with a learnable noise variance.
    LearnableGaussian { initial_noise_variance: f64 },
    /// Bernoulli likelihood for binary classification.
    Bernoulli,
    /// Poisson likelihood for count data.
    Poisson,
    /// Beta likelihood for targets in the unit interval.
    Beta { alpha: f64, beta: f64 },
    /// Student-t likelihood for heavy-tailed regression.
    StudentT { degrees_of_freedom: f64, scale: f64 },
}

/// Configuration for a single variational GP layer.
#[derive(Debug, Clone)]
pub struct VariationalLayerConfig {
    /// Dimensionality of the layer input.
    pub input_dim: usize,
    /// Dimensionality of the layer output.
    pub output_dim: usize,
    /// Number of inducing points.
    pub n_inducing: usize,
    /// Kernel used by this layer.
    pub kernel: Box<dyn Kernel>,
    /// Optimizer used for the variational parameters.
    pub optimizer: VariationalOptimizer,
    /// Whether to use the whitened representation of the inducing variables.
    pub whiten: bool,
    /// Initial variance of the variational distribution.
    pub initial_variance: f64,
}

impl Default for VariationalLayerConfig {
    fn default() -> Self {
        Self {
            input_dim: 1,
            output_dim: 1,
            n_inducing: 10,
            kernel: Box::new(RBF::new(1.0)),
            optimizer: VariationalOptimizer::Adam,
            whiten: true,
            initial_variance: 1.0,
        }
    }
}

/// Configuration for a variational deep Gaussian process.
#[derive(Debug, Clone)]
pub struct VariationalDeepGPConfig {
    /// Per-layer configurations, ordered from input to output.
    pub layer_configs: Vec<VariationalLayerConfig>,
    /// Likelihood model for the observations.
    pub likelihood: VariationalLikelihood,
    /// Number of Monte Carlo samples used to estimate the expected log-likelihood.
    pub n_mc_samples: usize,
    /// Learning rate for the variational updates.
    pub learning_rate: f64,
    /// Mini-batch size used during training.
    pub batch_size: usize,
    /// Maximum number of training epochs.
    pub max_epochs: usize,
    /// ELBO improvement below which training is considered converged.
    pub convergence_tolerance: f64,
    /// Whether to use natural-gradient updates.
    pub use_natural_gradients: bool,
    /// Optional seed for reproducible initialization.
    pub random_seed: Option<u64>,
}

impl Default for VariationalDeepGPConfig {
    fn default() -> Self {
        Self {
            layer_configs: vec![VariationalLayerConfig::default()],
            likelihood: VariationalLikelihood::Gaussian {
                noise_variance: 0.1,
            },
            n_mc_samples: 10,
            learning_rate: 0.01,
            batch_size: 100,
            max_epochs: 1000,
            convergence_tolerance: 1e-6,
            use_natural_gradients: false,
            random_seed: None,
        }
    }
}

/// Variational parameters for one layer of the deep GP.
#[derive(Debug, Clone)]
pub struct VariationalLayerParameters {
    /// Variational means of the inducing variables, shape `(n_inducing, output_dim)`.
    pub mean: Array2<f64>,
    /// Cholesky factors of the variational covariances, shape `(n_inducing, n_inducing, output_dim)`.
    pub chol_cov: Array3<f64>,
    /// Inducing input locations, shape `(n_inducing, input_dim)`.
    pub inducing_points: Array2<f64>,
    /// Kernel hyperparameters for this layer.
    pub kernel_params: Array1<f64>,
}

/// Training state of a fitted variational deep GP.
#[derive(Debug)]
pub struct VariationalDeepGPState {
    /// Variational parameters for each layer.
    pub layer_parameters: Vec<VariationalLayerParameters>,
    /// Training inputs.
    pub X_train: Array2<f64>,
    /// Training targets.
    pub y_train: Array2<f64>,
    /// Most recent ELBO value.
    pub current_elbo: f64,
    /// ELBO value after each completed epoch.
    pub elbo_history: Vec<f64>,
    /// Number of epochs completed so far.
    pub epochs_completed: usize,
    /// Whether training stopped because the convergence criterion was met.
    pub converged: bool,
}

/// Deep Gaussian process trained with stochastic variational inference.
#[derive(Debug)]
pub struct VariationalDeepGaussianProcess {
    config: VariationalDeepGPConfig,
    state: Option<VariationalDeepGPState>,
}

impl VariationalDeepGaussianProcess {
    /// Returns a builder for configuring a variational deep GP.
    pub fn builder() -> VariationalDeepGPBuilder {
        VariationalDeepGPBuilder::new()
    }

    /// Creates an unfitted model from an explicit configuration.
    pub fn new(config: VariationalDeepGPConfig) -> Self {
        Self {
            config,
            state: None,
        }
    }

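    /// Fits the model using mini-batch variational training.
    ///
    /// A brief sketch of the loop below: the variational parameters are
    /// initialized, then for up to `max_epochs` epochs the ELBO is estimated
    /// per batch and the parameters are updated; training stops early when
    /// the ELBO change over the last ten epochs falls below
    /// `convergence_tolerance`.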
    pub fn fit(&mut self, X: &Array2<f64>, y: &Array2<f64>) -> Result<()> {
        if X.nrows() != y.nrows() {
            return Err(SklearsError::InvalidInput(
                "X and y must have the same number of samples".to_string(),
            ));
        }

        let layer_parameters = self.initialize_variational_parameters(X)?;

        let mut state = VariationalDeepGPState {
            layer_parameters,
            X_train: X.clone(),
            y_train: y.clone(),
            current_elbo: f64::NEG_INFINITY,
            elbo_history: Vec::new(),
            epochs_completed: 0,
            converged: false,
        };

        for epoch in 0..self.config.max_epochs {
            let epoch_elbo = self.train_epoch(&mut state)?;

            state.elbo_history.push(epoch_elbo);
            state.current_elbo = epoch_elbo;
            state.epochs_completed = epoch + 1;

            // Check convergence over the last ten recorded epochs.
            if epoch > 10 {
                let recent_elbos = &state.elbo_history[epoch.saturating_sub(10)..];
                let elbo_improvement =
                    recent_elbos.last().unwrap() - recent_elbos.first().unwrap();

                if elbo_improvement.abs() < self.config.convergence_tolerance {
                    state.converged = true;
                    break;
                }
            }
        }

        self.state = Some(state);
        Ok(())
    }

    /// Predicts the mean and variance of the final layer for the given inputs.
    pub fn predict(&self, X: &Array2<f64>) -> Result<(Array2<f64>, Array2<f64>)> {
        let state = self.state.as_ref().ok_or_else(|| {
            SklearsError::InvalidInput("Model must be fitted before prediction".to_string())
        })?;

        let (means, variances) = self.forward_with_uncertainty(X, state)?;

        Ok((means, variances))
    }

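    /// Computes the evidence lower bound (ELBO) for the given data.
    ///
    /// As a sketch of the objective: ELBO = E_q[log p(y | f_L)] - sum_l KL(q(u_l) || p(u_l)),
    /// where the expectation is estimated with `n_mc_samples` Monte Carlo samples
    /// and each layer contributes one KL term for its inducing variables `u_l`.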
    pub fn compute_elbo(
        &self,
        X: &Array2<f64>,
        y: &Array2<f64>,
        state: &VariationalDeepGPState,
    ) -> Result<f64> {
        let expected_log_likelihood = self.compute_expected_log_likelihood(X, y, state)?;

        let mut total_kl = 0.0;
        for (layer_idx, layer_params) in state.layer_parameters.iter().enumerate() {
            let kl = self.compute_layer_kl_divergence(layer_idx, layer_params)?;
            total_kl += kl;
        }

        Ok(expected_log_likelihood - total_kl)
    }

    /// Returns the current ELBO if the model has been fitted.
    pub fn elbo(&self) -> Option<f64> {
        self.state.as_ref().map(|s| s.current_elbo)
    }

    /// Returns the per-epoch ELBO history if the model has been fitted.
    pub fn elbo_history(&self) -> Option<&[f64]> {
        self.state.as_ref().map(|s| s.elbo_history.as_slice())
    }

    /// Returns whether training converged, if the model has been fitted.
    pub fn has_converged(&self) -> Option<bool> {
        self.state.as_ref().map(|s| s.converged)
    }

    /// Returns the number of GP layers.
    pub fn n_layers(&self) -> usize {
        self.config.layer_configs.len()
    }

    /// Predicts the mean and variance of an intermediate layer's output.
    pub fn predict_layer(
        &self,
        X: &Array2<f64>,
        layer_idx: usize,
    ) -> Result<(Array2<f64>, Array2<f64>)> {
        let state = self.state.as_ref().ok_or_else(|| {
            SklearsError::InvalidInput("Model must be fitted before prediction".to_string())
        })?;

        if layer_idx >= self.n_layers() {
            return Err(SklearsError::InvalidInput(format!(
                "Layer index {} out of bounds (have {} layers)",
                layer_idx,
                self.n_layers()
            )));
        }

        let (means, variances) = self.forward_to_layer(X, layer_idx, state)?;

        Ok((means, variances))
    }

    fn initialize_variational_parameters(
        &self,
        X: &Array2<f64>,
    ) -> Result<Vec<VariationalLayerParameters>> {
        let mut layer_parameters = Vec::new();
        let mut rng = StdRng::seed_from_u64(self.config.random_seed.unwrap_or(42));

        for (layer_idx, layer_config) in self.config.layer_configs.iter().enumerate() {
            // The first layer uses a subset of the data as inducing inputs;
            // deeper layers start from random locations.
            let inducing_points = if layer_idx == 0 {
                self.initialize_inducing_points_from_data(X, layer_config.n_inducing, &mut rng)?
            } else {
                self.initialize_inducing_points_random(layer_config, &mut rng)?
            };

            let mean = Array2::zeros((layer_config.n_inducing, layer_config.output_dim));

            let mut chol_cov = Array3::zeros((
                layer_config.n_inducing,
                layer_config.n_inducing,
                layer_config.output_dim,
            ));
            for d in 0..layer_config.output_dim {
                for i in 0..layer_config.n_inducing {
                    chol_cov[[i, i, d]] = layer_config.initial_variance.sqrt();
                }
            }

            let kernel_params = Array1::ones(2);

            layer_parameters.push(VariationalLayerParameters {
                mean,
                chol_cov,
                inducing_points,
                kernel_params,
            });
        }

        Ok(layer_parameters)
    }

    fn initialize_inducing_points_from_data(
        &self,
        X: &Array2<f64>,
        n_inducing: usize,
        rng: &mut StdRng,
    ) -> Result<Array2<f64>> {
        let n_samples = X.nrows();
        let n_dims = X.ncols();

        if n_inducing >= n_samples {
            return Ok(X.clone());
        }

        // Partial Fisher-Yates shuffle: the first `n_inducing` indices become a
        // uniform random subset of the training points.
        let mut indices: Vec<usize> = (0..n_samples).collect();
        for i in 0..n_inducing {
            let j = rng.gen_range(i..n_samples);
            indices.swap(i, j);
        }

        let mut inducing_points = Array2::zeros((n_inducing, n_dims));
        for (i, &idx) in indices.iter().take(n_inducing).enumerate() {
            inducing_points.row_mut(i).assign(&X.row(idx));
        }

        Ok(inducing_points)
    }

    fn initialize_inducing_points_random(
        &self,
        layer_config: &VariationalLayerConfig,
        rng: &mut StdRng,
    ) -> Result<Array2<f64>> {
        let mut inducing_points = Array2::zeros((layer_config.n_inducing, layer_config.input_dim));

        for i in 0..layer_config.n_inducing {
            for j in 0..layer_config.input_dim {
                inducing_points[[i, j]] = rng.gen_range(-1.0..1.0);
            }
        }

        Ok(inducing_points)
    }

    fn train_epoch(&self, state: &mut VariationalDeepGPState) -> Result<f64> {
        let n_samples = state.X_train.nrows();
        let n_batches = (n_samples + self.config.batch_size - 1) / self.config.batch_size;

        let mut epoch_elbo = 0.0;

        for batch_idx in 0..n_batches {
            let start_idx = batch_idx * self.config.batch_size;
            let end_idx = (start_idx + self.config.batch_size).min(n_samples);

            let X_batch = state.X_train.slice(s![start_idx..end_idx, ..]).to_owned();
            let y_batch = state.y_train.slice(s![start_idx..end_idx, ..]).to_owned();

            let batch_elbo = self.compute_elbo(&X_batch, &y_batch, state)?;
            epoch_elbo += batch_elbo * (end_idx - start_idx) as f64;

            self.update_variational_parameters(state, &X_batch, &y_batch)?;
        }

        // Sample-size-weighted average of the per-batch ELBO values.
        Ok(epoch_elbo / n_samples as f64)
    }

    fn update_variational_parameters(
        &self,
        state: &mut VariationalDeepGPState,
        _X_batch: &Array2<f64>,
        _y_batch: &Array2<f64>,
    ) -> Result<()> {
        // Placeholder update: perturbs the variational means with small random
        // steps scaled by the learning rate instead of gradient-based updates.
        let mut rng = StdRng::seed_from_u64(42);
        let lr = self.config.learning_rate;

        for layer_params in state.layer_parameters.iter_mut() {
            for i in 0..layer_params.mean.nrows() {
                for j in 0..layer_params.mean.ncols() {
                    let perturbation = rng.gen_range(-0.5..0.5) * lr * 0.1;
                    layer_params.mean[[i, j]] += perturbation;
                }
            }
        }

        Ok(())
    }

    fn forward_with_uncertainty(
        &self,
        X: &Array2<f64>,
        state: &VariationalDeepGPState,
    ) -> Result<(Array2<f64>, Array2<f64>)> {
        let mut current_mean = X.clone();
        let mut current_var = Array2::zeros(X.dim());

        for (layer_idx, layer_params) in state.layer_parameters.iter().enumerate() {
            let (layer_mean, layer_var) =
                self.forward_layer(&current_mean, &current_var, layer_idx, layer_params)?;

            current_mean = layer_mean;
            current_var = layer_var;
        }

        Ok((current_mean, current_var))
    }

    fn forward_to_layer(
        &self,
        X: &Array2<f64>,
        target_layer: usize,
        state: &VariationalDeepGPState,
    ) -> Result<(Array2<f64>, Array2<f64>)> {
        let mut current_mean = X.clone();
        let mut current_var = Array2::zeros(X.dim());

        for layer_idx in 0..=target_layer {
            let layer_params = &state.layer_parameters[layer_idx];
            let (layer_mean, layer_var) =
                self.forward_layer(&current_mean, &current_var, layer_idx, layer_params)?;

            current_mean = layer_mean;
            current_var = layer_var;
        }

        Ok((current_mean, current_var))
    }

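    // Explanatory note (not an implementation): a full sparse variational GP
    // layer would propagate each input x through the predictive distribution
    //     mean(x) = K_xz K_zz^{-1} m,
    //     var(x)  = k_xx - diag(K_xz K_zz^{-1} (K_zz - S) K_zz^{-1} K_zx),
    // using the inducing inputs z, variational mean m, and covariance
    // S = L L^T stored in `VariationalLayerParameters`. The method below is a
    // simplified placeholder.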
    fn forward_layer(
        &self,
        input_mean: &Array2<f64>,
        _input_var: &Array2<f64>,
        layer_idx: usize,
        _layer_params: &VariationalLayerParameters,
    ) -> Result<(Array2<f64>, Array2<f64>)> {
        let layer_config = &self.config.layer_configs[layer_idx];
        let n_test = input_mean.nrows();

        // Placeholder forward pass: zero mean and unit variance for every output.
        let output_mean = Array2::zeros((n_test, layer_config.output_dim));
        let output_var = Array2::ones((n_test, layer_config.output_dim));

        Ok((output_mean, output_var))
    }

    fn compute_expected_log_likelihood(
        &self,
        X: &Array2<f64>,
        y: &Array2<f64>,
        state: &VariationalDeepGPState,
    ) -> Result<f64> {
        let mut total_log_likelihood = 0.0;

        // Monte Carlo estimate of E_q[log p(y | f)].
        for _ in 0..self.config.n_mc_samples {
            let (predictions, _) = self.forward_with_uncertainty(X, state)?;
            let log_likelihood = self.compute_log_likelihood(&predictions, y)?;
            total_log_likelihood += log_likelihood;
        }

        Ok(total_log_likelihood / self.config.n_mc_samples as f64)
    }

    fn compute_log_likelihood(
        &self,
        predictions: &Array2<f64>,
        targets: &Array2<f64>,
    ) -> Result<f64> {
        match &self.config.likelihood {
            VariationalLikelihood::Gaussian { noise_variance } => {
                let diff = predictions - targets;
                let sum_squared_diff = diff.iter().map(|x| x * x).sum::<f64>();
                let n = predictions.len() as f64;
                Ok(-0.5 * n * (2.0 * PI * noise_variance).ln()
                    - 0.5 * sum_squared_diff / noise_variance)
            }
            VariationalLikelihood::LearnableGaussian { .. } => {
                // Placeholder: uses a fixed noise variance instead of the
                // learned value.
                let noise_variance = 0.1;
                let diff = predictions - targets;
                let sum_squared_diff = diff.iter().map(|x| x * x).sum::<f64>();
                let n = predictions.len() as f64;
                Ok(-0.5 * n * (2.0 * PI * noise_variance).ln()
                    - 0.5 * sum_squared_diff / noise_variance)
            }
            _ => {
                // Non-Gaussian likelihoods are not implemented yet.
                Ok(0.0)
            }
        }
    }

    fn compute_layer_kl_divergence(
        &self,
        _layer_idx: usize,
        layer_params: &VariationalLayerParameters,
    ) -> Result<f64> {
        // Simplified surrogate for KL(q(u) || p(u)) with a standard normal
        // prior: per output dimension it accumulates tr(L L^T), ||m||^2, and
        // the number of inducing points M. The exact Gaussian KL is
        // 0.5 * (tr(S) + m^T m - M - ln|S|), so this is a placeholder rather
        // than the true divergence.
        let mut kl = 0.0;
        let n_inducing = layer_params.mean.nrows();
        let output_dim = layer_params.mean.ncols();

        for d in 0..output_dim {
            // Trace term from the Cholesky factor.
            for i in 0..n_inducing {
                for j in 0..n_inducing {
                    kl += layer_params.chol_cov[[i, j, d]].powi(2);
                }
            }

            // Squared norm of the variational mean.
            for i in 0..n_inducing {
                kl += layer_params.mean[[i, d]].powi(2);
            }

            kl += n_inducing as f64;
        }

        Ok(0.5 * kl)
    }
}

/// Builder for [`VariationalDeepGaussianProcess`].
#[derive(Debug)]
pub struct VariationalDeepGPBuilder {
    config: VariationalDeepGPConfig,
}

impl VariationalDeepGPBuilder {
    pub fn new() -> Self {
        Self {
            config: VariationalDeepGPConfig::default(),
        }
    }

    /// Sets the layer dimensions, e.g. `vec![2, 5, 1]` creates a 2 -> 5 and a
    /// 5 -> 1 layer. Panics if fewer than two dimensions are given.
    pub fn layer_dims(mut self, dims: Vec<usize>) -> Self {
        if dims.len() < 2 {
            panic!("Must specify at least input and output dimensions");
        }

        self.config.layer_configs.clear();
        for i in 0..(dims.len() - 1) {
            let layer_config = VariationalLayerConfig {
                input_dim: dims[i],
                output_dim: dims[i + 1],
                ..Default::default()
            };
            self.config.layer_configs.push(layer_config);
        }

        self
    }

    /// Sets the number of inducing points per layer.
    pub fn n_inducing_points(mut self, n_inducing: Vec<usize>) -> Self {
        for (i, &n) in n_inducing.iter().enumerate() {
            if i < self.config.layer_configs.len() {
                self.config.layer_configs[i].n_inducing = n;
            }
        }
        self
    }

    /// Sets the kernel for each layer.
    pub fn kernels(mut self, kernels: Vec<Box<dyn Kernel>>) -> Self {
        for (i, kernel) in kernels.into_iter().enumerate() {
            if i < self.config.layer_configs.len() {
                self.config.layer_configs[i].kernel = kernel;
            }
        }
        self
    }

    pub fn likelihood(mut self, likelihood: VariationalLikelihood) -> Self {
        self.config.likelihood = likelihood;
        self
    }

    pub fn n_mc_samples(mut self, n_samples: usize) -> Self {
        self.config.n_mc_samples = n_samples;
        self
    }

    pub fn learning_rate(mut self, lr: f64) -> Self {
        self.config.learning_rate = lr;
        self
    }

    pub fn batch_size(mut self, batch_size: usize) -> Self {
        self.config.batch_size = batch_size;
        self
    }

    pub fn max_epochs(mut self, max_epochs: usize) -> Self {
        self.config.max_epochs = max_epochs;
        self
    }

    pub fn convergence_tolerance(mut self, tolerance: f64) -> Self {
        self.config.convergence_tolerance = tolerance;
        self
    }

    pub fn use_natural_gradients(mut self, use_natural: bool) -> Self {
        self.config.use_natural_gradients = use_natural;
        self
    }

    pub fn random_seed(mut self, seed: u64) -> Self {
        self.config.random_seed = Some(seed);
        self
    }

    pub fn build(self) -> VariationalDeepGaussianProcess {
        VariationalDeepGaussianProcess::new(self.config)
    }
}

impl Default for VariationalDeepGPBuilder {
    fn default() -> Self {
        Self::new()
    }
}

#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;
    use crate::kernels::RBF;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_variational_deep_gp_creation() {
        let vdgp = VariationalDeepGaussianProcess::builder()
            .layer_dims(vec![2, 5, 1])
            .n_inducing_points(vec![10, 8])
            .build();

        assert_eq!(vdgp.n_layers(), 2);
        assert_eq!(vdgp.config.layer_configs[0].input_dim, 2);
        assert_eq!(vdgp.config.layer_configs[0].output_dim, 5);
        assert_eq!(vdgp.config.layer_configs[1].input_dim, 5);
        assert_eq!(vdgp.config.layer_configs[1].output_dim, 1);
    }

    #[test]
    fn test_likelihood_variants() {
        let likelihoods = vec![
            VariationalLikelihood::Gaussian {
                noise_variance: 0.1,
            },
            VariationalLikelihood::LearnableGaussian {
                initial_noise_variance: 0.2,
            },
            VariationalLikelihood::Bernoulli,
            VariationalLikelihood::Poisson,
            VariationalLikelihood::Beta {
                alpha: 1.0,
                beta: 1.0,
            },
            VariationalLikelihood::StudentT {
                degrees_of_freedom: 3.0,
                scale: 1.0,
            },
        ];

        for likelihood in likelihoods {
            let vdgp = VariationalDeepGaussianProcess::builder()
                .layer_dims(vec![2, 1])
                .likelihood(likelihood)
                .build();

            match vdgp.config.likelihood {
                VariationalLikelihood::Gaussian { .. } => {}
                VariationalLikelihood::LearnableGaussian { .. } => {}
                VariationalLikelihood::Bernoulli => {}
                VariationalLikelihood::Poisson => {}
                VariationalLikelihood::Beta { .. } => {}
                VariationalLikelihood::StudentT { .. } => {}
            }
        }
    }

    #[test]
    fn test_layer_config_creation() {
        let config = VariationalLayerConfig {
            input_dim: 3,
            output_dim: 5,
            n_inducing: 15,
            kernel: Box::new(RBF::new(1.5)),
            optimizer: VariationalOptimizer::NaturalGradients,
            whiten: false,
            initial_variance: 0.5,
        };

        assert_eq!(config.input_dim, 3);
        assert_eq!(config.output_dim, 5);
        assert_eq!(config.n_inducing, 15);
        assert_eq!(config.whiten, false);
        assert!((config.initial_variance - 0.5).abs() < 1e-10);
    }

    #[test]
    fn test_builder_configuration() {
        let vdgp = VariationalDeepGaussianProcess::builder()
            .layer_dims(vec![3, 10, 5, 2])
            .n_inducing_points(vec![20, 15, 10])
            .n_mc_samples(50)
            .learning_rate(0.001)
            .batch_size(64)
            .max_epochs(500)
            .convergence_tolerance(1e-8)
            .use_natural_gradients(true)
            .random_seed(123)
            .build();

        assert_eq!(vdgp.n_layers(), 3);
        assert_eq!(vdgp.config.n_mc_samples, 50);
        assert!((vdgp.config.learning_rate - 0.001).abs() < 1e-10);
        assert_eq!(vdgp.config.batch_size, 64);
        assert_eq!(vdgp.config.max_epochs, 500);
        assert!((vdgp.config.convergence_tolerance - 1e-8).abs() < 1e-15);
        assert_eq!(vdgp.config.use_natural_gradients, true);
        assert_eq!(vdgp.config.random_seed, Some(123));
    }

    #[test]
    fn test_variational_deep_gp_config_default() {
        let config = VariationalDeepGPConfig::default();

        assert_eq!(config.layer_configs.len(), 1);
        assert_eq!(config.n_mc_samples, 10);
        assert!((config.learning_rate - 0.01).abs() < 1e-10);
        assert_eq!(config.batch_size, 100);
        assert_eq!(config.max_epochs, 1000);
        assert!((config.convergence_tolerance - 1e-6).abs() < 1e-15);
        assert_eq!(config.use_natural_gradients, false);
        assert_eq!(config.random_seed, None);
    }

    #[test]
    fn test_variational_layer_parameters_structure() {
        let params = VariationalLayerParameters {
            mean: Array2::zeros((10, 3)),
            chol_cov: Array3::zeros((10, 10, 3)),
            inducing_points: Array2::zeros((10, 2)),
            kernel_params: array![1.0, 0.5],
        };

        assert_eq!(params.mean.shape(), &[10, 3]);
        assert_eq!(params.chol_cov.shape(), &[10, 10, 3]);
        assert_eq!(params.inducing_points.shape(), &[10, 2]);
        assert_eq!(params.kernel_params.len(), 2);
    }

    #[test]
    fn test_training_state_structure() {
        let state = VariationalDeepGPState {
            layer_parameters: vec![],
            X_train: Array2::zeros((100, 3)),
            y_train: Array2::zeros((100, 1)),
            current_elbo: -1000.0,
            elbo_history: vec![-1200.0, -1100.0, -1000.0],
            epochs_completed: 3,
            converged: false,
        };

        assert_eq!(state.layer_parameters.len(), 0);
        assert_eq!(state.X_train.shape(), &[100, 3]);
        assert_eq!(state.y_train.shape(), &[100, 1]);
        assert!((state.current_elbo + 1000.0).abs() < 1e-10);
        assert_eq!(state.elbo_history.len(), 3);
        assert_eq!(state.epochs_completed, 3);
        assert_eq!(state.converged, false);
    }

    #[test]
    fn test_unfitted_model_errors() {
        let vdgp = VariationalDeepGaussianProcess::builder()
            .layer_dims(vec![2, 1])
            .build();

        let X_test = array![[1.0, 2.0], [3.0, 4.0]];

        assert!(vdgp.predict(&X_test).is_err());
        assert!(vdgp.elbo().is_none());
        assert!(vdgp.elbo_history().is_none());
        assert!(vdgp.has_converged().is_none());
    }
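
    // A minimal fit/predict smoke test (sketch): with the current placeholder
    // forward pass this only checks that training runs and that the output
    // shapes are consistent, not predictive quality.
    #[test]
    fn test_fit_predict_shapes() {
        let mut vdgp = VariationalDeepGaussianProcess::builder()
            .layer_dims(vec![2, 3, 1])
            .n_inducing_points(vec![4, 4])
            .batch_size(4)
            .max_epochs(3)
            .random_seed(0)
            .build();

        let X = array![[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [0.6, 0.7]];
        let y = array![[0.0], [1.0], [0.0], [1.0]];

        vdgp.fit(&X, &y).unwrap();
        assert!(vdgp.elbo().is_some());
        assert_eq!(vdgp.has_converged(), Some(false));

        let (mean, var) = vdgp.predict(&X).unwrap();
        assert_eq!(mean.shape(), &[4, 1]);
        assert_eq!(var.shape(), &[4, 1]);
    }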

    #[test]
    fn test_student_t_likelihood_configuration() {
        let likelihood = VariationalLikelihood::StudentT {
            degrees_of_freedom: 5.0,
            scale: 2.0,
        };

        match likelihood {
            VariationalLikelihood::StudentT {
                degrees_of_freedom,
                scale,
            } => {
                assert!((degrees_of_freedom - 5.0).abs() < 1e-10);
                assert!((scale - 2.0).abs() < 1e-10);
            }
            _ => panic!("Wrong likelihood type"),
        }
    }

    #[test]
    fn test_beta_likelihood_configuration() {
        let likelihood = VariationalLikelihood::Beta {
            alpha: 2.0,
            beta: 3.0,
        };

        match likelihood {
            VariationalLikelihood::Beta { alpha, beta } => {
                assert!((alpha - 2.0).abs() < 1e-10);
                assert!((beta - 3.0).abs() < 1e-10);
            }
            _ => panic!("Wrong likelihood type"),
        }
    }
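
    // Sanity check (sketch) for the closed-form Gaussian log-likelihood used by
    // `compute_log_likelihood`: with predictions equal to the targets only the
    // normalisation term -0.5 * n * ln(2 * pi * sigma^2) remains.
    #[test]
    fn test_gaussian_log_likelihood_value() {
        let vdgp = VariationalDeepGaussianProcess::builder()
            .layer_dims(vec![1, 1])
            .likelihood(VariationalLikelihood::Gaussian {
                noise_variance: 0.1,
            })
            .build();

        let predictions = array![[0.5], [1.5]];
        let targets = predictions.clone();

        let log_likelihood = vdgp
            .compute_log_likelihood(&predictions, &targets)
            .unwrap();
        let expected = -0.5 * 2.0 * (2.0 * std::f64::consts::PI * 0.1).ln();
        assert!((log_likelihood - expected).abs() < 1e-12);
    }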

    #[test]
    fn test_multi_layer_architecture() {
        let vdgp = VariationalDeepGaussianProcess::builder()
            .layer_dims(vec![5, 20, 15, 10, 3])
            .n_inducing_points(vec![25, 20, 15, 12])
            .build();

        assert_eq!(vdgp.n_layers(), 4);

        // First layer.
        assert_eq!(vdgp.config.layer_configs[0].input_dim, 5);
        assert_eq!(vdgp.config.layer_configs[0].output_dim, 20);
        assert_eq!(vdgp.config.layer_configs[0].n_inducing, 25);

        // Last layer.
        assert_eq!(vdgp.config.layer_configs[3].input_dim, 10);
        assert_eq!(vdgp.config.layer_configs[3].output_dim, 3);
        assert_eq!(vdgp.config.layer_configs[3].n_inducing, 12);
    }

    #[test]
    fn test_variational_optimizer_types() {
        let optimizers = vec![
            VariationalOptimizer::Adam,
            VariationalOptimizer::NaturalGradients,
            VariationalOptimizer::DoublyStochastic,
        ];

        for optimizer in optimizers {
            let config = VariationalLayerConfig {
                optimizer: optimizer.clone(),
                ..Default::default()
            };

            match config.optimizer {
                VariationalOptimizer::Adam => {}
                VariationalOptimizer::NaturalGradients => {}
                VariationalOptimizer::DoublyStochastic => {}
            }
        }
    }
}