1use crate::random::{
58 core::{seeded_rng, Random},
59 distributions::MultivariateNormal,
60 parallel::{ParallelRng, ThreadLocalRngPool},
61};
62use ::ndarray::{Array1, Array2, Axis};
63use rand::{Rng, RngExt};
64use rand_distr::{Distribution, Normal, Uniform};
65use std::collections::VecDeque;
66
/// Hamiltonian Monte Carlo (HMC) sampler.
///
/// Proposes new states by simulating Hamiltonian dynamics with a leapfrog
/// integrator, then accepts or rejects with a Metropolis correction. The
/// step size is nudged toward `target_acceptance_rate` using a sliding
/// window of recent accept/reject outcomes.
#[derive(Debug)]
pub struct HamiltonianMonteCarlo {
    /// Base leapfrog step size supplied at construction (kept for reference).
    step_size: f64,
    /// Number of leapfrog steps per proposal trajectory.
    num_leapfrog_steps: usize,
    /// Mass matrix; only the diagonal is read by the integrator and
    /// momentum sampler.
    mass_matrix: Array2<f64>,
    /// Current (adapted) step size actually used by the integrator.
    adapted_step_size: f64,
    /// Maximum number of recent accept/reject outcomes retained.
    adaptation_window: usize,
    /// Acceptance rate the step-size adaptation steers toward.
    target_acceptance_rate: f64,
    /// Sliding window of accept/reject outcomes (front = oldest).
    acceptance_history: VecDeque<bool>,
}
82
83impl HamiltonianMonteCarlo {
84 pub fn new(dimension: usize, step_size: f64, num_leapfrog_steps: usize) -> Self {
86 Self {
87 step_size,
88 num_leapfrog_steps,
89 mass_matrix: Array2::eye(dimension),
90 adapted_step_size: step_size,
91 adaptation_window: 100,
92 target_acceptance_rate: 0.8,
93 acceptance_history: VecDeque::new(),
94 }
95 }
96
97 pub fn with_mass_matrix(mut self, mass_matrix: Array2<f64>) -> Self {
99 self.mass_matrix = mass_matrix;
100 self
101 }
102
103 pub fn sample<F, G>(
105 &mut self,
106 log_density: F,
107 gradient: G,
108 initial_state: Array1<f64>,
109 num_samples: usize,
110 ) -> Result<Vec<Array1<f64>>, String>
111 where
112 F: Fn(&Array1<f64>) -> f64,
113 G: Fn(&Array1<f64>) -> Array1<f64>,
114 {
115 let mut rng = seeded_rng(42);
116 let mut current_state = initial_state;
117 let mut current_log_density = log_density(¤t_state);
118 let mut samples = Vec::with_capacity(num_samples);
119
120 for i in 0..num_samples {
121 let momentum = self.sample_momentum(&mut rng)?;
123
124 let (proposed_state, proposed_momentum) =
126 self.leapfrog_integration(¤t_state, &momentum, &gradient)?;
127
128 let proposed_log_density = log_density(&proposed_state);
130 let current_hamiltonian = -current_log_density + self.kinetic_energy(&momentum);
131 let proposed_hamiltonian =
132 -proposed_log_density + self.kinetic_energy(&proposed_momentum);
133
134 let log_acceptance_prob = -(proposed_hamiltonian - current_hamiltonian);
135 let accept = if log_acceptance_prob >= 0.0 {
136 true
137 } else {
138 (rng.sample(Uniform::new(0.0, 1.0).expect("Operation failed")) as f64).ln()
139 < log_acceptance_prob
140 };
141
142 if accept {
144 current_state = proposed_state;
145 current_log_density = proposed_log_density;
146 }
147
148 samples.push(current_state.clone());
149 self.acceptance_history.push_back(accept);
150
151 if i > 0 && i % 50 == 0 {
153 self.adapt_step_size();
154 }
155
156 if self.acceptance_history.len() > self.adaptation_window {
158 self.acceptance_history.pop_front();
159 }
160 }
161
162 Ok(samples)
163 }
164
165 fn sample_momentum(&self, rng: &mut Random<rand::rngs::StdRng>) -> Result<Array1<f64>, String> {
167 let dimension = self.mass_matrix.nrows();
168 let mut momentum = Array1::zeros(dimension);
169
170 for i in 0..dimension {
171 momentum[i] = rng.sample(Normal::new(0.0, 1.0).expect("Operation failed"));
172 }
173
174 for i in 0..dimension {
176 momentum[i] *= self.mass_matrix[[i, i]].sqrt();
177 }
178
179 Ok(momentum)
180 }
181
182 fn leapfrog_integration<G>(
184 &self,
185 initial_position: &Array1<f64>,
186 initial_momentum: &Array1<f64>,
187 gradient: G,
188 ) -> Result<(Array1<f64>, Array1<f64>), String>
189 where
190 G: Fn(&Array1<f64>) -> Array1<f64>,
191 {
192 let mut position = initial_position.clone();
193 let mut momentum = initial_momentum.clone();
194
195 let grad = gradient(&position);
197 for i in 0..momentum.len() {
198 momentum[i] += 0.5 * self.adapted_step_size * grad[i];
199 }
200
201 for _ in 0..self.num_leapfrog_steps {
203 for i in 0..position.len() {
205 position[i] += self.adapted_step_size * momentum[i] / self.mass_matrix[[i, i]];
206 }
207
208 let grad = gradient(&position);
210 for i in 0..momentum.len() {
211 momentum[i] += self.adapted_step_size * grad[i];
212 }
213 }
214
215 let grad = gradient(&position);
217 for i in 0..momentum.len() {
218 momentum[i] += 0.5 * self.adapted_step_size * grad[i];
219 }
220
221 for i in 0..momentum.len() {
223 momentum[i] = -momentum[i];
224 }
225
226 Ok((position, momentum))
227 }
228
229 fn kinetic_energy(&self, momentum: &Array1<f64>) -> f64 {
231 let mut energy = 0.0;
232 for i in 0..momentum.len() {
233 energy += 0.5 * momentum[i] * momentum[i] / self.mass_matrix[[i, i]];
234 }
235 energy
236 }
237
238 fn adapt_step_size(&mut self) {
240 if self.acceptance_history.is_empty() {
241 return;
242 }
243
244 let acceptance_rate = self
245 .acceptance_history
246 .iter()
247 .map(|&accepted| if accepted { 1.0 } else { 0.0 })
248 .sum::<f64>()
249 / self.acceptance_history.len() as f64;
250
251 let adaptation_rate = 0.1;
252 if acceptance_rate > self.target_acceptance_rate {
253 self.adapted_step_size *= 1.0 + adaptation_rate;
254 } else {
255 self.adapted_step_size *= 1.0 - adaptation_rate;
256 }
257
258 self.adapted_step_size = self.adapted_step_size.max(1e-6).min(10.0);
260 }
261
262 pub fn acceptance_rate(&self) -> f64 {
264 if self.acceptance_history.is_empty() {
265 0.0
266 } else {
267 self.acceptance_history
268 .iter()
269 .map(|&accepted| if accepted { 1.0 } else { 0.0 })
270 .sum::<f64>()
271 / self.acceptance_history.len() as f64
272 }
273 }
274}
275
/// Simplified No-U-Turn-style sampler.
///
/// NOTE(review): this is a heavily simplified variant of NUTS — it extends
/// the trajectory one leapfrog step at a time in a random direction rather
/// than building the balanced doubling tree of the full algorithm.
#[derive(Debug)]
pub struct NoUTurnSampler {
    /// Dimensionality of the target distribution.
    dimension: usize,
    /// Leapfrog step size. NOTE(review): never adapted in the visible code,
    /// despite the `sample_adaptive` name — only the mass matrix is adapted.
    step_size: f64,
    /// Maximum number of trajectory-extension steps per iteration.
    max_tree_depth: usize,
    /// Desired acceptance rate. NOTE(review): not read anywhere in this impl.
    target_acceptance_rate: f64,
    /// Number of initial iterations during which adaptation runs.
    adaptation_phase_length: usize,
    /// Diagonal mass-matrix estimate, updated from sample covariance.
    /// NOTE(review): not consumed by `leapfrog_step`, which uses identity mass.
    mass_matrix: Array2<f64>,
}
290
291impl NoUTurnSampler {
292 pub fn new(dimension: usize) -> Self {
294 Self {
295 dimension,
296 step_size: 0.1,
297 max_tree_depth: 10,
298 target_acceptance_rate: 0.8,
299 adaptation_phase_length: 1000,
300 mass_matrix: Array2::eye(dimension),
301 }
302 }
303
304 pub fn sample_adaptive<F, G>(
306 &mut self,
307 log_density: F,
308 gradient: G,
309 initial_state: Array1<f64>,
310 num_samples: usize,
311 ) -> Result<Vec<Array1<f64>>, String>
312 where
313 F: Fn(&Array1<f64>) -> f64,
314 G: Fn(&Array1<f64>) -> Array1<f64>,
315 {
316 let mut rng = seeded_rng(42);
317 let mut current_state = initial_state;
318 let mut samples = Vec::with_capacity(num_samples);
319
320 let adaptation_samples = self.adaptation_phase_length.min(num_samples / 2);
321
322 for i in 0..num_samples {
323 let (new_state, _) =
325 self.build_tree(¤t_state, &log_density, &gradient, &mut rng)?;
326
327 current_state = new_state;
328 samples.push(current_state.clone());
329
330 if i < adaptation_samples {
332 if i > 0 && i % 50 == 0 {
334 self.adapt_parameters(&samples[i.saturating_sub(50)..]);
335 }
336 }
337 }
338
339 Ok(samples)
340 }
341
342 fn build_tree<F, G>(
344 &self,
345 initial_state: &Array1<f64>,
346 log_density: &F,
347 gradient: &G,
348 rng: &mut Random<rand::rngs::StdRng>,
349 ) -> Result<(Array1<f64>, bool), String>
350 where
351 F: Fn(&Array1<f64>) -> f64,
352 G: Fn(&Array1<f64>) -> Array1<f64>,
353 {
354 let mut momentum = Array1::zeros(self.dimension);
356 for i in 0..self.dimension {
357 momentum[i] = rng.sample(Normal::new(0.0, 1.0).expect("Operation failed"));
358 }
359
360 let mut current_state = initial_state.clone();
362 let slice_u: f64 = rng.sample(Uniform::new(0.0, 1.0).expect("Operation failed"));
363 let log_slice_u =
364 slice_u.ln() + log_density(¤t_state) - self.kinetic_energy(&momentum);
365
366 for depth in 0..self.max_tree_depth {
368 let direction = if rng.sample(Uniform::new(0.0, 1.0).expect("Operation failed")) < 0.5 {
370 -1.0
371 } else {
372 1.0
373 };
374
375 let (new_state, new_momentum) = self.leapfrog_step(
377 ¤t_state,
378 &momentum,
379 direction * self.step_size,
380 gradient,
381 )?;
382
383 let new_log_density = log_density(&new_state);
385 let new_hamiltonian = new_log_density - self.kinetic_energy(&new_momentum);
386
387 if new_hamiltonian > log_slice_u {
388 current_state = new_state;
389 break;
390 }
391
392 let dot_product = self.compute_dot_product(&momentum, &new_momentum);
394 if dot_product < 0.0 {
395 break;
396 }
397 }
398
399 Ok((current_state, true))
400 }
401
402 fn leapfrog_step<G>(
404 &self,
405 position: &Array1<f64>,
406 momentum: &Array1<f64>,
407 step_size: f64,
408 gradient: G,
409 ) -> Result<(Array1<f64>, Array1<f64>), String>
410 where
411 G: Fn(&Array1<f64>) -> Array1<f64>,
412 {
413 let mut new_momentum = momentum.clone();
414 let mut new_position = position.clone();
415
416 let grad = gradient(&new_position);
418 for i in 0..new_momentum.len() {
419 new_momentum[i] += 0.5 * step_size * grad[i];
420 }
421
422 for i in 0..new_position.len() {
424 new_position[i] += step_size * new_momentum[i];
425 }
426
427 let grad = gradient(&new_position);
429 for i in 0..new_momentum.len() {
430 new_momentum[i] += 0.5 * step_size * grad[i];
431 }
432
433 Ok((new_position, new_momentum))
434 }
435
436 fn kinetic_energy(&self, momentum: &Array1<f64>) -> f64 {
438 0.5 * momentum.iter().map(|&p| p * p).sum::<f64>()
439 }
440
441 fn compute_dot_product(&self, momentum1: &Array1<f64>, momentum2: &Array1<f64>) -> f64 {
443 momentum1
444 .iter()
445 .zip(momentum2.iter())
446 .map(|(&a, &b)| a * b)
447 .sum()
448 }
449
450 fn adapt_parameters(&mut self, recent_samples: &[Array1<f64>]) {
452 if recent_samples.len() < 10 {
453 return;
454 }
455
456 let mean = self.compute_sample_mean(recent_samples);
458 let covariance = self.compute_sample_covariance(recent_samples, &mean);
459
460 for i in 0..self.dimension {
462 if covariance[[i, i]] > 1e-10 {
463 self.mass_matrix[[i, i]] = covariance[[i, i]];
464 }
465 }
466 }
467
468 fn compute_sample_mean(&self, samples: &[Array1<f64>]) -> Array1<f64> {
470 let mut mean = Array1::zeros(self.dimension);
471 for sample in samples {
472 for i in 0..self.dimension {
473 mean[i] += sample[i];
474 }
475 }
476 for i in 0..self.dimension {
477 mean[i] /= samples.len() as f64;
478 }
479 mean
480 }
481
482 fn compute_sample_covariance(
484 &self,
485 samples: &[Array1<f64>],
486 mean: &Array1<f64>,
487 ) -> Array2<f64> {
488 let mut cov = Array2::zeros((self.dimension, self.dimension));
489 for sample in samples {
490 for i in 0..self.dimension {
491 for j in 0..self.dimension {
492 let diff_i = sample[i] - mean[i];
493 let diff_j = sample[j] - mean[j];
494 cov[[i, j]] += diff_i * diff_j;
495 }
496 }
497 }
498 for i in 0..self.dimension {
499 for j in 0..self.dimension {
500 cov[[i, j]] /= (samples.len() - 1) as f64;
501 }
502 }
503 cov
504 }
505}
506
/// Stein Variational Gradient Descent (SVGD): a deterministic,
/// particle-based approximation of the target distribution.
#[derive(Debug)]
pub struct SteinVariationalGradientDescent {
    /// Number of interacting particles.
    num_particles: usize,
    /// Gradient-ascent step size (slowly decayed during `optimize`).
    step_size: f64,
    /// Multiplier applied to the median-heuristic kernel bandwidth.
    bandwidth_scale: f64,
    /// Particle positions, one row per particle.
    particles: Array2<f64>,
}
518
519impl SteinVariationalGradientDescent {
520 pub fn new(num_particles: usize, step_size: f64) -> Self {
522 Self {
523 num_particles,
524 step_size,
525 bandwidth_scale: 1.0,
526 particles: Array2::zeros((num_particles, 0)), }
528 }
529
530 pub fn initialize_particles(&mut self, dimension: usize, seed: u64) {
532 let mut rng = seeded_rng(seed);
533 self.particles = Array2::zeros((self.num_particles, dimension));
534
535 for i in 0..self.num_particles {
536 for j in 0..dimension {
537 self.particles[[i, j]] =
538 rng.sample(Normal::new(0.0, 1.0).expect("Operation failed"));
539 }
540 }
541 }
542
543 pub fn optimize<F, G>(
545 &mut self,
546 log_density: F,
547 gradient: G,
548 initial_particles: Array2<f64>,
549 num_iterations: usize,
550 ) -> Result<Array2<f64>, String>
551 where
552 F: Fn(&Array1<f64>) -> f64,
553 G: Fn(&Array1<f64>) -> Array1<f64>,
554 {
555 self.particles = initial_particles;
556 let dimension = self.particles.ncols();
557
558 for iter in 0..num_iterations {
559 let bandwidth = self.compute_bandwidth();
561
562 for i in 0..self.num_particles {
564 let mut particle_update: Array1<f64> = Array1::zeros(dimension);
565
566 for j in 0..self.num_particles {
567 let (kernel_val, kernel_grad) = self.rbf_kernel_and_gradient(i, j, bandwidth);
569
570 let particle_j = self.particles.row(j).to_owned();
572
573 let grad_log_p = gradient(&particle_j);
575
576 for d in 0..dimension {
578 particle_update[d] += kernel_val * grad_log_p[d] + kernel_grad[d];
579 }
580 }
581
582 for d in 0..dimension {
584 self.particles[[i, d]] +=
585 self.step_size * particle_update[d] / self.num_particles as f64;
586 }
587 }
588
589 if iter > 0 && iter % 100 == 0 {
591 self.step_size *= 0.99; }
593 }
594
595 Ok(self.particles.clone())
596 }
597
598 fn rbf_kernel_and_gradient(&self, i: usize, j: usize, bandwidth: f64) -> (f64, Array1<f64>) {
600 let dimension = self.particles.ncols();
601 let mut diff = Array1::zeros(dimension);
602 let mut squared_distance = 0.0;
603
604 for d in 0..dimension {
605 diff[d] = self.particles[[i, d]] - self.particles[[j, d]];
606 squared_distance += diff[d] * diff[d];
607 }
608
609 let kernel_val = (-squared_distance / bandwidth).exp();
610 let mut kernel_grad = Array1::zeros(dimension);
611
612 for d in 0..dimension {
613 kernel_grad[d] = -2.0 * diff[d] * kernel_val / bandwidth;
614 }
615
616 (kernel_val, kernel_grad)
617 }
618
619 fn compute_bandwidth(&self) -> f64 {
621 let mut distances = Vec::new();
622
623 for i in 0..self.num_particles {
624 for j in (i + 1)..self.num_particles {
625 let mut dist_sq = 0.0;
626 for d in 0..self.particles.ncols() {
627 let diff = self.particles[[i, d]] - self.particles[[j, d]];
628 dist_sq += diff * diff;
629 }
630 distances.push(dist_sq.sqrt());
631 }
632 }
633
634 distances.sort_by(|a, b| a.partial_cmp(b).expect("Operation failed"));
635 let median_distance = if distances.is_empty() {
636 1.0
637 } else {
638 distances[distances.len() / 2]
639 };
640
641 self.bandwidth_scale * median_distance * median_distance
642 / (2.0 * (self.num_particles as f64).ln())
643 }
644
645 pub fn get_particles(&self) -> &Array2<f64> {
647 &self.particles
648 }
649
650 pub fn estimate_statistics(&self) -> (Array1<f64>, Array2<f64>) {
652 let dimension = self.particles.ncols();
653
654 let mut mean = Array1::zeros(dimension);
656 for i in 0..self.num_particles {
657 for j in 0..dimension {
658 mean[j] += self.particles[[i, j]];
659 }
660 }
661 for j in 0..dimension {
662 mean[j] /= self.num_particles as f64;
663 }
664
665 let mut covariance = Array2::zeros((dimension, dimension));
667 for i in 0..self.num_particles {
668 for j in 0..dimension {
669 for k in 0..dimension {
670 let diff_j = self.particles[[i, j]] - mean[j];
671 let diff_k = self.particles[[i, k]] - mean[k];
672 covariance[[j, k]] += diff_j * diff_k;
673 }
674 }
675 }
676 for j in 0..dimension {
677 for k in 0..dimension {
678 covariance[[j, k]] /= (self.num_particles - 1) as f64;
679 }
680 }
681
682 (mean, covariance)
683 }
684}
685
/// Elliptical slice sampler for targets that factor as a zero-mean
/// Gaussian prior times a likelihood.
#[derive(Debug)]
pub struct EllipticalSliceSampler {
    /// Covariance Sigma of the Gaussian prior N(0, Sigma).
    prior_covariance: Array2<f64>,
    /// Dimensionality, cached from `prior_covariance.nrows()`.
    dimension: usize,
}
692
693impl EllipticalSliceSampler {
694 pub fn new(prior_covariance: Array2<f64>) -> Self {
696 let dimension = prior_covariance.nrows();
697 Self {
698 prior_covariance,
699 dimension,
700 }
701 }
702
703 pub fn sample<F>(
705 &self,
706 log_likelihood: F,
707 initial_state: Array1<f64>,
708 num_samples: usize,
709 seed: u64,
710 ) -> Result<Vec<Array1<f64>>, String>
711 where
712 F: Fn(&Array1<f64>) -> f64,
713 {
714 let mut rng = seeded_rng(seed);
715 let mut current_state = initial_state;
716 let mut samples = Vec::with_capacity(num_samples);
717
718 let mvn = MultivariateNormal::new(
720 vec![0.0; self.dimension],
721 self.array_to_vec2d(&self.prior_covariance),
722 )
723 .map_err(|e| format!("Failed to create MVN: {}", e))?;
724
725 for _ in 0..num_samples {
726 let nu = Array1::from_vec(mvn.sample(&mut rng));
728
729 let log_y = log_likelihood(¤t_state)
731 + (rng.sample(Uniform::new(0.0, 1.0).expect("Operation failed")) as f64).ln();
732
733 let initial_theta = rng
735 .sample(Uniform::new(0.0, 2.0 * std::f64::consts::PI).expect("Operation failed"));
736 let mut theta_min = initial_theta - 2.0 * std::f64::consts::PI;
737 let mut theta_max = initial_theta;
738 let mut theta = initial_theta;
740
741 loop {
743 let cos_theta = theta.cos();
744 let sin_theta = theta.sin();
745
746 let mut proposal = Array1::zeros(self.dimension);
748 for i in 0..self.dimension {
749 proposal[i] = current_state[i] * cos_theta + nu[i] * sin_theta;
750 }
751
752 if log_likelihood(&proposal) > log_y {
754 current_state = proposal;
755 break;
756 }
757
758 if theta < initial_theta {
760 theta_min = theta;
761 } else {
762 theta_max = theta;
763 }
764
765 let new_theta =
767 rng.sample(Uniform::new(theta_min, theta_max).expect("Operation failed"));
768 if (new_theta - theta).abs() < 1e-10 {
769 break;
771 }
772 theta = new_theta;
774 }
775
776 samples.push(current_state.clone());
777 }
778
779 Ok(samples)
780 }
781
782 fn array_to_vec2d(&self, array: &Array2<f64>) -> Vec<Vec<f64>> {
784 array.rows().into_iter().map(|row| row.to_vec()).collect()
785 }
786}
787
/// Parallel tempering (replica exchange) sampler.
///
/// Runs several Metropolis chains at different temperatures and
/// periodically swaps states between adjacent temperatures so the hot
/// chains can carry the cold chain across low-density barriers.
#[derive(Debug)]
pub struct ParallelTempering {
    /// Number of tempered chains.
    num_chains: usize,
    /// Temperature ladder; index 0 is the coldest (target, T = 1) chain.
    temperatures: Vec<f64>,
    /// Swaps are attempted every `swap_frequency` iterations.
    swap_frequency: usize,
    /// Current state of each chain, parallel to `temperatures`.
    chains: Vec<Array1<f64>>,
}
796
797impl ParallelTempering {
798 pub fn new(num_chains: usize, max_temperature: f64) -> Self {
800 let temperatures: Vec<f64> = (0..num_chains)
802 .map(|i| (max_temperature / 1.0).powf(i as f64 / (num_chains - 1) as f64))
803 .collect();
804
805 Self {
806 num_chains,
807 temperatures,
808 swap_frequency: 10,
809 chains: Vec::new(),
810 }
811 }
812
813 pub fn sample<F>(
815 &mut self,
816 log_density: F,
817 initial_states: Vec<Array1<f64>>,
818 num_samples: usize,
819 seed: u64,
820 ) -> Result<Vec<Array1<f64>>, String>
821 where
822 F: Fn(&Array1<f64>) -> f64 + Send + Sync,
823 {
824 if initial_states.len() != self.num_chains {
825 return Err("Number of initial states must match number of chains".to_string());
826 }
827
828 self.chains = initial_states;
829 let mut samples = Vec::new();
830 let mut rng = seeded_rng(seed);
831
832 for iter in 0..num_samples {
833 for chain_idx in 0..self.num_chains {
835 let temperature = self.temperatures[chain_idx];
836 self.metropolis_update(chain_idx, temperature, &log_density, &mut rng)?;
837 }
838
839 if iter % self.swap_frequency == 0 {
841 self.attempt_swaps(&log_density, &mut rng)?;
842 }
843
844 samples.push(self.chains[0].clone());
846 }
847
848 Ok(samples)
849 }
850
851 fn metropolis_update<F>(
853 &mut self,
854 chain_idx: usize,
855 temperature: f64,
856 log_density: &F,
857 rng: &mut Random<rand::rngs::StdRng>,
858 ) -> Result<(), String>
859 where
860 F: Fn(&Array1<f64>) -> f64,
861 {
862 let current_state = &self.chains[chain_idx];
863 let dimension = current_state.len();
864
865 let mut proposal = current_state.clone();
867 let step_size = 0.1 * temperature.sqrt();
868 for i in 0..dimension {
869 proposal[i] += rng.sample(Normal::new(0.0, step_size).expect("Operation failed"));
870 }
871
872 let current_log_density = log_density(current_state);
874 let proposal_log_density = log_density(&proposal);
875
876 let log_acceptance_prob = (proposal_log_density - current_log_density) / temperature;
877
878 if log_acceptance_prob >= 0.0
880 || (rng.sample(Uniform::new(0.0, 1.0).expect("Operation failed")) as f64).ln()
881 < log_acceptance_prob
882 {
883 self.chains[chain_idx] = proposal;
884 }
885
886 Ok(())
887 }
888
889 fn attempt_swaps<F>(
891 &mut self,
892 log_density: &F,
893 rng: &mut Random<rand::rngs::StdRng>,
894 ) -> Result<(), String>
895 where
896 F: Fn(&Array1<f64>) -> f64,
897 {
898 for i in 0..(self.num_chains - 1) {
899 let temp_i = self.temperatures[i];
900 let temp_j = self.temperatures[i + 1];
901
902 let log_density_i = log_density(&self.chains[i]);
903 let log_density_j = log_density(&self.chains[i + 1]);
904
905 let log_swap_prob = (log_density_j - log_density_i) * (1.0 / temp_i - 1.0 / temp_j);
907
908 if log_swap_prob >= 0.0
910 || (rng.sample(Uniform::new(0.0, 1.0).expect("Operation failed")) as f64).ln()
911 < log_swap_prob
912 {
913 self.chains.swap(i, i + 1);
914 }
915 }
916
917 Ok(())
918 }
919
920 pub fn get_chain_states(&self) -> &[Array1<f64>] {
922 &self.chains
923 }
924
925 pub fn get_temperatures(&self) -> &[f64] {
927 &self.temperatures
928 }
929}
930
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    /// Log density of a standard 2-D Gaussian, up to an additive constant.
    fn std_normal_log_density(x: &Array1<f64>) -> f64 {
        -0.5 * (x[0].powi(2) + x[1].powi(2))
    }

    /// Gradient of `std_normal_log_density`.
    fn std_normal_gradient(x: &Array1<f64>) -> Array1<f64> {
        Array1::from_vec(vec![-x[0], -x[1]])
    }

    #[test]
    fn test_hmc_basic() {
        let mut sampler = HamiltonianMonteCarlo::new(2, 0.1, 10);

        let draws = sampler
            .sample(
                std_normal_log_density,
                std_normal_gradient,
                Array1::from_vec(vec![0.0, 0.0]),
                100,
            )
            .expect("Operation failed");

        assert_eq!(draws.len(), 100);

        // The chain mean should land near the target mean of zero.
        let count = draws.len() as f64;
        let mean_x: f64 = draws.iter().map(|s| s[0]).sum::<f64>() / count;
        let mean_y: f64 = draws.iter().map(|s| s[1]).sum::<f64>() / count;

        assert_relative_eq!(mean_x, 0.0, epsilon = 0.5);
        assert_relative_eq!(mean_y, 0.0, epsilon = 0.5);
    }

    #[test]
    fn test_nuts_basic() {
        let mut sampler = NoUTurnSampler::new(2);

        let draws = sampler
            .sample_adaptive(
                std_normal_log_density,
                std_normal_gradient,
                Array1::from_vec(vec![0.0, 0.0]),
                100,
            )
            .expect("Operation failed");

        assert_eq!(draws.len(), 100);
    }

    #[test]
    fn test_svgd_basic() {
        let mut svgd = SteinVariationalGradientDescent::new(50, 0.1);

        // Spread the starting particles out with a wide Gaussian.
        let mut starting = Array2::zeros((50, 2));
        let mut rng = seeded_rng(42);
        for row in 0..50 {
            for col in 0..2 {
                starting[[row, col]] =
                    rng.sample(Normal::new(0.0, 2.0).expect("Operation failed"));
            }
        }

        let finished = svgd
            .optimize(std_normal_log_density, std_normal_gradient, starting, 100)
            .expect("Operation failed");

        assert_eq!(finished.nrows(), 50);
        assert_eq!(finished.ncols(), 2);

        // The particle cloud should be roughly centered on the mode.
        let (mean, _) = svgd.estimate_statistics();
        assert_relative_eq!(mean[0], 0.0, epsilon = 0.5);
        assert_relative_eq!(mean[1], 0.0, epsilon = 0.5);
    }

    #[test]
    fn test_elliptical_slice_sampling() {
        let sampler = EllipticalSliceSampler::new(Array2::eye(2));

        let draws = sampler
            .sample(
                std_normal_log_density,
                Array1::from_vec(vec![0.0, 0.0]),
                50,
                42,
            )
            .expect("Operation failed");

        assert_eq!(draws.len(), 50);
    }

    #[test]
    fn test_parallel_tempering() {
        let mut tempering = ParallelTempering::new(4, 10.0);

        let starts = vec![
            Array1::from_vec(vec![0.0, 0.0]),
            Array1::from_vec(vec![1.0, 1.0]),
            Array1::from_vec(vec![-1.0, -1.0]),
            Array1::from_vec(vec![0.0, 1.0]),
        ];

        let draws = tempering
            .sample(std_normal_log_density, starts, 100, 42)
            .expect("Operation failed");

        assert_eq!(draws.len(), 100);
        assert_eq!(tempering.get_temperatures().len(), 4);
    }
}