use crate::random::{
    core::{seeded_rng, Random},
    distributions::MultivariateNormal,
};
use ::ndarray::{Array1, Array2};
use rand::Rng;
use rand_distr::{Distribution, Normal, Uniform};
use std::collections::VecDeque;

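/// Hamiltonian Monte Carlo (HMC) sampler with a diagonal mass matrix and
/// multiplicative step-size adaptation toward a target acceptance rate.
///
/// A minimal usage sketch for a standard-normal target (mirrors the unit
/// test below; not compiled as a doc test):
///
/// ```ignore
/// let mut hmc = HamiltonianMonteCarlo::new(2, 0.1, 10);
/// let samples = hmc
///     .sample(
///         |x| -0.5 * (x[0].powi(2) + x[1].powi(2)), // log density (up to a constant)
///         |x| Array1::from_vec(vec![-x[0], -x[1]]), // gradient of the log density
///         Array1::from_vec(vec![0.0, 0.0]),         // initial state
///         1_000,
///     )
///     .expect("sampling should succeed");
/// ```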
#[derive(Debug)]
pub struct HamiltonianMonteCarlo {
    step_size: f64,
    num_leapfrog_steps: usize,
    mass_matrix: Array2<f64>,
    adapted_step_size: f64,
    adaptation_window: usize,
    target_acceptance_rate: f64,
    acceptance_history: VecDeque<bool>,
}

impl HamiltonianMonteCarlo {
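    /// Creates a sampler with an identity mass matrix, a 100-iteration
    /// adaptation window, and a target acceptance rate of 0.8.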
    pub fn new(dimension: usize, step_size: f64, num_leapfrog_steps: usize) -> Self {
        Self {
            step_size,
            num_leapfrog_steps,
            mass_matrix: Array2::eye(dimension),
            adapted_step_size: step_size,
            adaptation_window: 100,
            target_acceptance_rate: 0.8,
            acceptance_history: VecDeque::new(),
        }
    }

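    /// Replaces the default identity mass matrix; only the diagonal is used
    /// by the integrator below.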
    pub fn with_mass_matrix(mut self, mass_matrix: Array2<f64>) -> Self {
        self.mass_matrix = mass_matrix;
        self
    }

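    /// Draws `num_samples` states from the target whose log density and
    /// gradient are supplied by the caller, adapting the step size every 50
    /// iterations over a sliding window of recent acceptances.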
    pub fn sample<F, G>(
        &mut self,
        log_density: F,
        gradient: G,
        initial_state: Array1<f64>,
        num_samples: usize,
    ) -> Result<Vec<Array1<f64>>, String>
    where
        F: Fn(&Array1<f64>) -> f64,
        G: Fn(&Array1<f64>) -> Array1<f64>,
    {
        let mut rng = seeded_rng(42);
        let mut current_state = initial_state;
        let mut current_log_density = log_density(&current_state);
        let mut samples = Vec::with_capacity(num_samples);

        for i in 0..num_samples {
            let momentum = self.sample_momentum(&mut rng)?;

            let (proposed_state, proposed_momentum) =
                self.leapfrog_integration(&current_state, &momentum, &gradient)?;

            let proposed_log_density = log_density(&proposed_state);
            let current_hamiltonian = -current_log_density + self.kinetic_energy(&momentum);
            let proposed_hamiltonian =
                -proposed_log_density + self.kinetic_energy(&proposed_momentum);

            // Metropolis correction on the change in total energy.
            let log_acceptance_prob = -(proposed_hamiltonian - current_hamiltonian);
            let accept = if log_acceptance_prob >= 0.0 {
                true
            } else {
                (rng.sample(Uniform::new(0.0, 1.0).expect("valid uniform range")) as f64).ln()
                    < log_acceptance_prob
            };

            if accept {
                current_state = proposed_state;
                current_log_density = proposed_log_density;
            }

            samples.push(current_state.clone());
            self.acceptance_history.push_back(accept);

            if i > 0 && i % 50 == 0 {
                self.adapt_step_size();
            }

            if self.acceptance_history.len() > self.adaptation_window {
                self.acceptance_history.pop_front();
            }
        }

        Ok(samples)
    }

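    /// Draws momentum `p ~ N(0, M)` for the diagonal mass matrix `M` by
    /// scaling independent standard normals by `sqrt(M[i][i])`.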
    fn sample_momentum(&self, rng: &mut Random<rand::rngs::StdRng>) -> Result<Array1<f64>, String> {
        let dimension = self.mass_matrix.nrows();
        let mut momentum = Array1::zeros(dimension);

        for i in 0..dimension {
            momentum[i] = rng.sample(Normal::new(0.0, 1.0).expect("valid normal parameters"));
        }

        for i in 0..dimension {
            momentum[i] *= self.mass_matrix[[i, i]].sqrt();
        }

        Ok(momentum)
    }

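    /// Leapfrog (Stormer-Verlet) integration of Hamiltonian dynamics: a half
    /// step for momentum, alternating full steps for position and momentum,
    /// and a closing half step, followed by momentum negation so the proposal
    /// is reversible.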
    fn leapfrog_integration<G>(
        &self,
        initial_position: &Array1<f64>,
        initial_momentum: &Array1<f64>,
        gradient: G,
    ) -> Result<(Array1<f64>, Array1<f64>), String>
    where
        G: Fn(&Array1<f64>) -> Array1<f64>,
    {
        let mut position = initial_position.clone();
        let mut momentum = initial_momentum.clone();

        // Half step for momentum.
        let grad = gradient(&position);
        for i in 0..momentum.len() {
            momentum[i] += 0.5 * self.adapted_step_size * grad[i];
        }

        for step in 0..self.num_leapfrog_steps {
            // Full step for position (diagonal mass matrix).
            for i in 0..position.len() {
                position[i] += self.adapted_step_size * momentum[i] / self.mass_matrix[[i, i]];
            }

            // Full step for momentum on all but the last iteration; the final
            // position update is paired with the closing half step instead.
            if step + 1 < self.num_leapfrog_steps {
                let grad = gradient(&position);
                for i in 0..momentum.len() {
                    momentum[i] += self.adapted_step_size * grad[i];
                }
            }
        }

        // Closing half step for momentum.
        let grad = gradient(&position);
        for i in 0..momentum.len() {
            momentum[i] += 0.5 * self.adapted_step_size * grad[i];
        }

        // Negate momentum so the proposal map is an involution.
        for i in 0..momentum.len() {
            momentum[i] = -momentum[i];
        }

        Ok((position, momentum))
    }

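    /// Kinetic energy `K(p) = 0.5 * sum_i p_i^2 / M[i][i]` for the diagonal
    /// mass matrix.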
    fn kinetic_energy(&self, momentum: &Array1<f64>) -> f64 {
        let mut energy = 0.0;
        for i in 0..momentum.len() {
            energy += 0.5 * momentum[i] * momentum[i] / self.mass_matrix[[i, i]];
        }
        energy
    }

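    /// Multiplicative step-size adaptation: grow by 10% when the recent
    /// acceptance rate exceeds the target, shrink by 10% otherwise, clamped
    /// to `[1e-6, 10.0]`.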
    fn adapt_step_size(&mut self) {
        if self.acceptance_history.is_empty() {
            return;
        }

        let acceptance_rate = self
            .acceptance_history
            .iter()
            .map(|&accepted| if accepted { 1.0 } else { 0.0 })
            .sum::<f64>()
            / self.acceptance_history.len() as f64;

        let adaptation_rate = 0.1;
        if acceptance_rate > self.target_acceptance_rate {
            self.adapted_step_size *= 1.0 + adaptation_rate;
        } else {
            self.adapted_step_size *= 1.0 - adaptation_rate;
        }

        self.adapted_step_size = self.adapted_step_size.clamp(1e-6, 10.0);
    }

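    /// Fraction of accepted proposals over the current adaptation window.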
    pub fn acceptance_rate(&self) -> f64 {
        if self.acceptance_history.is_empty() {
            0.0
        } else {
            self.acceptance_history
                .iter()
                .map(|&accepted| if accepted { 1.0 } else { 0.0 })
                .sum::<f64>()
                / self.acceptance_history.len() as f64
        }
    }
}

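/// A simplified No-U-Turn-style sampler. Unlike full NUTS (Hoffman & Gelman,
/// 2014), it does not build a balanced binary trajectory tree; it extends a
/// single trajectory for up to `max_tree_depth` leapfrog steps and stops on a
/// crude momentum-reversal check, with diagonal mass-matrix adaptation from
/// recent samples during warm-up.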
#[derive(Debug)]
pub struct NoUTurnSampler {
    dimension: usize,
    step_size: f64,
    max_tree_depth: usize,
    target_acceptance_rate: f64,
    adaptation_phase_length: usize,
    mass_matrix: Array2<f64>,
}

impl NoUTurnSampler {
    pub fn new(dimension: usize) -> Self {
        Self {
            dimension,
            step_size: 0.1,
            max_tree_depth: 10,
            target_acceptance_rate: 0.8,
            adaptation_phase_length: 1000,
            mass_matrix: Array2::eye(dimension),
        }
    }

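    /// Samples with a warm-up phase: during the first
    /// `min(adaptation_phase_length, num_samples / 2)` iterations, the mass
    /// matrix is re-estimated every 50 samples from the most recent batch.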
    pub fn sample_adaptive<F, G>(
        &mut self,
        log_density: F,
        gradient: G,
        initial_state: Array1<f64>,
        num_samples: usize,
    ) -> Result<Vec<Array1<f64>>, String>
    where
        F: Fn(&Array1<f64>) -> f64,
        G: Fn(&Array1<f64>) -> Array1<f64>,
    {
        let mut rng = seeded_rng(42);
        let mut current_state = initial_state;
        let mut samples = Vec::with_capacity(num_samples);

        let adaptation_samples = self.adaptation_phase_length.min(num_samples / 2);

        for i in 0..num_samples {
            let (new_state, _) =
                self.build_tree(&current_state, &log_density, &gradient, &mut rng)?;

            current_state = new_state;
            samples.push(current_state.clone());

            if i < adaptation_samples && i > 0 && i % 50 == 0 {
                self.adapt_parameters(&samples[i.saturating_sub(50)..]);
            }
        }

        Ok(samples)
    }

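    /// Extends a single trajectory from `initial_state` with a fresh momentum
    /// draw, accepting the first point inside the slice
    /// `u < exp(log_density(q) - K(p))` and stopping early when the momentum
    /// reverses direction relative to the initial draw.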
    fn build_tree<F, G>(
        &self,
        initial_state: &Array1<f64>,
        log_density: &F,
        gradient: &G,
        rng: &mut Random<rand::rngs::StdRng>,
    ) -> Result<(Array1<f64>, bool), String>
    where
        F: Fn(&Array1<f64>) -> f64,
        G: Fn(&Array1<f64>) -> Array1<f64>,
    {
        // Draw momentum p ~ N(0, M) for the diagonal mass matrix.
        let mut momentum = Array1::zeros(self.dimension);
        for i in 0..self.dimension {
            momentum[i] = rng.sample(Normal::new(0.0, 1.0).expect("valid normal parameters"))
                * self.mass_matrix[[i, i]].sqrt();
        }
        let initial_momentum = momentum.clone();

        // Slice threshold: log u plus the log joint density of the start point.
        let mut current_state = initial_state.clone();
        let slice_u: f64 = rng.sample(Uniform::new(0.0, 1.0).expect("valid uniform range"));
        let log_slice_u =
            slice_u.ln() + log_density(&current_state) - self.kinetic_energy(&momentum);

        let mut position = initial_state.clone();
        for _ in 0..self.max_tree_depth {
            // Extend the trajectory in a random direction.
            let direction = if rng.sample(Uniform::new(0.0, 1.0).expect("valid uniform range")) < 0.5 {
                -1.0
            } else {
                1.0
            };

            let (new_state, new_momentum) = self.leapfrog_step(
                &position,
                &momentum,
                direction * self.step_size,
                gradient,
            )?;
            position = new_state.clone();
            momentum = new_momentum.clone();

            let new_log_density = log_density(&new_state);
            let new_log_joint = new_log_density - self.kinetic_energy(&new_momentum);

            // Accept the first point that lies inside the slice.
            if new_log_joint > log_slice_u {
                current_state = new_state;
                break;
            }

            // Crude U-turn criterion: stop once the momentum has reversed
            // relative to the initial draw.
            if self.compute_dot_product(&initial_momentum, &new_momentum) < 0.0 {
                break;
            }
        }

        Ok((current_state, true))
    }

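    /// A single leapfrog step (half momentum, full position, half momentum)
    /// with a signed step size so the trajectory can be extended in either
    /// direction.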
    fn leapfrog_step<G>(
        &self,
        position: &Array1<f64>,
        momentum: &Array1<f64>,
        step_size: f64,
        gradient: G,
    ) -> Result<(Array1<f64>, Array1<f64>), String>
    where
        G: Fn(&Array1<f64>) -> Array1<f64>,
    {
        let mut new_momentum = momentum.clone();
        let mut new_position = position.clone();

        let grad = gradient(&new_position);
        for i in 0..new_momentum.len() {
            new_momentum[i] += 0.5 * step_size * grad[i];
        }

        for i in 0..new_position.len() {
            new_position[i] += step_size * new_momentum[i] / self.mass_matrix[[i, i]];
        }

        let grad = gradient(&new_position);
        for i in 0..new_momentum.len() {
            new_momentum[i] += 0.5 * step_size * grad[i];
        }

        Ok((new_position, new_momentum))
    }

    /// Kinetic energy under the (diagonal) adapted mass matrix, matching the
    /// momentum draw and position update above.
    fn kinetic_energy(&self, momentum: &Array1<f64>) -> f64 {
        momentum
            .iter()
            .enumerate()
            .map(|(i, &p)| 0.5 * p * p / self.mass_matrix[[i, i]])
            .sum()
    }

    fn compute_dot_product(&self, momentum1: &Array1<f64>, momentum2: &Array1<f64>) -> f64 {
        momentum1
            .iter()
            .zip(momentum2.iter())
            .map(|(&a, &b)| a * b)
            .sum()
    }

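    /// Re-estimates the diagonal of the mass matrix from the sample
    /// covariance of the most recent batch (at least 10 samples).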
    fn adapt_parameters(&mut self, recent_samples: &[Array1<f64>]) {
        if recent_samples.len() < 10 {
            return;
        }

        let mean = self.compute_sample_mean(recent_samples);
        let covariance = self.compute_sample_covariance(recent_samples, &mean);

        for i in 0..self.dimension {
            if covariance[[i, i]] > 1e-10 {
                self.mass_matrix[[i, i]] = covariance[[i, i]];
            }
        }
    }

    fn compute_sample_mean(&self, samples: &[Array1<f64>]) -> Array1<f64> {
        let mut mean = Array1::zeros(self.dimension);
        for sample in samples {
            for i in 0..self.dimension {
                mean[i] += sample[i];
            }
        }
        for i in 0..self.dimension {
            mean[i] /= samples.len() as f64;
        }
        mean
    }

    fn compute_sample_covariance(
        &self,
        samples: &[Array1<f64>],
        mean: &Array1<f64>,
    ) -> Array2<f64> {
        let mut cov = Array2::zeros((self.dimension, self.dimension));
        for sample in samples {
            for i in 0..self.dimension {
                for j in 0..self.dimension {
                    let diff_i = sample[i] - mean[i];
                    let diff_j = sample[j] - mean[j];
                    cov[[i, j]] += diff_i * diff_j;
                }
            }
        }
        for i in 0..self.dimension {
            for j in 0..self.dimension {
                cov[[i, j]] /= (samples.len() - 1) as f64;
            }
        }
        cov
    }
}

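/// Stein variational gradient descent (SVGD, Liu & Wang 2016): a set of
/// interacting particles is moved along
/// `phi(x_i) = (1/n) * sum_j [ k(x_j, x_i) * grad log p(x_j) + grad_{x_j} k(x_j, x_i) ]`,
/// where `k` is an RBF kernel with a median-heuristic bandwidth. The first
/// term drags particles toward high density; the second pushes them apart.
///
/// A minimal sketch for a standard-normal target (mirrors the unit test
/// below; not compiled as a doc test):
///
/// ```ignore
/// let mut svgd = SteinVariationalGradientDescent::new(50, 0.1);
/// svgd.initialize_particles(2, 42);
/// let init = svgd.get_particles().clone();
/// let particles = svgd
///     .optimize(
///         |x| -0.5 * (x[0].powi(2) + x[1].powi(2)), // log density (unused by the update)
///         |x| Array1::from_vec(vec![-x[0], -x[1]]), // gradient of the log density
///         init,
///         100,
///     )
///     .expect("optimization should succeed");
/// ```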
#[derive(Debug)]
pub struct SteinVariationalGradientDescent {
    num_particles: usize,
    step_size: f64,
    bandwidth_scale: f64,
    particles: Array2<f64>,
}

impl SteinVariationalGradientDescent {
    pub fn new(num_particles: usize, step_size: f64) -> Self {
        Self {
            num_particles,
            step_size,
            bandwidth_scale: 1.0,
            // Zero-width placeholder; `initialize_particles` or `optimize`
            // sets the real dimension later.
            particles: Array2::zeros((num_particles, 0)),
        }
    }

    pub fn initialize_particles(&mut self, dimension: usize, seed: u64) {
        let mut rng = seeded_rng(seed);
        self.particles = Array2::zeros((self.num_particles, dimension));

        for i in 0..self.num_particles {
            for j in 0..dimension {
                self.particles[[i, j]] =
                    rng.sample(Normal::new(0.0, 1.0).expect("valid normal parameters"));
            }
        }
    }

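    /// Runs `num_iterations` of the SVGD update from `initial_particles`,
    /// decaying the step size by 1% every 100 iterations. The log density is
    /// accepted for API symmetry with the samplers, but only the gradient is
    /// used by the update.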
    pub fn optimize<F, G>(
        &mut self,
        _log_density: F,
        gradient: G,
        initial_particles: Array2<f64>,
        num_iterations: usize,
    ) -> Result<Array2<f64>, String>
    where
        F: Fn(&Array1<f64>) -> f64,
        G: Fn(&Array1<f64>) -> Array1<f64>,
    {
        self.particles = initial_particles;
        let dimension = self.particles.ncols();

        for iter in 0..num_iterations {
            let bandwidth = self.compute_bandwidth();

            for i in 0..self.num_particles {
                let mut particle_update: Array1<f64> = Array1::zeros(dimension);

                for j in 0..self.num_particles {
                    let (kernel_val, kernel_grad) = self.rbf_kernel_and_gradient(i, j, bandwidth);

                    let particle_j = self.particles.row(j).to_owned();
                    let grad_log_p = gradient(&particle_j);

                    // Attraction toward high density plus kernel repulsion.
                    for d in 0..dimension {
                        particle_update[d] += kernel_val * grad_log_p[d] + kernel_grad[d];
                    }
                }

                for d in 0..dimension {
                    self.particles[[i, d]] +=
                        self.step_size * particle_update[d] / self.num_particles as f64;
                }
            }

            // Gentle step-size decay.
            if iter > 0 && iter % 100 == 0 {
                self.step_size *= 0.99;
            }
        }

        Ok(self.particles.clone())
    }

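    /// RBF kernel `k = exp(-||x_i - x_j||^2 / h)` between particles `i` and
    /// `j`, together with its gradient with respect to particle `j` (the
    /// repulsive term of the SVGD update).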
    fn rbf_kernel_and_gradient(&self, i: usize, j: usize, bandwidth: f64) -> (f64, Array1<f64>) {
        let dimension = self.particles.ncols();
        let mut diff = Array1::zeros(dimension);
        let mut squared_distance = 0.0;

        for d in 0..dimension {
            diff[d] = self.particles[[i, d]] - self.particles[[j, d]];
            squared_distance += diff[d] * diff[d];
        }

        let kernel_val = (-squared_distance / bandwidth).exp();
        let mut kernel_grad = Array1::zeros(dimension);

        // Gradient with respect to x_j: d/dx_j exp(-||x_i - x_j||^2 / h)
        // = (2 / h) * (x_i - x_j) * k, which repels particle i from j.
        for d in 0..dimension {
            kernel_grad[d] = 2.0 * diff[d] * kernel_val / bandwidth;
        }

        (kernel_val, kernel_grad)
    }

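    /// Median heuristic: `h = scale * median^2 / (2 ln n)`, where `median` is
    /// the median pairwise distance between particles.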
    fn compute_bandwidth(&self) -> f64 {
        let mut distances = Vec::new();

        for i in 0..self.num_particles {
            for j in (i + 1)..self.num_particles {
                let mut dist_sq = 0.0;
                for d in 0..self.particles.ncols() {
                    let diff = self.particles[[i, d]] - self.particles[[j, d]];
                    dist_sq += diff * diff;
                }
                distances.push(dist_sq.sqrt());
            }
        }

        distances.sort_by(|a, b| a.partial_cmp(b).expect("distances are not NaN"));
        let median_distance = if distances.is_empty() {
            1.0
        } else {
            distances[distances.len() / 2]
        };

        self.bandwidth_scale * median_distance * median_distance
            / (2.0 * (self.num_particles as f64).ln())
    }

    pub fn get_particles(&self) -> &Array2<f64> {
        &self.particles
    }

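    /// Empirical mean and (unbiased) covariance of the current particle set.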
    pub fn estimate_statistics(&self) -> (Array1<f64>, Array2<f64>) {
        let dimension = self.particles.ncols();

        let mut mean = Array1::zeros(dimension);
        for i in 0..self.num_particles {
            for j in 0..dimension {
                mean[j] += self.particles[[i, j]];
            }
        }
        for j in 0..dimension {
            mean[j] /= self.num_particles as f64;
        }

        let mut covariance = Array2::zeros((dimension, dimension));
        for i in 0..self.num_particles {
            for j in 0..dimension {
                for k in 0..dimension {
                    let diff_j = self.particles[[i, j]] - mean[j];
                    let diff_k = self.particles[[i, k]] - mean[k];
                    covariance[[j, k]] += diff_j * diff_k;
                }
            }
        }
        for j in 0..dimension {
            for k in 0..dimension {
                covariance[[j, k]] /= (self.num_particles - 1) as f64;
            }
        }

        (mean, covariance)
    }
}

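/// Elliptical slice sampler (Murray, Adams & MacKay, 2010) for posteriors of
/// the form `likelihood(x) * N(x; 0, prior_covariance)`: proposals move along
/// an ellipse through the current state and an auxiliary Gaussian draw, with
/// the angular bracket shrinking until a point passes the likelihood slice.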
#[derive(Debug)]
pub struct EllipticalSliceSampler {
    prior_covariance: Array2<f64>,
    dimension: usize,
}

impl EllipticalSliceSampler {
    pub fn new(prior_covariance: Array2<f64>) -> Self {
        let dimension = prior_covariance.nrows();
        Self {
            prior_covariance,
            dimension,
        }
    }

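    /// Draws `num_samples` states given only the log likelihood; the Gaussian
    /// prior is handled exactly by the elliptical proposal, so no gradient or
    /// step-size tuning is required.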
    pub fn sample<F>(
        &self,
        log_likelihood: F,
        initial_state: Array1<f64>,
        num_samples: usize,
        seed: u64,
    ) -> Result<Vec<Array1<f64>>, String>
    where
        F: Fn(&Array1<f64>) -> f64,
    {
        let mut rng = seeded_rng(seed);
        let mut current_state = initial_state;
        let mut samples = Vec::with_capacity(num_samples);

        let mvn = MultivariateNormal::new(
            vec![0.0; self.dimension],
            self.array_to_vec2d(&self.prior_covariance),
        )
        .map_err(|e| format!("Failed to create MVN: {}", e))?;

        for _ in 0..num_samples {
            // Auxiliary draw from the prior defines the ellipse.
            let nu = Array1::from_vec(mvn.sample(&mut rng));

            // Log-likelihood threshold for the slice.
            let log_y = log_likelihood(&current_state)
                + (rng.sample(Uniform::new(0.0, 1.0).expect("valid uniform range")) as f64).ln();

            // Initial angle and shrinking bracket [theta_min, theta_max].
            let mut theta = rng
                .sample(Uniform::new(0.0, 2.0 * std::f64::consts::PI).expect("valid uniform range"));
            let mut theta_min = theta - 2.0 * std::f64::consts::PI;
            let mut theta_max = theta;

            loop {
                let cos_theta = theta.cos();
                let sin_theta = theta.sin();

                let mut proposal = Array1::zeros(self.dimension);
                for i in 0..self.dimension {
                    proposal[i] = current_state[i] * cos_theta + nu[i] * sin_theta;
                }

                if log_likelihood(&proposal) > log_y {
                    current_state = proposal;
                    break;
                }

                // Shrink the bracket toward the current state and resample
                // the angle; the proposal at theta = 0 recovers the current
                // state, so this loop terminates.
                if theta < 0.0 {
                    theta_min = theta;
                } else {
                    theta_max = theta;
                }

                let new_theta =
                    rng.sample(Uniform::new(theta_min, theta_max).expect("valid uniform range"));
                if (new_theta - theta).abs() < 1e-10 {
                    break;
                }
                theta = new_theta;
            }

            samples.push(current_state.clone());
        }

        Ok(samples)
    }

    fn array_to_vec2d(&self, array: &Array2<f64>) -> Vec<Vec<f64>> {
        array.rows().into_iter().map(|row| row.to_vec()).collect()
    }
}

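/// Parallel tempering (replica exchange): several Metropolis chains run at a
/// geometric ladder of temperatures from 1 to `max_temperature`, and adjacent
/// chains periodically attempt state swaps so hot chains can ferry the cold
/// chain out of local modes. Returned samples come from the `T = 1` chain.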
#[derive(Debug)]
pub struct ParallelTempering {
    num_chains: usize,
    temperatures: Vec<f64>,
    swap_frequency: usize,
    chains: Vec<Array1<f64>>,
}

impl ParallelTempering {
    pub fn new(num_chains: usize, max_temperature: f64) -> Self {
        // Geometric temperature ladder from 1.0 up to max_temperature.
        let temperatures: Vec<f64> = (0..num_chains)
            .map(|i| max_temperature.powf(i as f64 / (num_chains - 1) as f64))
            .collect();

        Self {
            num_chains,
            temperatures,
            swap_frequency: 10,
            chains: Vec::new(),
        }
    }

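    /// Runs all chains for `num_samples` iterations, attempting adjacent-pair
    /// swaps every `swap_frequency` iterations, and records the cold chain.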
    pub fn sample<F>(
        &mut self,
        log_density: F,
        initial_states: Vec<Array1<f64>>,
        num_samples: usize,
        seed: u64,
    ) -> Result<Vec<Array1<f64>>, String>
    where
        F: Fn(&Array1<f64>) -> f64 + Send + Sync,
    {
        if initial_states.len() != self.num_chains {
            return Err("Number of initial states must match number of chains".to_string());
        }

        self.chains = initial_states;
        let mut samples = Vec::with_capacity(num_samples);
        let mut rng = seeded_rng(seed);

        for iter in 0..num_samples {
            for chain_idx in 0..self.num_chains {
                let temperature = self.temperatures[chain_idx];
                self.metropolis_update(chain_idx, temperature, &log_density, &mut rng)?;
            }

            if iter % self.swap_frequency == 0 {
                self.attempt_swaps(&log_density, &mut rng)?;
            }

            // Record only the cold (T = 1) chain.
            samples.push(self.chains[0].clone());
        }

        Ok(samples)
    }

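    /// Random-walk Metropolis step on the tempered density `pi(x)^(1/T)`;
    /// hotter chains use proportionally larger proposal steps.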
    fn metropolis_update<F>(
        &mut self,
        chain_idx: usize,
        temperature: f64,
        log_density: &F,
        rng: &mut Random<rand::rngs::StdRng>,
    ) -> Result<(), String>
    where
        F: Fn(&Array1<f64>) -> f64,
    {
        let current_state = &self.chains[chain_idx];
        let dimension = current_state.len();

        let mut proposal = current_state.clone();
        let step_size = 0.1 * temperature.sqrt();
        for i in 0..dimension {
            proposal[i] += rng.sample(Normal::new(0.0, step_size).expect("valid normal parameters"));
        }

        let current_log_density = log_density(current_state);
        let proposal_log_density = log_density(&proposal);

        // Tempered acceptance ratio: (log pi(x') - log pi(x)) / T.
        let log_acceptance_prob = (proposal_log_density - current_log_density) / temperature;

        if log_acceptance_prob >= 0.0
            || (rng.sample(Uniform::new(0.0, 1.0).expect("valid uniform range")) as f64).ln()
                < log_acceptance_prob
        {
            self.chains[chain_idx] = proposal;
        }

        Ok(())
    }

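    /// Attempts a swap for each adjacent temperature pair, accepting with log
    /// probability `(log pi(x_j) - log pi(x_i)) * (1/T_i - 1/T_j)`, which
    /// preserves the joint tempered distribution.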
    fn attempt_swaps<F>(
        &mut self,
        log_density: &F,
        rng: &mut Random<rand::rngs::StdRng>,
    ) -> Result<(), String>
    where
        F: Fn(&Array1<f64>) -> f64,
    {
        for i in 0..(self.num_chains - 1) {
            let temp_i = self.temperatures[i];
            let temp_j = self.temperatures[i + 1];

            let log_density_i = log_density(&self.chains[i]);
            let log_density_j = log_density(&self.chains[i + 1]);

            let log_swap_prob = (log_density_j - log_density_i) * (1.0 / temp_i - 1.0 / temp_j);

            if log_swap_prob >= 0.0
                || (rng.sample(Uniform::new(0.0, 1.0).expect("valid uniform range")) as f64).ln()
                    < log_swap_prob
            {
                self.chains.swap(i, i + 1);
            }
        }

        Ok(())
    }

    pub fn get_chain_states(&self) -> &[Array1<f64>] {
        &self.chains
    }

    pub fn get_temperatures(&self) -> &[f64] {
        &self.temperatures
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    #[test]
    fn test_hmc_basic() {
        let mut hmc = HamiltonianMonteCarlo::new(2, 0.1, 10);

        let samples = hmc
            .sample(
                |x| -0.5 * (x[0].powi(2) + x[1].powi(2)),
                |x| Array1::from_vec(vec![-x[0], -x[1]]),
                Array1::from_vec(vec![0.0, 0.0]),
                100,
            )
            .expect("HMC sampling should succeed");

        assert_eq!(samples.len(), 100);

        let mean_x: f64 = samples.iter().map(|s| s[0]).sum::<f64>() / samples.len() as f64;
        let mean_y: f64 = samples.iter().map(|s| s[1]).sum::<f64>() / samples.len() as f64;

        assert_relative_eq!(mean_x, 0.0, epsilon = 0.5);
        assert_relative_eq!(mean_y, 0.0, epsilon = 0.5);
    }

    #[test]
    fn test_nuts_basic() {
        let mut nuts = NoUTurnSampler::new(2);

        let samples = nuts
            .sample_adaptive(
                |x| -0.5 * (x[0].powi(2) + x[1].powi(2)),
                |x| Array1::from_vec(vec![-x[0], -x[1]]),
                Array1::from_vec(vec![0.0, 0.0]),
                100,
            )
            .expect("NUTS sampling should succeed");

        assert_eq!(samples.len(), 100);
    }

    #[test]
    fn test_svgd_basic() {
        let mut svgd = SteinVariationalGradientDescent::new(50, 0.1);

        let mut initial_particles = Array2::zeros((50, 2));
        let mut rng = seeded_rng(42);
        for i in 0..50 {
            for j in 0..2 {
                initial_particles[[i, j]] =
                    rng.sample(Normal::new(0.0, 2.0).expect("valid normal parameters"));
            }
        }

        let final_particles = svgd
            .optimize(
                |x| -0.5 * (x[0].powi(2) + x[1].powi(2)),
                |x| Array1::from_vec(vec![-x[0], -x[1]]),
                initial_particles,
                100,
            )
            .expect("SVGD optimization should succeed");

        assert_eq!(final_particles.nrows(), 50);
        assert_eq!(final_particles.ncols(), 2);

        let (mean, _) = svgd.estimate_statistics();
        assert_relative_eq!(mean[0], 0.0, epsilon = 0.5);
        assert_relative_eq!(mean[1], 0.0, epsilon = 0.5);
    }

    #[test]
    fn test_elliptical_slice_sampling() {
        let prior_cov = Array2::eye(2);
        let ess = EllipticalSliceSampler::new(prior_cov);

        let samples = ess
            .sample(
                |x| -0.5 * (x[0].powi(2) + x[1].powi(2)),
                Array1::from_vec(vec![0.0, 0.0]),
                50,
                42,
            )
            .expect("elliptical slice sampling should succeed");

        assert_eq!(samples.len(), 50);
    }

    #[test]
    fn test_parallel_tempering() {
        let mut pt = ParallelTempering::new(4, 10.0);

        let initial_states = vec![
            Array1::from_vec(vec![0.0, 0.0]),
            Array1::from_vec(vec![1.0, 1.0]),
            Array1::from_vec(vec![-1.0, -1.0]),
            Array1::from_vec(vec![0.0, 1.0]),
        ];

        let samples = pt
            .sample(
                |x| -0.5 * (x[0].powi(2) + x[1].powi(2)),
                initial_states,
                100,
                42,
            )
            .expect("parallel tempering should succeed");

        assert_eq!(samples.len(), 100);
        assert_eq!(pt.get_temperatures().len(), 4);
    }
}