mini_mcmc/lib.rs
//! # Mini MCMC
//!
//! A compact Rust library offering **Markov Chain Monte Carlo (MCMC)** methods,
//! including **Hamiltonian Monte Carlo (HMC)**, **Metropolis–Hastings**, and
//! **Gibbs Sampling** for both discrete and continuous targets.
//!
//! ## Getting Started
//!
//! To use this library, add it to your project:
//! ```bash
//! cargo add mini-mcmc
//! ```
//!
//! The library provides three main sampling approaches:
//! 1. **Metropolis-Hastings**: For general-purpose sampling. You need to provide:
//!    - A target distribution implementing the `Target` trait
//!    - A proposal distribution implementing the `Proposal` trait
//! 2. **Hamiltonian Monte Carlo (HMC)**: For continuous distributions with gradients. You need to provide:
//!    - A target distribution implementing the `GradientTarget` trait
//! 3. **Gibbs Sampling**: For sampling when conditional distributions are available. You need to provide:
//!    - A distribution implementing the `Conditional` trait
//!
//! ## Example 1: Sampling a 2D Gaussian (Metropolis–Hastings)
//!
//! ```rust
//! use mini_mcmc::core::{ChainRunner, init};
//! use mini_mcmc::distributions::{Gaussian2D, IsotropicGaussian};
//! use mini_mcmc::metropolis_hastings::MetropolisHastings;
//! use ndarray::{arr1, arr2};
//!
//! // A standard 2D Gaussian target with an isotropic Gaussian random-walk proposal.
//! let target = Gaussian2D {
//!     mean: arr1(&[0.0, 0.0]),
//!     cov: arr2(&[[1.0, 0.0], [0.0, 1.0]]),
//! };
//! let proposal = IsotropicGaussian::new(1.0);
//!
//! // 4 chains in 2 dimensions: 100 burn-in iterations, then 1000 recorded ones.
//! let mut mh = MetropolisHastings::new(target, proposal, init(4, 2));
//! let samples = mh.run(1000, 100).unwrap();
//! println!("Metropolis–Hastings samples shape: {:?}", samples.shape());
//! ```
//!
//! ## Example 2: Sampling a 3D Rosenbrock (HMC)
//!
//! ```rust
//! use burn::tensor::Element;
//! use burn::{backend::Autodiff, prelude::Tensor};
//! use mini_mcmc::hmc::HMC;
//! use mini_mcmc::distributions::GradientTarget;
//! use mini_mcmc::core::init;
//! use num_traits::Float;
//!
//! /// The 3D Rosenbrock distribution.
//! ///
//! /// For a point x = (x₁, x₂, x₃), the unnormalized log density is -f(x), where
//! ///
//! /// f(x) = 100*(x₂ - x₁²)² + (1 - x₁)² + 100*(x₃ - x₂²)² + (1 - x₂)².
//! ///
//! /// This implementation generalizes to d dimensions, but here we use it in 3D.
//! struct RosenbrockND {}
//!
//! impl<T, B> GradientTarget<T, B> for RosenbrockND
//! where
//!     T: Float + std::fmt::Debug + Element,
//!     B: burn::tensor::backend::AutodiffBackend,
//! {
//!     fn unnorm_logp(&self, positions: Tensor<B, 2>) -> Tensor<B, 1> {
//!         // Assume positions has shape [n_chains, d] with d = 3.
//!         let k = positions.dims()[0] as i64;
//!         let n = positions.dims()[1] as i64;
//!         // Pair consecutive coordinates: "low" = (x₁, …, x_{d-1}), "high" = (x₂, …, x_d).
//!         let low = positions.clone().slice([(0, k), (0, n - 1)]);
//!         let high = positions.clone().slice([(0, k), (1, n)]);
//!         let term_1 = (high - low.clone().powi_scalar(2))
//!             .powi_scalar(2)
//!             .mul_scalar(100);
//!         let term_2 = low.neg().add_scalar(1).powi_scalar(2);
//!         // Sum the pairwise terms and negate to get one log density per chain.
//!         -(term_1 + term_2).sum_dim(1).squeeze(1)
//!     }
//! }
//!
//! // Use the CPU backend wrapped in Autodiff (e.g., NdArray).
//! type BackendType = Autodiff<burn::backend::NdArray>;
//!
//! // Create the 3D Rosenbrock target.
//! let target = RosenbrockND {};
//!
//! // Define initial positions for 6 chains (each a 3D point).
//! let initial_positions = init(6, 3);
//!
//! // Create the HMC sampler with a step size of 0.032 and 5 leapfrog steps.
//! let mut sampler = HMC::<f32, BackendType, RosenbrockND>::new(
//!     target,
//!     initial_positions,
//!     0.032,
//!     5,
//! );
//!
//! // Run for 45 burn-in iterations (discarded) plus 123 recorded iterations.
//! let samples = sampler.run(123, 45);
//!
//! // Print the shape of the collected samples.
//! println!("Collected samples with shape: {:?}", samples.dims());
//! ```
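//!
//! The backend is pluggable through burn. To run the same example on the GPU,
//! swap the backend type alias (a sketch, assuming burn's `wgpu` feature is
//! enabled):
//!
//! ```rust,ignore
//! // GPU variant of the type alias above; requires burn's `wgpu` feature.
//! type BackendType = Autodiff<burn::backend::Wgpu>;
//! ```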
//!
//! ## Example 3: Sampling a Poisson Distribution (Discrete)
//!
//! ```rust
//! use mini_mcmc::core::ChainRunner;
//! use mini_mcmc::distributions::{Proposal, Target};
//! use mini_mcmc::metropolis_hastings::MetropolisHastings;
//! use rand::Rng;
//!
//! #[derive(Clone)]
//! struct PoissonTarget {
//!     lambda: f64,
//! }
//!
//! impl Target<usize, f64> for PoissonTarget {
//!     /// Returns log p(k) for Poisson(k | lambda), where
//!     /// p(k) = exp(-lambda) * lambda^k / k!, so
//!     /// log p(k) = -lambda + k*ln(lambda) - ln(k!).
//!     /// An unnormalized log density would also suffice for MH acceptance.
//!     fn unnorm_logp(&self, theta: &[usize]) -> f64 {
//!         let k = theta[0];
//!         -self.lambda + (k as f64) * self.lambda.ln() - ln_factorial(k as u64)
//!     }
//! }
//!
//! /// A random-walk proposal on the nonnegative integers: from x > 0 it steps
//! /// to x - 1 or x + 1 with equal probability; from 0 it always steps to 1.
//! #[derive(Clone)]
//! struct NonnegativeProposal;
//!
//! impl Proposal<usize, f64> for NonnegativeProposal {
//!     fn sample(&mut self, current: &[usize]) -> Vec<usize> {
//!         let x = current[0];
//!         if x == 0 {
//!             vec![1]
//!         } else {
//!             let step_up = rand::thread_rng().gen_bool(0.5);
//!             vec![if step_up { x + 1 } else { x - 1 }]
//!         }
//!     }
//!
//!     /// Log probability of proposing `to` given `from`; NEG_INFINITY marks
//!     /// moves this proposal can never make.
//!     fn logp(&self, from: &[usize], to: &[usize]) -> f64 {
//!         let (x, y) = (from[0], to[0]);
//!         if x == 0 && y == 1 {
//!             0.0
//!         } else if x > 0 && (y == x + 1 || y == x - 1) {
//!             (0.5_f64).ln()
//!         } else {
//!             f64::NEG_INFINITY
//!         }
//!     }
//!
//!     fn set_seed(self, _seed: u64) -> Self {
//!         self
//!     }
//! }
//!
//! /// ln(k!) computed as a running sum of logarithms (fine for small k).
//! fn ln_factorial(k: u64) -> f64 {
//!     (1..=k).map(|v| (v as f64).ln()).sum()
//! }
//!
//! let target = PoissonTarget { lambda: 4.0 };
//! let proposal = NonnegativeProposal;
//! // A single chain starting at k = 0.
//! let initial_state = vec![vec![0]];
//!
//! let mut mh = MetropolisHastings::new(target, proposal, initial_state);
//! let samples = mh.run(5000, 100).unwrap();
//! println!("Poisson samples shape: {:?}", samples.shape());
//! ```
//!
//! For more complete implementations (including Gibbs sampling and I/O helpers),
//! see the `examples/` directory.
//!
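//! Gibbs sampling follows the same overall pattern as the examples above:
//! implement the `Conditional` trait for your distribution and drive it with the
//! sampler in the `gibbs` module. Below is a minimal sketch with hypothetical
//! signatures; consult the `gibbs` module docs and `examples/` for the actual API:
//!
//! ```rust,ignore
//! use mini_mcmc::core::init;
//! use mini_mcmc::gibbs::GibbsSampler; // hypothetical name, for illustration
//!
//! // Hypothetical conditional sampler: draws a new value for coordinate `i`
//! // given the current values of all coordinates.
//! struct MyConditional;
//!
//! impl Conditional<f64> for MyConditional {
//!     fn sample(&mut self, i: usize, current: &[f64]) -> f64 {
//!         // e.g. for a bivariate Gaussian, x_i | x_{-i} is itself Gaussian.
//!         todo!()
//!     }
//! }
//!
//! let mut sampler = GibbsSampler::new(MyConditional, init(4, 2));
//! let samples = sampler.run(1000, 100);
//! ```
//!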
//! ## Features
//! - **Parallel Chains** for improved throughput
//! - **Progress Indicators** (acceptance rates, iteration counts)
//! - **Common Distributions** (e.g. Gaussian) plus easy traits for custom log-prob
//! - **Optional I/O** (CSV, Arrow, Parquet) and GPU sampling (WGPU)
//! - **Effective Sample Size (ESS)** estimation following Stan's methodology
//! - **R-hat Diagnostics** for convergence monitoring (see the sketch below)
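//!
//! A hypothetical sketch of running the diagnostics on collected samples; the
//! actual helpers live in the `stats` module, and these function names are
//! assumptions for illustration only:
//!
//! ```rust,ignore
//! use mini_mcmc::stats;
//!
//! // `samples` as returned by e.g. `MetropolisHastings::run`, with one axis
//! // per chain. Hypothetical helper names:
//! let ess = stats::ess(&samples);   // effective sample size per dimension
//! let rhat = stats::rhat(&samples); // R-hat near 1.0 suggests convergence
//! ```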
//!
//! ## Roadmap
//! - No-U-Turn Sampler (NUTS)
//! - Rank-Normalized R-hat diagnostics
//! - Ensemble Slice Sampling (ESS)

pub mod core;
mod dev_tools;
pub mod distributions;
pub mod gibbs;
pub mod hmc;
pub mod io;
pub mod ks_test;
pub mod metropolis_hastings;
pub mod stats;