// markov_chain_monte_carlo/lib.rs
1//! Markov Chain Monte Carlo (MCMC) framework.
2//!
3//! 🚧 **Pre-release (0.x)** — This crate is under active development and
4//! not yet ready for production use. APIs may change without notice.
5//!
6//! This crate aims to provide a composable, zero-cost abstraction for MCMC
7//! methods over arbitrary state spaces, including discrete and combinatorial
8//! systems (e.g., triangulations).
9//!
10//! # Example
11//!
12//! Sample from a standard normal distribution using Metropolis–Hastings:
13//!
14//! ```
15//! use markov_chain_monte_carlo::prelude::*;
//! use rand::{Rng, SeedableRng, rngs::StdRng};
17//!
18//! #[derive(Clone)]
19//! struct Scalar(f64);
20//!
21//! struct Normal;
22//! impl Target<Scalar> for Normal {
23//! fn log_prob(&self, state: &Scalar) -> f64 {
24//! -0.5 * state.0 * state.0
25//! }
26//! }
27//!
28//! struct RandomWalk { width: f64 }
29//! impl Proposal<Scalar> for RandomWalk {
30//! fn propose<R: Rng + ?Sized>(&self, current: &Scalar, rng: &mut R) -> Scalar {
31//! let delta: f64 = rng.random_range(-self.width..self.width);
32//! Scalar(current.0 + delta)
33//! }
34//! }
35//!
36//! fn main() -> Result<(), McmcError> {
37//! let mut rng = StdRng::seed_from_u64(42);
38//! let mut chain = Chain::new(Scalar(0.0), &Normal)?;
39//! let proposal = RandomWalk { width: 1.0 };
40//!
41//! for _ in 0..1000 {
42//! chain.step(&Normal, &proposal, &mut rng)?;
43//! }
44//!
45//! assert!(chain.acceptance_rate() > 0.2);
46//! Ok(())
47//! }
48//! ```
49//!
50//! # In-place mutation with rollback
51//!
52//! For state spaces where cloning is expensive, use [`ProposalMut`] with
53//! [`Chain::step_mut`]. The proposal mutates the state in place and returns
54//! a small undo token for rollback on rejection:
55//!
56//! ```
57//! use markov_chain_monte_carlo::prelude::*;
//! use rand::{Rng, SeedableRng, rngs::StdRng};
59//!
60//! /// A lattice of spins (not Clone — only mutated in place).
61//! struct SpinChain { spins: Vec<i8> }
62//!
63//! /// Energy = −Σ s_i · s_{i+1} (1-D Ising, no field).
64//! struct Ising;
65//! impl Target<SpinChain> for Ising {
66//! fn log_prob(&self, state: &SpinChain) -> f64 {
67//! let s = &state.spins;
68//! let energy: f64 = s.windows(2)
69//! .map(|w| -f64::from(w[0]) * f64::from(w[1]))
70//! .sum();
71//! -energy // log_prob = −E (T = 1)
72//! }
73//! }
74//!
75//! /// Flip one random spin; undo token is the site index.
76//! struct SpinFlip;
77//! impl ProposalMut<SpinChain> for SpinFlip {
78//! type Undo = usize;
79//! fn propose_mut<R: Rng + ?Sized>(&self, state: &mut SpinChain, rng: &mut R) -> Option<usize> {
80//! if state.spins.is_empty() { return None; }
81//! let idx = rng.random_range(0..state.spins.len());
82//! state.spins[idx] *= -1;
83//! Some(idx)
84//! }
85//! fn undo(&self, state: &mut SpinChain, idx: usize) {
86//! state.spins[idx] *= -1; // flipping twice = identity
87//! }
88//! }
89//!
90//! fn main() -> Result<(), McmcError> {
91//! let mut rng = StdRng::seed_from_u64(42);
92//! let state = SpinChain { spins: vec![1; 20] };
93//! let mut chain = Chain::new(state, &Ising)?;
94//!
95//! for _ in 0..1000 {
96//! chain.step_mut(&Ising, &SpinFlip, &mut rng)?;
97//! }
98//!
99//! assert!(chain.acceptance_rate() > 0.0);
100//! Ok(())
101//! }
102//! ```
103//!
104//! # Ergonomic sampling with [`Sampler`]
105//!
106//! [`Sampler`] bundles a chain with its target, proposal, and RNG so you
107//! don't have to pass them on every step:
108//!
109//! ```
110//! use markov_chain_monte_carlo::prelude::*;
//! use rand::{Rng, SeedableRng, rngs::StdRng};
112//!
113//! # #[derive(Clone)] struct Scalar(f64);
114//! # struct Normal;
115//! # impl Target<Scalar> for Normal {
116//! # fn log_prob(&self, s: &Scalar) -> f64 { -0.5 * s.0 * s.0 }
117//! # }
118//! # struct Walk;
119//! # impl Proposal<Scalar> for Walk {
120//! # fn propose<R: Rng + ?Sized>(&self, c: &Scalar, r: &mut R) -> Scalar {
121//! # Scalar(c.0 + r.random_range(-1.0..1.0))
122//! # }
123//! # }
124//! let mut rng = StdRng::seed_from_u64(42);
125//! let chain = Chain::new(Scalar(0.0), &Normal)?;
126//! let mut sampler = Sampler::new(chain, &Normal, &Walk, &mut rng);
127//!
128//! // Burn-in
129//! sampler.run(1000)?;
130//! sampler.chain.reset_counters();
131//!
132//! // Production
133//! sampler.run(10_000)?;
134//! assert!(sampler.chain.acceptance_rate() > 0.0);
135//! # Ok::<(), McmcError>(())
136//! ```
137
// Internal module layout. Implementation lives in these private modules;
// everything public is re-exported below so the crate presents a flat API.
mod chain;
mod error;
mod sampler;
mod traits;

// Public API surface: users write `markov_chain_monte_carlo::Chain` (or pull
// everything in at once via the `prelude` module) without knowing the
// internal module structure.
pub use chain::Chain;
pub use error::McmcError;
pub use sampler::Sampler;
pub use traits::{Proposal, ProposalMut, Target};
147
148/// Convenience re-exports for common usage.
149///
150/// ```
151/// use markov_chain_monte_carlo::prelude::*;
152/// ```
153pub mod prelude {
154 pub use crate::{Chain, McmcError, Proposal, ProposalMut, Sampler, Target};
155}