// zenbench — src/lib.rs
// Without precise-timing or alloc-profiling, no unsafe is permitted anywhere.
// With either feature, unsafe is denied (errors) but the timing/alloc modules
// can override with #[allow(unsafe_code)] for TSC reads, asm fences, and GlobalAlloc.
4#![cfg_attr(
5    not(any(feature = "precise-timing", feature = "alloc-profiling")),
6    forbid(unsafe_code)
7)]
8#![cfg_attr(
9    any(feature = "precise-timing", feature = "alloc-profiling"),
10    deny(unsafe_code)
11)]
12#![doc = include_str!("../README.md")]
13
14#[cfg(feature = "alloc-profiling")]
15mod alloc;
16pub mod baseline;
17mod bench;
18pub mod calibration;
19#[cfg(feature = "charts")]
20pub mod charts;
21mod checks;
22mod ci;
23#[cfg(feature = "criterion-compat")]
24pub mod criterion_compat;
25pub mod daemon;
26mod engine;
27mod format;
28mod gate;
29mod html;
30pub mod mcp;
31pub mod platform;
32mod report;
33mod results;
34mod stats;
35#[cfg(feature = "precise-timing")]
36mod timing;
37
38pub use bench::{BenchGroup, Bencher, GroupConfig, Suite, Throughput};
39
40/// Post-run processing: format output, save baseline, compare against baseline.
41///
42/// Shared between `main!` and `criterion_main!` macros. Not intended for
43/// direct use — call via the macros instead.
44#[doc(hidden)]
45pub fn postprocess_result(result: &SuiteResult) {
46    let args: Vec<String> = std::env::args().collect();
47    let format = args
48        .iter()
49        .find_map(|a| a.strip_prefix("--format=").map(String::from))
50        .or_else(|| std::env::var("ZENBENCH_FORMAT").ok());
51    let save_baseline: Option<String> = args
52        .iter()
53        .find_map(|a| a.strip_prefix("--save-baseline=").map(String::from));
54    let baseline_name: Option<String> = args
55        .iter()
56        .find_map(|a| a.strip_prefix("--baseline=").map(String::from));
57    let max_regression: f64 = args
58        .iter()
59        .find_map(|a| {
60            a.strip_prefix("--max-regression=")
61                .and_then(|v| v.parse().ok())
62        })
63        .unwrap_or(5.0);
64    let update_on_pass = args.iter().any(|a| a == "--update-on-pass");
65
66    // Output in requested format (to stdout)
67    match format.as_deref() {
68        Some("llm") => print!("{}", result.to_llm()),
69        Some("csv") => print!("{}", result.to_csv()),
70        Some("markdown" | "md") => print!("{}", result.to_markdown()),
71        Some("html") => print!("{}", result.to_html()),
72        Some("json") => {
73            if let Ok(json) = serde_json::to_string_pretty(result) {
74                println!("{json}");
75            }
76        }
77        _ => {} // default: terminal report already printed to stderr
78    }
79
80    // Save as named baseline
81    if let Some(ref name) = save_baseline {
82        match baseline::save_baseline(result, name) {
83            Ok(path) => eprintln!("[zenbench] baseline '{name}' saved to {}", path.display()),
84            Err(e) => {
85                eprintln!("[zenbench] error saving baseline '{name}': {e}");
86                std::process::exit(2);
87            }
88        }
89    }
90
91    // Compare against named baseline
92    if let Some(ref name) = baseline_name {
93        match baseline::load_baseline(name) {
94            Ok(saved) => {
95                let comparison = baseline::compare_against_baseline(&saved, result, max_regression);
96                baseline::print_comparison_report(&comparison);
97
98                if comparison.regressions > 0 {
99                    eprintln!(
100                        "\n[zenbench] FAIL: {} regression(s) exceed {max_regression}% threshold",
101                        comparison.regressions,
102                    );
103                    std::process::exit(1);
104                } else {
105                    eprintln!(
106                        "\n[zenbench] PASS: no regressions exceed {max_regression}% threshold"
107                    );
108                    // --update-on-pass: overwrite baseline with current results
109                    if update_on_pass {
110                        match baseline::save_baseline(result, name) {
111                            Ok(path) => eprintln!(
112                                "[zenbench] baseline '{name}' updated (--update-on-pass) → {}",
113                                path.display()
114                            ),
115                            Err(e) => {
116                                eprintln!("[zenbench] warning: failed to update baseline: {e}");
117                            }
118                        }
119                    }
120                }
121            }
122            Err(e) => {
123                eprintln!("[zenbench] {e}");
124                std::process::exit(2);
125            }
126        }
127    }
128
129    // Save results if in fire-and-forget mode
130    if let Some(path) = daemon::result_path_from_env()
131        && let Err(e) = result.save(&path)
132    {
133        eprintln!("[zenbench] error saving results: {e}");
134    }
135}
136#[cfg(feature = "alloc-profiling")]
137pub use alloc::{AllocProfiler, AllocStats};
138
/// Create an Engine from a Suite (used by criterion_compat macros).
///
/// Thin constructor shim: the `engine` module is private, so the
/// `criterion_compat` macros call this instead of `engine::Engine::new`
/// directly. Not part of the public API (hence `#[doc(hidden)]`).
#[doc(hidden)]
pub fn engine_new(suite: Suite) -> engine::Engine {
    engine::Engine::new(suite)
}
144pub use format::format_ns;
145pub use gate::GateConfig;
146pub use platform::Testbed;
147pub use results::{BenchmarkResult, ComparisonResult, RunId, SuiteResult};
148pub use stats::{MeanCi, PairedAnalysis, Summary};
149
/// Optimizer barrier: prevents the compiler from optimizing away benchmark code.
///
/// Thin wrapper around [`std::hint::black_box`] (a wrapper function, not a
/// re-export, but identical in behavior): returns `x` unchanged while acting
/// as an opaque value to the optimizer. Always use this on benchmark return
/// values and inputs so the measured work cannot be const-folded away.
#[inline(always)]
pub fn black_box<T>(x: T) -> T {
    std::hint::black_box(x)
}
158
/// Prelude for convenient imports.
///
/// ```
/// use zenbench::prelude::*;
/// ```
pub mod prelude {
    // Core suite/group/bench builders and throughput annotation.
    pub use crate::bench::{BenchGroup, Bencher, GroupConfig, Suite, Throughput};
    // Optimizer barrier for benchmark inputs and outputs.
    pub use crate::black_box;
    // Quality-gate configuration (used with `run_gated`).
    pub use crate::gate::GateConfig;
    // Aggregate result of a full suite run.
    pub use crate::results::SuiteResult;
    // Statistical summaries attached to results.
    pub use crate::stats::{MeanCi, PairedAnalysis, Summary};
}
171
172/// Run a benchmark suite with default configuration.
173///
174/// # Example
175/// ```no_run
176/// zenbench::run(|suite| {
177///     suite.compare("sorting", |group| {
178///         let data: Vec<i32> = (0..1000).rev().collect();
179///         group.bench("std_sort", move |b| {
180///             let d = data.clone();
181///             b.with_input(move || d.clone())
182///                 .run(|mut v| { v.sort(); v })
183///         });
184///     });
185/// });
186/// ```
187pub fn run<F: FnOnce(&mut Suite)>(f: F) -> SuiteResult {
188    let mut suite = Suite::new();
189    f(&mut suite);
190    let engine = engine::Engine::new(suite);
191    engine.run()
192}
193
194/// Run a benchmark suite with custom gate configuration.
195pub fn run_gated<F: FnOnce(&mut Suite)>(gate: GateConfig, f: F) -> SuiteResult {
196    let mut suite = Suite::new();
197    f(&mut suite);
198    let engine = engine::Engine::with_gate(suite, gate);
199    engine.run()
200}
201
202/// Run a benchmark suite and save results to a JSON file.
203///
204/// If the `ZENBENCH_RESULT_PATH` env var is set (fire-and-forget mode),
205/// results are saved there. Otherwise, results are saved to a timestamped
206/// file in the current directory.
207pub fn run_and_save<F: FnOnce(&mut Suite)>(f: F) -> SuiteResult {
208    let result = run(f);
209
210    let path = daemon::result_path_from_env().unwrap_or_else(|| {
211        let name = format!("zenbench-{}.json", result.run_id);
212        std::path::PathBuf::from(name)
213    });
214
215    if let Err(e) = result.save(&path) {
216        eprintln!("[zenbench] error saving results to {}: {e}", path.display());
217    } else {
218        eprintln!("[zenbench] results saved to {}", path.display());
219    }
220
221    result
222}
223
/// Macro for defining benchmark binaries with `cargo bench`.
///
/// Use this in a `benches/*.rs` file with `harness = false` in `Cargo.toml`.
///
/// Both forms parse an optional `--group=<name>` CLI filter, run the suite
/// via [`run`], and hand the result to `postprocess_result` for output
/// formatting, baseline saving, and regression gating.
///
/// # Examples
///
/// **Function list** (composable — recommended, like criterion_group + criterion_main):
/// ```rust,ignore
/// use zenbench::prelude::*;
///
/// fn bench_sort(suite: &mut Suite) {
///     suite.group("sort", |g| {
///         g.throughput(Throughput::Elements(1000));
///         g.bench("std_sort", |b| {
///             b.with_input(|| (0..1000).rev().collect::<Vec<i32>>())
///                 .run(|mut v| { v.sort(); v })
///         });
///         g.bench("sort_unstable", |b| {
///             b.with_input(|| (0..1000).rev().collect::<Vec<i32>>())
///                 .run(|mut v| { v.sort_unstable(); v })
///         });
///     });
/// }
///
/// fn bench_fib(suite: &mut Suite) {
///     suite.bench_fn("fibonacci", || black_box(fib(20)));
/// }
///
/// zenbench::main!(bench_sort, bench_fib);
/// ```
///
/// **Closure** (quick single-file):
/// ```rust,ignore
/// zenbench::main!(|suite| {
///     suite.group("sort", |g| {
///         g.bench("std", |b| b.iter(|| data.sort()));
///         g.bench("unstable", |b| b.iter(|| data.sort_unstable()));
///     });
/// });
/// ```
///
/// In `Cargo.toml`:
/// ```toml
/// [[bench]]
/// name = "my_bench"
/// harness = false
/// ```
#[macro_export]
macro_rules! main {
    // Form 1: function list — composable, like criterion
    ($($func:path),+ $(,)?) => {
        fn main() {
            // Optional `--group=<name>` flag restricts which groups run.
            let group_filter: Option<String> = std::env::args()
                .find_map(|a| a.strip_prefix("--group=").map(String::from));

            let result = $crate::run(|suite: &mut $crate::Suite| {
                if let Some(ref filter) = group_filter {
                    suite.set_group_filter(filter.clone());
                }
                // Invoke each registration function in the order given.
                $( $func(suite); )+
            });

            $crate::postprocess_result(&result);
        }
    };
    // Form 2: closure — quick single-file benchmarks
    (|$suite:ident| $body:block) => {
        fn main() {
            // Optional `--group=<name>` flag restricts which groups run.
            let group_filter: Option<String> = std::env::args()
                .find_map(|a| a.strip_prefix("--group=").map(String::from));

            let result = $crate::run(|$suite: &mut $crate::Suite| {
                if let Some(ref filter) = group_filter {
                    $suite.set_group_filter(filter.clone());
                }
                $body
            });

            $crate::postprocess_result(&result);
        }
    };
}