// mobench_runner/lib.rs
1//! # mobench-runner
2//!
3//! A lightweight benchmarking harness designed for mobile platforms.
4//!
5//! This crate provides the core timing infrastructure for the mobench ecosystem.
6//! It's designed to be minimal and portable, with no platform-specific dependencies,
7//! making it suitable for compilation to Android and iOS targets.
8//!
9//! ## Overview
10//!
11//! The runner executes benchmark functions with:
12//! - Configurable warmup iterations
13//! - Precise nanosecond-resolution timing
14//! - Simple, serializable results
15//!
16//! ## Usage
17//!
18//! Most users should use this crate via [`mobench-sdk`](https://crates.io/crates/mobench-sdk).
19//! Direct usage is typically only needed for custom integrations:
20//!
21//! ```
22//! use mobench_runner::{BenchSpec, run_closure, BenchError};
23//!
24//! // Define a benchmark specification
25//! let spec = BenchSpec::new("my_benchmark", 100, 10)?;
26//!
27//! // Run the benchmark
28//! let report = run_closure(spec, || {
29//! // Your benchmark code
30//! let sum: u64 = (0..1000).sum();
31//! std::hint::black_box(sum);
32//! Ok(())
33//! })?;
34//!
35//! // Analyze results
36//! let mean_ns = report.samples.iter()
37//! .map(|s| s.duration_ns)
38//! .sum::<u64>() / report.samples.len() as u64;
39//!
40//! println!("Mean: {} ns", mean_ns);
41//! # Ok::<(), BenchError>(())
42//! ```
43//!
44//! ## Types
45//!
46//! | Type | Description |
47//! |------|-------------|
48//! | [`BenchSpec`] | Benchmark configuration (name, iterations, warmup) |
49//! | [`BenchSample`] | Single timing measurement in nanoseconds |
50//! | [`BenchReport`] | Complete results with all samples |
51//! | [`BenchError`] | Error conditions during benchmarking |
52//!
53//! ## Crate Ecosystem
54//!
55//! This crate is part of the mobench ecosystem:
56//!
57//! - **[`mobench-sdk`](https://crates.io/crates/mobench-sdk)** - Core SDK with build automation
58//! - **[`mobench`](https://crates.io/crates/mobench)** - CLI tool
59//! - **[`mobench-macros`](https://crates.io/crates/mobench-macros)** - `#[benchmark]` proc macro
60//! - **`mobench-runner`** (this crate) - Timing harness
61
62use serde::{Deserialize, Serialize};
63use std::time::{Duration, Instant};
64use thiserror::Error;
65
/// Benchmark specification defining what and how to benchmark.
///
/// Contains the benchmark name, number of measurement iterations, and
/// warmup iterations to perform before measuring.
///
/// Note: all fields are public, so a `BenchSpec` can be built with struct
/// literal syntax and bypass the `iterations > 0` validation performed by
/// [`BenchSpec::new`]. [`run_closure`] re-validates the iteration count
/// before running, so such a spec fails there instead.
///
/// # Example
///
/// ```
/// use mobench_runner::BenchSpec;
///
/// // Create a spec for 100 iterations with 10 warmup runs
/// let spec = BenchSpec::new("sorting_benchmark", 100, 10)?;
///
/// assert_eq!(spec.name, "sorting_benchmark");
/// assert_eq!(spec.iterations, 100);
/// assert_eq!(spec.warmup, 10);
/// # Ok::<(), mobench_runner::BenchError>(())
/// ```
///
/// # Serialization
///
/// `BenchSpec` implements `Serialize` and `Deserialize` for JSON persistence:
///
/// ```
/// use mobench_runner::BenchSpec;
///
/// let spec = BenchSpec {
///     name: "my_bench".to_string(),
///     iterations: 50,
///     warmup: 5,
/// };
///
/// let json = serde_json::to_string(&spec)?;
/// let restored: BenchSpec = serde_json::from_str(&json)?;
///
/// assert_eq!(spec.name, restored.name);
/// # Ok::<(), serde_json::Error>(())
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSpec {
    /// Name of the benchmark, typically the fully-qualified function name.
    ///
    /// Examples: `"my_crate::fibonacci"`, `"sorting_benchmark"`
    pub name: String,

    /// Number of iterations to measure.
    ///
    /// Each iteration produces one [`BenchSample`]. Must be greater than zero.
    pub iterations: u32,

    /// Number of warmup iterations before measurement.
    ///
    /// Warmup iterations are not recorded. They allow CPU caches to warm
    /// and any JIT compilation to complete. Can be zero.
    pub warmup: u32,
}
122
123impl BenchSpec {
124 /// Creates a new benchmark specification.
125 ///
126 /// # Arguments
127 ///
128 /// * `name` - Name identifier for the benchmark
129 /// * `iterations` - Number of measured iterations (must be > 0)
130 /// * `warmup` - Number of warmup iterations (can be 0)
131 ///
132 /// # Errors
133 ///
134 /// Returns [`BenchError::NoIterations`] if `iterations` is zero.
135 ///
136 /// # Example
137 ///
138 /// ```
139 /// use mobench_runner::BenchSpec;
140 ///
141 /// let spec = BenchSpec::new("test", 100, 10)?;
142 /// assert_eq!(spec.iterations, 100);
143 ///
144 /// // Zero iterations is an error
145 /// let err = BenchSpec::new("test", 0, 10);
146 /// assert!(err.is_err());
147 /// # Ok::<(), mobench_runner::BenchError>(())
148 /// ```
149 pub fn new(name: impl Into<String>, iterations: u32, warmup: u32) -> Result<Self, BenchError> {
150 if iterations == 0 {
151 return Err(BenchError::NoIterations);
152 }
153
154 Ok(Self {
155 name: name.into(),
156 iterations,
157 warmup,
158 })
159 }
160}
161
/// A single timing sample from a benchmark iteration.
///
/// Contains the elapsed time in nanoseconds for one execution of the
/// benchmark function. A `u64` nanosecond count covers roughly 584 years,
/// so the range is not a practical limit for benchmark durations.
///
/// # Example
///
/// ```
/// use mobench_runner::BenchSample;
///
/// let sample = BenchSample { duration_ns: 1_500_000 };
///
/// // Convert to milliseconds
/// let ms = sample.duration_ns as f64 / 1_000_000.0;
/// assert_eq!(ms, 1.5);
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSample {
    /// Duration of the iteration in nanoseconds.
    ///
    /// Measured using [`std::time::Instant`] for monotonic, high-resolution timing.
    pub duration_ns: u64,
}
185
186impl BenchSample {
187 /// Creates a sample from a [`Duration`].
188 fn from_duration(duration: Duration) -> Self {
189 Self {
190 duration_ns: duration.as_nanos() as u64,
191 }
192 }
193}
194
/// Complete benchmark report with all timing samples.
///
/// Contains the original specification and all collected samples.
/// Can be serialized to JSON for storage or transmission.
///
/// # Example
///
/// ```
/// use mobench_runner::{BenchSpec, run_closure};
///
/// let spec = BenchSpec::new("example", 50, 5)?;
/// let report = run_closure(spec, || {
///     std::hint::black_box(42);
///     Ok(())
/// })?;
///
/// // Calculate statistics
/// let samples: Vec<u64> = report.samples.iter()
///     .map(|s| s.duration_ns)
///     .collect();
///
/// let min = samples.iter().min().unwrap();
/// let max = samples.iter().max().unwrap();
/// let mean = samples.iter().sum::<u64>() / samples.len() as u64;
///
/// println!("Min: {} ns, Max: {} ns, Mean: {} ns", min, max, mean);
/// # Ok::<(), mobench_runner::BenchError>(())
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchReport {
    /// The specification used for this benchmark run.
    pub spec: BenchSpec,

    /// All collected timing samples.
    ///
    /// When produced by [`run_closure`], the length equals `spec.iterations`
    /// and samples are in execution order. (The fields are public, so a
    /// hand-built or deserialized report carries no such guarantee.)
    pub samples: Vec<BenchSample>,
}
233
/// Errors that can occur during benchmark execution.
///
/// Implements [`std::error::Error`] via `thiserror`, so it composes with
/// `?` and boxed-error APIs.
///
/// # Example
///
/// ```
/// use mobench_runner::{BenchSpec, BenchError};
///
/// // Zero iterations produces an error
/// let result = BenchSpec::new("test", 0, 10);
/// assert!(matches!(result, Err(BenchError::NoIterations)));
/// ```
#[derive(Debug, Error)]
pub enum BenchError {
    /// The iteration count was zero.
    ///
    /// At least one iteration is required to produce a measurement.
    #[error("iterations must be greater than zero")]
    NoIterations,

    /// The benchmark function failed during execution.
    ///
    /// Contains a description of the failure. Typically constructed inside
    /// the benchmarked closure itself and propagated by [`run_closure`].
    #[error("benchmark function failed: {0}")]
    Execution(String),
}
259
260/// Runs a benchmark by executing a closure repeatedly.
261///
262/// This is the core benchmarking function. It:
263///
264/// 1. Executes the closure `spec.warmup` times without recording
265/// 2. Executes the closure `spec.iterations` times, recording each duration
266/// 3. Returns a [`BenchReport`] with all samples
267///
268/// # Arguments
269///
270/// * `spec` - Benchmark configuration specifying iterations and warmup
271/// * `f` - Closure to benchmark; must return `Result<(), BenchError>`
272///
273/// # Returns
274///
275/// A [`BenchReport`] containing all timing samples, or a [`BenchError`] if
276/// the benchmark fails.
277///
278/// # Example
279///
280/// ```
281/// use mobench_runner::{BenchSpec, run_closure, BenchError};
282///
283/// let spec = BenchSpec::new("sum_benchmark", 100, 10)?;
284///
285/// let report = run_closure(spec, || {
286/// let sum: u64 = (0..1000).sum();
287/// std::hint::black_box(sum);
288/// Ok(())
289/// })?;
290///
291/// assert_eq!(report.samples.len(), 100);
292///
293/// // Calculate mean duration
294/// let total_ns: u64 = report.samples.iter().map(|s| s.duration_ns).sum();
295/// let mean_ns = total_ns / report.samples.len() as u64;
296/// println!("Mean: {} ns", mean_ns);
297/// # Ok::<(), BenchError>(())
298/// ```
299///
300/// # Error Handling
301///
302/// If the closure returns an error, the benchmark stops immediately:
303///
304/// ```
305/// use mobench_runner::{BenchSpec, run_closure, BenchError};
306///
307/// let spec = BenchSpec::new("failing_bench", 100, 0)?;
308///
309/// let result = run_closure(spec, || {
310/// Err(BenchError::Execution("simulated failure".into()))
311/// });
312///
313/// assert!(result.is_err());
314/// # Ok::<(), BenchError>(())
315/// ```
316///
317/// # Timing Precision
318///
319/// Uses [`std::time::Instant`] for timing, which provides monotonic,
320/// nanosecond-resolution measurements on most platforms.
321pub fn run_closure<F>(spec: BenchSpec, mut f: F) -> Result<BenchReport, BenchError>
322where
323 F: FnMut() -> Result<(), BenchError>,
324{
325 if spec.iterations == 0 {
326 return Err(BenchError::NoIterations);
327 }
328
329 // Warmup phase - not measured
330 for _ in 0..spec.warmup {
331 f()?;
332 }
333
334 // Measurement phase
335 let mut samples = Vec::with_capacity(spec.iterations as usize);
336 for _ in 0..spec.iterations {
337 let start = Instant::now();
338 f()?;
339 samples.push(BenchSample::from_duration(start.elapsed()));
340 }
341
342 Ok(BenchReport { spec, samples })
343}
344
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn runs_benchmark() {
        let spec = BenchSpec::new("noop", 3, 1).expect("valid spec");
        let report = run_closure(spec, || Ok(())).expect("benchmark should run");

        assert_eq!(report.samples.len(), 3);
        // Even a no-op closure should register at least one non-zero duration.
        assert!(report.samples.iter().any(|s| s.duration_ns > 0));
    }

    #[test]
    fn rejects_zero_iterations() {
        assert!(matches!(
            BenchSpec::new("test", 0, 10),
            Err(BenchError::NoIterations)
        ));
    }

    #[test]
    fn allows_zero_warmup() {
        let spec = BenchSpec::new("test", 5, 0).expect("valid spec");
        assert_eq!(spec.warmup, 0);

        let report = run_closure(spec, || Ok(())).expect("benchmark should run");
        assert_eq!(report.samples.len(), 5);
    }

    #[test]
    fn serializes_to_json() {
        let spec = BenchSpec::new("test", 10, 2).expect("valid spec");
        let report = run_closure(spec, || Ok(())).expect("benchmark should run");

        let json = serde_json::to_string(&report).expect("report serializes");
        let restored: BenchReport = serde_json::from_str(&json).expect("report deserializes");

        assert_eq!(restored.spec.name, "test");
        assert_eq!(restored.samples.len(), 10);
    }
}
385}