// mobench_sdk/timing.rs
1//! Lightweight benchmarking harness for mobile platforms.
2//!
3//! This module provides the core timing infrastructure for the mobench ecosystem.
4//! It was previously a separate crate (`mobench-runner`) but has been consolidated
5//! into `mobench-sdk` for a simpler dependency graph.
6//!
7//! The module is designed to be minimal and portable, with no platform-specific
8//! dependencies, making it suitable for compilation to Android and iOS targets.
9//!
10//! ## Overview
11//!
12//! The timing module executes benchmark functions with:
13//! - Configurable warmup iterations
14//! - Precise nanosecond-resolution timing
15//! - Simple, serializable results
16//!
17//! ## Usage
18//!
19//! Most users should use this via the higher-level [`crate::run_benchmark`] function
20//! or [`crate::BenchmarkBuilder`]. Direct usage is for custom integrations:
21//!
22//! ```
23//! use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
24//!
25//! // Define a benchmark specification
26//! let spec = BenchSpec::new("my_benchmark", 100, 10)?;
27//!
28//! // Run the benchmark
29//! let report = run_closure(spec, || {
//!     // Your benchmark code
//!     let sum: u64 = (0..1000).sum();
//!     std::hint::black_box(sum);
//!     Ok(())
//! })?;
//!
//! // Analyze results
//! let mean_ns = report.samples.iter()
//!     .map(|s| s.duration_ns)
//!     .sum::<u64>() / report.samples.len() as u64;
40//!
41//! println!("Mean: {} ns", mean_ns);
42//! # Ok::<(), TimingError>(())
43//! ```
44//!
45//! ## Types
46//!
47//! | Type | Description |
48//! |------|-------------|
49//! | [`BenchSpec`] | Benchmark configuration (name, iterations, warmup) |
50//! | [`BenchSample`] | Single timing measurement in nanoseconds |
51//! | [`BenchReport`] | Complete results with all samples |
52//! | [`TimingError`] | Error conditions during benchmarking |
53//!
54//! ## Feature Flags
55//!
56//! This module is always available. When using `mobench-sdk` with default features,
57//! you also get build automation and template generation. For minimal binary size
58//! (e.g., on mobile targets), use the `runner-only` feature:
59//!
60//! ```toml
61//! [dependencies]
62//! mobench-sdk = { version = "0.1", default-features = false, features = ["runner-only"] }
63//! ```
64
65use serde::{Deserialize, Serialize};
66use std::time::{Duration, Instant};
67use thiserror::Error;
68
69/// Benchmark specification defining what and how to benchmark.
70///
71/// Contains the benchmark name, number of measurement iterations, and
72/// warmup iterations to perform before measuring.
73///
74/// # Example
75///
76/// ```
77/// use mobench_sdk::timing::BenchSpec;
78///
79/// // Create a spec for 100 iterations with 10 warmup runs
80/// let spec = BenchSpec::new("sorting_benchmark", 100, 10)?;
81///
82/// assert_eq!(spec.name, "sorting_benchmark");
83/// assert_eq!(spec.iterations, 100);
84/// assert_eq!(spec.warmup, 10);
85/// # Ok::<(), mobench_sdk::timing::TimingError>(())
86/// ```
87///
88/// # Serialization
89///
90/// `BenchSpec` implements `Serialize` and `Deserialize` for JSON persistence:
91///
92/// ```
93/// use mobench_sdk::timing::BenchSpec;
94///
95/// let spec = BenchSpec {
96/// name: "my_bench".to_string(),
97/// iterations: 50,
98/// warmup: 5,
99/// };
100///
101/// let json = serde_json::to_string(&spec)?;
102/// let restored: BenchSpec = serde_json::from_str(&json)?;
103///
104/// assert_eq!(spec.name, restored.name);
105/// # Ok::<(), serde_json::Error>(())
106/// ```
107#[derive(Clone, Debug, Serialize, Deserialize)]
108pub struct BenchSpec {
109 /// Name of the benchmark, typically the fully-qualified function name.
110 ///
111 /// Examples: `"my_crate::fibonacci"`, `"sorting_benchmark"`
112 pub name: String,
113
114 /// Number of iterations to measure.
115 ///
116 /// Each iteration produces one [`BenchSample`]. Must be greater than zero.
117 pub iterations: u32,
118
119 /// Number of warmup iterations before measurement.
120 ///
121 /// Warmup iterations are not recorded. They allow CPU caches to warm
122 /// and any JIT compilation to complete. Can be zero.
123 pub warmup: u32,
124}
125
126impl BenchSpec {
127 /// Creates a new benchmark specification.
128 ///
129 /// # Arguments
130 ///
131 /// * `name` - Name identifier for the benchmark
132 /// * `iterations` - Number of measured iterations (must be > 0)
133 /// * `warmup` - Number of warmup iterations (can be 0)
134 ///
135 /// # Errors
136 ///
137 /// Returns [`TimingError::NoIterations`] if `iterations` is zero.
138 ///
139 /// # Example
140 ///
141 /// ```
142 /// use mobench_sdk::timing::BenchSpec;
143 ///
144 /// let spec = BenchSpec::new("test", 100, 10)?;
145 /// assert_eq!(spec.iterations, 100);
146 ///
147 /// // Zero iterations is an error
148 /// let err = BenchSpec::new("test", 0, 10);
149 /// assert!(err.is_err());
150 /// # Ok::<(), mobench_sdk::timing::TimingError>(())
151 /// ```
152 pub fn new(name: impl Into<String>, iterations: u32, warmup: u32) -> Result<Self, TimingError> {
153 if iterations == 0 {
154 return Err(TimingError::NoIterations { count: iterations });
155 }
156
157 Ok(Self {
158 name: name.into(),
159 iterations,
160 warmup,
161 })
162 }
163}
164
165/// A single timing sample from a benchmark iteration.
166///
167/// Contains the elapsed time in nanoseconds for one execution of the
168/// benchmark function.
169///
170/// # Example
171///
172/// ```
173/// use mobench_sdk::timing::BenchSample;
174///
175/// let sample = BenchSample { duration_ns: 1_500_000 };
176///
177/// // Convert to milliseconds
178/// let ms = sample.duration_ns as f64 / 1_000_000.0;
179/// assert_eq!(ms, 1.5);
180/// ```
181#[derive(Clone, Debug, Serialize, Deserialize)]
182pub struct BenchSample {
183 /// Duration of the iteration in nanoseconds.
184 ///
185 /// Measured using [`std::time::Instant`] for monotonic, high-resolution timing.
186 pub duration_ns: u64,
187}
188
189impl BenchSample {
190 /// Creates a sample from a [`Duration`].
191 fn from_duration(duration: Duration) -> Self {
192 Self {
193 duration_ns: duration.as_nanos() as u64,
194 }
195 }
196}
197
198/// Complete benchmark report with all timing samples.
199///
200/// Contains the original specification and all collected samples.
201/// Can be serialized to JSON for storage or transmission.
202///
203/// # Example
204///
205/// ```
206/// use mobench_sdk::timing::{BenchSpec, run_closure};
207///
208/// let spec = BenchSpec::new("example", 50, 5)?;
209/// let report = run_closure(spec, || {
210/// std::hint::black_box(42);
211/// Ok(())
212/// })?;
213///
214/// // Calculate statistics
215/// let samples: Vec<u64> = report.samples.iter()
216/// .map(|s| s.duration_ns)
217/// .collect();
218///
219/// let min = samples.iter().min().unwrap();
220/// let max = samples.iter().max().unwrap();
221/// let mean = samples.iter().sum::<u64>() / samples.len() as u64;
222///
223/// println!("Min: {} ns, Max: {} ns, Mean: {} ns", min, max, mean);
224/// # Ok::<(), mobench_sdk::timing::TimingError>(())
225/// ```
226#[derive(Clone, Debug, Serialize, Deserialize)]
227pub struct BenchReport {
228 /// The specification used for this benchmark run.
229 pub spec: BenchSpec,
230
231 /// All collected timing samples.
232 ///
233 /// The length equals `spec.iterations`. Samples are in execution order.
234 pub samples: Vec<BenchSample>,
235}
236
237/// Errors that can occur during benchmark execution.
238///
239/// # Example
240///
241/// ```
242/// use mobench_sdk::timing::{BenchSpec, TimingError};
243///
244/// // Zero iterations produces an error
245/// let result = BenchSpec::new("test", 0, 10);
246/// assert!(matches!(result, Err(TimingError::NoIterations { .. })));
247/// ```
248#[derive(Debug, Error)]
249pub enum TimingError {
250 /// The iteration count was zero or invalid.
251 ///
252 /// At least one iteration is required to produce a measurement.
253 /// The error includes the actual value provided for diagnostic purposes.
254 #[error("iterations must be greater than zero (got {count}). Minimum recommended: 10")]
255 NoIterations {
256 /// The invalid iteration count that was provided.
257 count: u32,
258 },
259
260 /// The benchmark function failed during execution.
261 ///
262 /// Contains a description of the failure.
263 #[error("benchmark function failed: {0}")]
264 Execution(String),
265}
266
267/// Runs a benchmark by executing a closure repeatedly.
268///
269/// This is the core benchmarking function. It:
270///
271/// 1. Executes the closure `spec.warmup` times without recording
272/// 2. Executes the closure `spec.iterations` times, recording each duration
273/// 3. Returns a [`BenchReport`] with all samples
274///
275/// # Arguments
276///
277/// * `spec` - Benchmark configuration specifying iterations and warmup
278/// * `f` - Closure to benchmark; must return `Result<(), TimingError>`
279///
280/// # Returns
281///
282/// A [`BenchReport`] containing all timing samples, or a [`TimingError`] if
283/// the benchmark fails.
284///
285/// # Example
286///
287/// ```
288/// use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
289///
290/// let spec = BenchSpec::new("sum_benchmark", 100, 10)?;
291///
292/// let report = run_closure(spec, || {
293/// let sum: u64 = (0..1000).sum();
294/// std::hint::black_box(sum);
295/// Ok(())
296/// })?;
297///
298/// assert_eq!(report.samples.len(), 100);
299///
300/// // Calculate mean duration
301/// let total_ns: u64 = report.samples.iter().map(|s| s.duration_ns).sum();
302/// let mean_ns = total_ns / report.samples.len() as u64;
303/// println!("Mean: {} ns", mean_ns);
304/// # Ok::<(), TimingError>(())
305/// ```
306///
307/// # Error Handling
308///
309/// If the closure returns an error, the benchmark stops immediately:
310///
311/// ```
312/// use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
313///
314/// let spec = BenchSpec::new("failing_bench", 100, 0)?;
315///
316/// let result = run_closure(spec, || {
317/// Err(TimingError::Execution("simulated failure".into()))
318/// });
319///
320/// assert!(result.is_err());
321/// # Ok::<(), TimingError>(())
322/// ```
323///
324/// # Timing Precision
325///
326/// Uses [`std::time::Instant`] for timing, which provides monotonic,
327/// nanosecond-resolution measurements on most platforms.
328pub fn run_closure<F>(spec: BenchSpec, mut f: F) -> Result<BenchReport, TimingError>
329where
330 F: FnMut() -> Result<(), TimingError>,
331{
332 if spec.iterations == 0 {
333 return Err(TimingError::NoIterations {
334 count: spec.iterations,
335 });
336 }
337
338 // Warmup phase - not measured
339 for _ in 0..spec.warmup {
340 f()?;
341 }
342
343 // Measurement phase
344 let mut samples = Vec::with_capacity(spec.iterations as usize);
345 for _ in 0..spec.iterations {
346 let start = Instant::now();
347 f()?;
348 samples.push(BenchSample::from_duration(start.elapsed()));
349 }
350
351 Ok(BenchReport { spec, samples })
352}
353
354/// Runs a benchmark with setup that executes once before all iterations.
355///
356/// The setup function is called once before timing begins, then the benchmark
357/// runs multiple times using a reference to the setup result. This is useful
358/// for expensive initialization that shouldn't be included in timing.
359///
360/// # Arguments
361///
362/// * `spec` - Benchmark configuration specifying iterations and warmup
363/// * `setup` - Function that creates the input data (called once, not timed)
364/// * `f` - Benchmark closure that receives a reference to setup result
365///
366/// # Example
367///
368/// ```ignore
369/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup};
370///
371/// fn setup_data() -> Vec<u8> {
372/// vec![0u8; 1_000_000] // Expensive allocation not measured
373/// }
374///
375/// let spec = BenchSpec::new("hash_benchmark", 100, 10)?;
376/// let report = run_closure_with_setup(spec, setup_data, |data| {
377/// std::hint::black_box(compute_hash(data));
378/// Ok(())
379/// })?;
380/// ```
381pub fn run_closure_with_setup<S, T, F>(
382 spec: BenchSpec,
383 setup: S,
384 mut f: F,
385) -> Result<BenchReport, TimingError>
386where
387 S: FnOnce() -> T,
388 F: FnMut(&T) -> Result<(), TimingError>,
389{
390 if spec.iterations == 0 {
391 return Err(TimingError::NoIterations {
392 count: spec.iterations,
393 });
394 }
395
396 // Setup phase - not timed
397 let input = setup();
398
399 // Warmup phase - not recorded
400 for _ in 0..spec.warmup {
401 f(&input)?;
402 }
403
404 // Measurement phase
405 let mut samples = Vec::with_capacity(spec.iterations as usize);
406 for _ in 0..spec.iterations {
407 let start = Instant::now();
408 f(&input)?;
409 samples.push(BenchSample::from_duration(start.elapsed()));
410 }
411
412 Ok(BenchReport { spec, samples })
413}
414
415/// Runs a benchmark with per-iteration setup.
416///
417/// Setup runs before each iteration and is not timed. The benchmark takes
418/// ownership of the setup result, making this suitable for benchmarks that
419/// mutate their input (e.g., sorting).
420///
421/// # Arguments
422///
423/// * `spec` - Benchmark configuration specifying iterations and warmup
424/// * `setup` - Function that creates fresh input for each iteration (not timed)
425/// * `f` - Benchmark closure that takes ownership of setup result
426///
427/// # Example
428///
429/// ```ignore
430/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup_per_iter};
431///
432/// fn generate_random_vec() -> Vec<i32> {
433/// (0..1000).map(|_| rand::random()).collect()
434/// }
435///
436/// let spec = BenchSpec::new("sort_benchmark", 100, 10)?;
437/// let report = run_closure_with_setup_per_iter(spec, generate_random_vec, |mut data| {
438/// data.sort();
439/// std::hint::black_box(data);
440/// Ok(())
441/// })?;
442/// ```
443pub fn run_closure_with_setup_per_iter<S, T, F>(
444 spec: BenchSpec,
445 mut setup: S,
446 mut f: F,
447) -> Result<BenchReport, TimingError>
448where
449 S: FnMut() -> T,
450 F: FnMut(T) -> Result<(), TimingError>,
451{
452 if spec.iterations == 0 {
453 return Err(TimingError::NoIterations {
454 count: spec.iterations,
455 });
456 }
457
458 // Warmup phase
459 for _ in 0..spec.warmup {
460 let input = setup();
461 f(input)?;
462 }
463
464 // Measurement phase
465 let mut samples = Vec::with_capacity(spec.iterations as usize);
466 for _ in 0..spec.iterations {
467 let input = setup(); // Not timed
468
469 let start = Instant::now();
470 f(input)?; // Only this is timed
471 samples.push(BenchSample::from_duration(start.elapsed()));
472 }
473
474 Ok(BenchReport { spec, samples })
475}
476
477/// Runs a benchmark with setup and teardown.
478///
479/// Setup runs once before all iterations, teardown runs once after all
480/// iterations complete. Neither is included in timing.
481///
482/// # Arguments
483///
484/// * `spec` - Benchmark configuration specifying iterations and warmup
485/// * `setup` - Function that creates the input data (called once, not timed)
486/// * `f` - Benchmark closure that receives a reference to setup result
487/// * `teardown` - Function that cleans up the input (called once, not timed)
488///
489/// # Example
490///
491/// ```ignore
492/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup_teardown};
493///
494/// fn setup_db() -> Database { Database::connect("test.db") }
495/// fn cleanup_db(db: Database) { db.close(); std::fs::remove_file("test.db").ok(); }
496///
497/// let spec = BenchSpec::new("db_benchmark", 100, 10)?;
498/// let report = run_closure_with_setup_teardown(
499/// spec,
500/// setup_db,
501/// |db| { db.query("SELECT *"); Ok(()) },
502/// cleanup_db,
503/// )?;
504/// ```
505pub fn run_closure_with_setup_teardown<S, T, F, D>(
506 spec: BenchSpec,
507 setup: S,
508 mut f: F,
509 teardown: D,
510) -> Result<BenchReport, TimingError>
511where
512 S: FnOnce() -> T,
513 F: FnMut(&T) -> Result<(), TimingError>,
514 D: FnOnce(T),
515{
516 if spec.iterations == 0 {
517 return Err(TimingError::NoIterations {
518 count: spec.iterations,
519 });
520 }
521
522 // Setup phase - not timed
523 let input = setup();
524
525 // Warmup phase
526 for _ in 0..spec.warmup {
527 f(&input)?;
528 }
529
530 // Measurement phase
531 let mut samples = Vec::with_capacity(spec.iterations as usize);
532 for _ in 0..spec.iterations {
533 let start = Instant::now();
534 f(&input)?;
535 samples.push(BenchSample::from_duration(start.elapsed()));
536 }
537
538 // Teardown phase - not timed
539 teardown(input);
540
541 Ok(BenchReport { spec, samples })
542}
543
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn runs_benchmark() {
        let spec = BenchSpec::new("noop", 3, 1).unwrap();
        let report = run_closure(spec, || Ok(())).unwrap();

        assert_eq!(report.samples.len(), 3);
        // Even a no-op closure should register at least one non-zero
        // duration on platforms where Instant has fine resolution.
        assert!(report.samples.iter().any(|s| s.duration_ns > 0));
    }

    #[test]
    fn rejects_zero_iterations() {
        assert!(matches!(
            BenchSpec::new("test", 0, 10),
            Err(TimingError::NoIterations { count: 0 })
        ));
    }

    #[test]
    fn allows_zero_warmup() {
        let spec = BenchSpec::new("test", 5, 0).unwrap();
        assert_eq!(spec.warmup, 0);

        // Zero warmup still yields one sample per iteration.
        let report = run_closure(spec, || Ok(())).unwrap();
        assert_eq!(report.samples.len(), 5);
    }

    #[test]
    fn serializes_to_json() {
        let spec = BenchSpec::new("test", 10, 2).unwrap();
        let report = run_closure(spec, || Ok(())).unwrap();

        // Round-trip through JSON and confirm nothing was lost.
        let encoded = serde_json::to_string(&report).unwrap();
        let decoded: BenchReport = serde_json::from_str(&encoded).unwrap();

        assert_eq!(decoded.spec.name, "test");
        assert_eq!(decoded.samples.len(), 10);
    }

    #[test]
    fn run_with_setup_calls_setup_once() {
        use std::sync::atomic::{AtomicU32, Ordering};

        static SETUP_COUNT: AtomicU32 = AtomicU32::new(0);
        static RUN_COUNT: AtomicU32 = AtomicU32::new(0);

        let spec = BenchSpec::new("test", 5, 2).unwrap();
        let report = run_closure_with_setup(
            spec,
            || {
                SETUP_COUNT.fetch_add(1, Ordering::SeqCst);
                vec![1, 2, 3]
            },
            |data| {
                RUN_COUNT.fetch_add(1, Ordering::SeqCst);
                std::hint::black_box(data.len());
                Ok(())
            },
        )
        .unwrap();

        // Setup runs exactly once; the closure runs warmup + iterations.
        assert_eq!(SETUP_COUNT.load(Ordering::SeqCst), 1);
        assert_eq!(RUN_COUNT.load(Ordering::SeqCst), 7);
        assert_eq!(report.samples.len(), 5);
    }

    #[test]
    fn run_with_setup_per_iter_calls_setup_each_time() {
        use std::sync::atomic::{AtomicU32, Ordering};

        static SETUP_COUNT: AtomicU32 = AtomicU32::new(0);

        let spec = BenchSpec::new("test", 3, 1).unwrap();
        let report = run_closure_with_setup_per_iter(
            spec,
            || {
                SETUP_COUNT.fetch_add(1, Ordering::SeqCst);
                vec![1, 2, 3]
            },
            |data| {
                std::hint::black_box(data);
                Ok(())
            },
        )
        .unwrap();

        // One setup call per warmup run plus one per measured iteration.
        assert_eq!(SETUP_COUNT.load(Ordering::SeqCst), 4);
        assert_eq!(report.samples.len(), 3);
    }

    #[test]
    fn run_with_setup_teardown_calls_both() {
        use std::sync::atomic::{AtomicU32, Ordering};

        static SETUP_COUNT: AtomicU32 = AtomicU32::new(0);
        static TEARDOWN_COUNT: AtomicU32 = AtomicU32::new(0);

        let spec = BenchSpec::new("test", 3, 1).unwrap();
        let report = run_closure_with_setup_teardown(
            spec,
            || {
                SETUP_COUNT.fetch_add(1, Ordering::SeqCst);
                "resource"
            },
            |_resource| Ok(()),
            |_resource| {
                TEARDOWN_COUNT.fetch_add(1, Ordering::SeqCst);
            },
        )
        .unwrap();

        // Setup and teardown each fire exactly once around the run.
        assert_eq!(SETUP_COUNT.load(Ordering::SeqCst), 1);
        assert_eq!(TEARDOWN_COUNT.load(Ordering::SeqCst), 1);
        assert_eq!(report.samples.len(), 3);
    }
}
663}