// mobench_sdk/timing.rs
1//! Lightweight benchmarking harness for mobile platforms.
2//!
3//! This module provides the core timing infrastructure for the mobench ecosystem.
4//! It was previously a separate crate (`mobench-runner`) but has been consolidated
5//! into `mobench-sdk` for a simpler dependency graph.
6//!
7//! The module is designed to be minimal and portable, with no platform-specific
8//! dependencies, making it suitable for compilation to Android and iOS targets.
9//!
10//! ## Overview
11//!
12//! The timing module executes benchmark functions with:
13//! - Configurable warmup iterations
14//! - Precise nanosecond-resolution timing
15//! - Simple, serializable results
16//!
17//! ## Usage
18//!
19//! Most users should use this via the higher-level [`crate::run_benchmark`] function
20//! or [`crate::BenchmarkBuilder`]. Direct usage is for custom integrations:
21//!
22//! ```
23//! use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
24//!
25//! // Define a benchmark specification
26//! let spec = BenchSpec::new("my_benchmark", 100, 10)?;
27//!
28//! // Run the benchmark
29//! let report = run_closure(spec, || {
30//!     // Your benchmark code
31//!     let sum: u64 = (0..1000).sum();
32//!     std::hint::black_box(sum);
33//!     Ok(())
34//! })?;
35//!
36//! // Analyze results
37//! let mean_ns = report.samples.iter()
38//!     .map(|s| s.duration_ns)
39//!     .sum::<u64>() / report.samples.len() as u64;
40//!
41//! println!("Mean: {} ns", mean_ns);
42//! # Ok::<(), TimingError>(())
43//! ```
44//!
45//! ## Types
46//!
47//! | Type | Description |
48//! |------|-------------|
49//! | [`BenchSpec`] | Benchmark configuration (name, iterations, warmup) |
50//! | [`BenchSample`] | Single timing measurement in nanoseconds |
51//! | [`BenchReport`] | Complete results with all samples |
52//! | [`TimingError`] | Error conditions during benchmarking |
53//!
54//! ## Feature Flags
55//!
56//! This module is always available. When using `mobench-sdk` with default features,
57//! you also get build automation and template generation. For minimal binary size
58//! (e.g., on mobile targets), use the `runner-only` feature:
59//!
60//! ```toml
61//! [dependencies]
62//! mobench-sdk = { version = "0.1", default-features = false, features = ["runner-only"] }
63//! ```
64
65use serde::{Deserialize, Serialize};
66use std::cell::RefCell;
67use std::time::{Duration, Instant};
68use thiserror::Error;
69
/// Benchmark specification defining what and how to benchmark.
///
/// Contains the benchmark name, number of measurement iterations, and
/// warmup iterations to perform before measuring.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::BenchSpec;
///
/// // Create a spec for 100 iterations with 10 warmup runs
/// let spec = BenchSpec::new("sorting_benchmark", 100, 10)?;
///
/// assert_eq!(spec.name, "sorting_benchmark");
/// assert_eq!(spec.iterations, 100);
/// assert_eq!(spec.warmup, 10);
/// # Ok::<(), mobench_sdk::timing::TimingError>(())
/// ```
///
/// # Serialization
///
/// `BenchSpec` implements `Serialize` and `Deserialize` for JSON persistence:
///
/// ```
/// use mobench_sdk::timing::BenchSpec;
///
/// let spec = BenchSpec {
///     name: "my_bench".to_string(),
///     iterations: 50,
///     warmup: 5,
/// };
///
/// let json = serde_json::to_string(&spec)?;
/// let restored: BenchSpec = serde_json::from_str(&json)?;
///
/// assert_eq!(spec.name, restored.name);
/// # Ok::<(), serde_json::Error>(())
/// ```
///
/// # Invariants
///
/// The fields are public, so a struct literal (as in the example above) can
/// bypass the `iterations > 0` check enforced by [`BenchSpec::new`]; the
/// `run_closure*` functions re-validate `iterations` before measuring.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSpec {
    /// Name of the benchmark, typically the fully-qualified function name.
    ///
    /// Examples: `"my_crate::fibonacci"`, `"sorting_benchmark"`
    pub name: String,

    /// Number of iterations to measure.
    ///
    /// Each iteration produces one [`BenchSample`]. Must be greater than zero.
    pub iterations: u32,

    /// Number of warmup iterations before measurement.
    ///
    /// Warmup iterations are not recorded. They allow CPU caches to warm
    /// and any JIT compilation to complete. Can be zero.
    pub warmup: u32,
}
126
127impl BenchSpec {
128    /// Creates a new benchmark specification.
129    ///
130    /// # Arguments
131    ///
132    /// * `name` - Name identifier for the benchmark
133    /// * `iterations` - Number of measured iterations (must be > 0)
134    /// * `warmup` - Number of warmup iterations (can be 0)
135    ///
136    /// # Errors
137    ///
138    /// Returns [`TimingError::NoIterations`] if `iterations` is zero.
139    ///
140    /// # Example
141    ///
142    /// ```
143    /// use mobench_sdk::timing::BenchSpec;
144    ///
145    /// let spec = BenchSpec::new("test", 100, 10)?;
146    /// assert_eq!(spec.iterations, 100);
147    ///
148    /// // Zero iterations is an error
149    /// let err = BenchSpec::new("test", 0, 10);
150    /// assert!(err.is_err());
151    /// # Ok::<(), mobench_sdk::timing::TimingError>(())
152    /// ```
153    pub fn new(name: impl Into<String>, iterations: u32, warmup: u32) -> Result<Self, TimingError> {
154        if iterations == 0 {
155            return Err(TimingError::NoIterations { count: iterations });
156        }
157
158        Ok(Self {
159            name: name.into(),
160            iterations,
161            warmup,
162        })
163    }
164}
165
/// A single timing sample from a benchmark iteration.
///
/// Contains the elapsed time in nanoseconds for one execution of the
/// benchmark function. The `run_closure*` functions create one sample per
/// measured iteration.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::BenchSample;
///
/// let sample = BenchSample { duration_ns: 1_500_000 };
///
/// // Convert to milliseconds
/// let ms = sample.duration_ns as f64 / 1_000_000.0;
/// assert_eq!(ms, 1.5);
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSample {
    /// Duration of the iteration in nanoseconds.
    ///
    /// Measured using [`std::time::Instant`] for monotonic, high-resolution timing.
    pub duration_ns: u64,
}
189
190impl BenchSample {
191    /// Creates a sample from a [`Duration`].
192    fn from_duration(duration: Duration) -> Self {
193        Self {
194            duration_ns: duration.as_nanos() as u64,
195        }
196    }
197}
198
/// Complete benchmark report with all timing samples.
///
/// Contains the original specification and all collected samples.
/// Can be serialized to JSON for storage or transmission.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::{BenchSpec, run_closure};
///
/// let spec = BenchSpec::new("example", 50, 5)?;
/// let report = run_closure(spec, || {
///     std::hint::black_box(42);
///     Ok(())
/// })?;
///
/// // Calculate statistics
/// let samples: Vec<u64> = report.samples.iter()
///     .map(|s| s.duration_ns)
///     .collect();
///
/// let min = samples.iter().min().unwrap();
/// let max = samples.iter().max().unwrap();
/// let mean = samples.iter().sum::<u64>() / samples.len() as u64;
///
/// println!("Min: {} ns, Max: {} ns, Mean: {} ns", min, max, mean);
/// # Ok::<(), mobench_sdk::timing::TimingError>(())
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchReport {
    /// The specification used for this benchmark run.
    pub spec: BenchSpec,

    /// All collected timing samples.
    ///
    /// The length equals `spec.iterations`. Samples are in execution order.
    pub samples: Vec<BenchSample>,

    /// Optional semantic phase timings captured during measured iterations.
    ///
    /// Empty when no [`profile_phase`] calls ran during measurement.
    pub phases: Vec<SemanticPhase>,
}
240
241impl BenchReport {
242    /// Returns the mean (average) duration in nanoseconds.
243    #[must_use]
244    pub fn mean_ns(&self) -> f64 {
245        if self.samples.is_empty() {
246            return 0.0;
247        }
248        let sum: u64 = self.samples.iter().map(|s| s.duration_ns).sum();
249        sum as f64 / self.samples.len() as f64
250    }
251
252    /// Returns the median duration in nanoseconds.
253    #[must_use]
254    pub fn median_ns(&self) -> f64 {
255        if self.samples.is_empty() {
256            return 0.0;
257        }
258        let mut sorted: Vec<u64> = self.samples.iter().map(|s| s.duration_ns).collect();
259        sorted.sort_unstable();
260        let len = sorted.len();
261        if len % 2 == 0 {
262            (sorted[len / 2 - 1] + sorted[len / 2]) as f64 / 2.0
263        } else {
264            sorted[len / 2] as f64
265        }
266    }
267
268    /// Returns the standard deviation in nanoseconds (sample std dev, n-1).
269    #[must_use]
270    pub fn std_dev_ns(&self) -> f64 {
271        if self.samples.len() < 2 {
272            return 0.0;
273        }
274        let mean = self.mean_ns();
275        let variance: f64 = self
276            .samples
277            .iter()
278            .map(|s| {
279                let diff = s.duration_ns as f64 - mean;
280                diff * diff
281            })
282            .sum::<f64>()
283            / (self.samples.len() - 1) as f64;
284        variance.sqrt()
285    }
286
287    /// Returns the given percentile (0-100) in nanoseconds.
288    #[must_use]
289    pub fn percentile_ns(&self, p: f64) -> f64 {
290        if self.samples.is_empty() {
291            return 0.0;
292        }
293        let mut sorted: Vec<u64> = self.samples.iter().map(|s| s.duration_ns).collect();
294        sorted.sort_unstable();
295        let p = p.clamp(0.0, 100.0) / 100.0;
296        let index = (p * (sorted.len() - 1) as f64).round() as usize;
297        sorted[index.min(sorted.len() - 1)] as f64
298    }
299
300    /// Returns the minimum duration in nanoseconds.
301    #[must_use]
302    pub fn min_ns(&self) -> u64 {
303        self.samples
304            .iter()
305            .map(|s| s.duration_ns)
306            .min()
307            .unwrap_or(0)
308    }
309
310    /// Returns the maximum duration in nanoseconds.
311    #[must_use]
312    pub fn max_ns(&self) -> u64 {
313        self.samples
314            .iter()
315            .map(|s| s.duration_ns)
316            .max()
317            .unwrap_or(0)
318    }
319
320    /// Returns a statistical summary of the benchmark results.
321    #[must_use]
322    pub fn summary(&self) -> BenchSummary {
323        BenchSummary {
324            name: self.spec.name.clone(),
325            iterations: self.samples.len() as u32,
326            warmup: self.spec.warmup,
327            mean_ns: self.mean_ns(),
328            median_ns: self.median_ns(),
329            std_dev_ns: self.std_dev_ns(),
330            min_ns: self.min_ns(),
331            max_ns: self.max_ns(),
332            p95_ns: self.percentile_ns(95.0),
333            p99_ns: self.percentile_ns(99.0),
334        }
335    }
336}
337
/// Statistical summary of benchmark results.
///
/// Produced by [`BenchReport::summary`]; serializable to JSON via serde.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSummary {
    /// Name of the benchmark.
    pub name: String,
    /// Number of measured iterations.
    pub iterations: u32,
    /// Number of warmup iterations.
    pub warmup: u32,
    /// Mean duration in nanoseconds.
    pub mean_ns: f64,
    /// Median duration in nanoseconds.
    pub median_ns: f64,
    /// Standard deviation in nanoseconds.
    pub std_dev_ns: f64,
    /// Minimum duration in nanoseconds.
    pub min_ns: u64,
    /// Maximum duration in nanoseconds.
    pub max_ns: u64,
    /// 95th percentile in nanoseconds.
    pub p95_ns: f64,
    /// 99th percentile in nanoseconds.
    pub p99_ns: f64,
}
362
/// Flat semantic phase timing captured during a benchmark run.
///
/// Produced via [`profile_phase`]; durations for phases that share a name
/// are summed across measured iterations.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct SemanticPhase {
    /// Phase label, as passed to [`profile_phase`].
    pub name: String,
    /// Total nanoseconds spent in this phase across measured iterations.
    pub duration_ns: u64,
}
369
/// Per-thread accumulator behind [`profile_phase`].
///
/// Collection is off by default; the `run_closure*` functions enable it for
/// the measured iterations only, so warmup/setup phases are ignored.
#[derive(Default)]
struct SemanticPhaseCollector {
    // Whether phases should currently be recorded.
    enabled: bool,
    // Current nesting depth of active `profile_phase` calls; only depth-0
    // phases are recorded (nested phases are collapsed into their parent).
    depth: usize,
    // Aggregated phase timings, keyed by name, in first-seen order.
    phases: Vec<SemanticPhase>,
}
376
377impl SemanticPhaseCollector {
378    fn reset(&mut self) {
379        self.enabled = false;
380        self.depth = 0;
381        self.phases.clear();
382    }
383
384    fn begin_measurement(&mut self) {
385        self.reset();
386        self.enabled = true;
387    }
388
389    fn finish(&mut self) -> Vec<SemanticPhase> {
390        self.enabled = false;
391        self.depth = 0;
392        std::mem::take(&mut self.phases)
393    }
394
395    fn enter_phase(&mut self) -> Option<bool> {
396        if !self.enabled {
397            return None;
398        }
399        let top_level = self.depth == 0;
400        self.depth += 1;
401        Some(top_level)
402    }
403
404    fn exit_phase(&mut self, name: &str, top_level: bool, elapsed: Duration) {
405        self.depth = self.depth.saturating_sub(1);
406        if !self.enabled || !top_level {
407            return;
408        }
409
410        let duration_ns = elapsed.as_nanos().min(u128::from(u64::MAX)) as u64;
411        if let Some(phase) = self.phases.iter_mut().find(|phase| phase.name == name) {
412            phase.duration_ns = phase.duration_ns.saturating_add(duration_ns);
413        } else {
414            self.phases.push(SemanticPhase {
415                name: name.to_string(),
416                duration_ns,
417            });
418        }
419    }
420}
421
thread_local! {
    // One collector per thread: measurement enables only the collector on the
    // benchmark thread, so `profile_phase` calls made from any other thread
    // see a disabled collector and record nothing.
    static SEMANTIC_PHASE_COLLECTOR: RefCell<SemanticPhaseCollector> =
        RefCell::new(SemanticPhaseCollector::default());
}
426
/// Guard that records a semantic phase's elapsed time when dropped.
struct SemanticPhaseGuard {
    // Phase label to record; empty for inert guards.
    name: String,
    // `Some(start)` for an armed guard; `None` marks an inert guard created
    // while collection was disabled, whose Drop is a no-op.
    started_at: Option<Instant>,
    // Whether the phase was entered at depth 0; nested phases are dropped.
    top_level: bool,
}
432
433impl Drop for SemanticPhaseGuard {
434    fn drop(&mut self) {
435        let Some(started_at) = self.started_at else {
436            return;
437        };
438
439        let elapsed = started_at.elapsed();
440        SEMANTIC_PHASE_COLLECTOR.with(|collector| {
441            collector
442                .borrow_mut()
443                .exit_phase(&self.name, self.top_level, elapsed);
444        });
445    }
446}
447
/// Clears the calling thread's phase collector and disables collection.
fn reset_semantic_phase_collection() {
    SEMANTIC_PHASE_COLLECTOR.with(|collector| collector.borrow_mut().reset());
}

/// Enables phase collection on the calling thread, clearing prior state.
fn begin_semantic_phase_collection() {
    SEMANTIC_PHASE_COLLECTOR.with(|collector| collector.borrow_mut().begin_measurement());
}

/// Disables collection and drains the phases recorded on the calling thread.
fn finish_semantic_phase_collection() -> Vec<SemanticPhase> {
    SEMANTIC_PHASE_COLLECTOR.with(|collector| collector.borrow_mut().finish())
}
459
460/// Records a flat semantic phase when called inside an active benchmark measurement loop.
461///
462/// Phases are aggregated across measured iterations and ignored during warmup/setup.
463/// Nested phases are intentionally collapsed in v1 to keep the output flat.
464pub fn profile_phase<T>(name: &str, f: impl FnOnce() -> T) -> T {
465    let guard = SEMANTIC_PHASE_COLLECTOR.with(|collector| {
466        let mut collector = collector.borrow_mut();
467        match collector.enter_phase() {
468            Some(top_level) => SemanticPhaseGuard {
469                name: name.to_string(),
470                started_at: Some(Instant::now()),
471                top_level,
472            },
473            None => SemanticPhaseGuard {
474                name: String::new(),
475                started_at: None,
476                top_level: false,
477            },
478        }
479    });
480
481    let result = f();
482    drop(guard);
483    result
484}
485
/// Errors that can occur during benchmark execution.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::{BenchSpec, TimingError};
///
/// // Zero iterations produces an error
/// let result = BenchSpec::new("test", 0, 10);
/// assert!(matches!(result, Err(TimingError::NoIterations { .. })));
/// ```
#[derive(Debug, Error)]
pub enum TimingError {
    /// The iteration count was zero or invalid.
    ///
    /// At least one iteration is required to produce a measurement.
    /// The error includes the actual value provided for diagnostic purposes.
    #[error("iterations must be greater than zero (got {count}). Minimum recommended: 10")]
    NoIterations {
        /// The invalid iteration count that was provided.
        count: u32,
    },

    /// The benchmark function failed during execution.
    ///
    /// Contains a description of the failure. When the benchmark closure
    /// returns this, the `run_closure*` functions stop immediately and
    /// discard any samples collected so far.
    #[error("benchmark function failed: {0}")]
    Execution(String),
}
515
516/// Runs a benchmark by executing a closure repeatedly.
517///
518/// This is the core benchmarking function. It:
519///
520/// 1. Executes the closure `spec.warmup` times without recording
521/// 2. Executes the closure `spec.iterations` times, recording each duration
522/// 3. Returns a [`BenchReport`] with all samples
523///
524/// # Arguments
525///
526/// * `spec` - Benchmark configuration specifying iterations and warmup
527/// * `f` - Closure to benchmark; must return `Result<(), TimingError>`
528///
529/// # Returns
530///
531/// A [`BenchReport`] containing all timing samples, or a [`TimingError`] if
532/// the benchmark fails.
533///
534/// # Example
535///
536/// ```
537/// use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
538///
539/// let spec = BenchSpec::new("sum_benchmark", 100, 10)?;
540///
541/// let report = run_closure(spec, || {
542///     let sum: u64 = (0..1000).sum();
543///     std::hint::black_box(sum);
544///     Ok(())
545/// })?;
546///
547/// assert_eq!(report.samples.len(), 100);
548///
549/// // Calculate mean duration
550/// let total_ns: u64 = report.samples.iter().map(|s| s.duration_ns).sum();
551/// let mean_ns = total_ns / report.samples.len() as u64;
552/// println!("Mean: {} ns", mean_ns);
553/// # Ok::<(), TimingError>(())
554/// ```
555///
556/// # Error Handling
557///
558/// If the closure returns an error, the benchmark stops immediately:
559///
560/// ```
561/// use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
562///
563/// let spec = BenchSpec::new("failing_bench", 100, 0)?;
564///
565/// let result = run_closure(spec, || {
566///     Err(TimingError::Execution("simulated failure".into()))
567/// });
568///
569/// assert!(result.is_err());
570/// # Ok::<(), TimingError>(())
571/// ```
572///
573/// # Timing Precision
574///
575/// Uses [`std::time::Instant`] for timing, which provides monotonic,
576/// nanosecond-resolution measurements on most platforms.
577pub fn run_closure<F>(spec: BenchSpec, mut f: F) -> Result<BenchReport, TimingError>
578where
579    F: FnMut() -> Result<(), TimingError>,
580{
581    if spec.iterations == 0 {
582        return Err(TimingError::NoIterations {
583            count: spec.iterations,
584        });
585    }
586
587    reset_semantic_phase_collection();
588
589    // Warmup phase - not measured
590    for _ in 0..spec.warmup {
591        f()?;
592    }
593
594    // Measurement phase
595    begin_semantic_phase_collection();
596    let mut samples = Vec::with_capacity(spec.iterations as usize);
597    for _ in 0..spec.iterations {
598        let start = Instant::now();
599        if let Err(err) = f() {
600            let _ = finish_semantic_phase_collection();
601            return Err(err);
602        }
603        samples.push(BenchSample::from_duration(start.elapsed()));
604    }
605    let phases = finish_semantic_phase_collection();
606
607    Ok(BenchReport {
608        spec,
609        samples,
610        phases,
611    })
612}
613
614/// Runs a benchmark with setup that executes once before all iterations.
615///
616/// The setup function is called once before timing begins, then the benchmark
617/// runs multiple times using a reference to the setup result. This is useful
618/// for expensive initialization that shouldn't be included in timing.
619///
620/// # Arguments
621///
622/// * `spec` - Benchmark configuration specifying iterations and warmup
623/// * `setup` - Function that creates the input data (called once, not timed)
624/// * `f` - Benchmark closure that receives a reference to setup result
625///
626/// # Example
627///
628/// ```ignore
629/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup};
630///
631/// fn setup_data() -> Vec<u8> {
632///     vec![0u8; 1_000_000]  // Expensive allocation not measured
633/// }
634///
635/// let spec = BenchSpec::new("hash_benchmark", 100, 10)?;
636/// let report = run_closure_with_setup(spec, setup_data, |data| {
637///     std::hint::black_box(compute_hash(data));
638///     Ok(())
639/// })?;
640/// ```
641pub fn run_closure_with_setup<S, T, F>(
642    spec: BenchSpec,
643    setup: S,
644    mut f: F,
645) -> Result<BenchReport, TimingError>
646where
647    S: FnOnce() -> T,
648    F: FnMut(&T) -> Result<(), TimingError>,
649{
650    if spec.iterations == 0 {
651        return Err(TimingError::NoIterations {
652            count: spec.iterations,
653        });
654    }
655
656    reset_semantic_phase_collection();
657
658    // Setup phase - not timed
659    let input = setup();
660
661    // Warmup phase - not recorded
662    for _ in 0..spec.warmup {
663        f(&input)?;
664    }
665
666    // Measurement phase
667    begin_semantic_phase_collection();
668    let mut samples = Vec::with_capacity(spec.iterations as usize);
669    for _ in 0..spec.iterations {
670        let start = Instant::now();
671        if let Err(err) = f(&input) {
672            let _ = finish_semantic_phase_collection();
673            return Err(err);
674        }
675        samples.push(BenchSample::from_duration(start.elapsed()));
676    }
677    let phases = finish_semantic_phase_collection();
678
679    Ok(BenchReport {
680        spec,
681        samples,
682        phases,
683    })
684}
685
686/// Runs a benchmark with per-iteration setup.
687///
688/// Setup runs before each iteration and is not timed. The benchmark takes
689/// ownership of the setup result, making this suitable for benchmarks that
690/// mutate their input (e.g., sorting).
691///
692/// # Arguments
693///
694/// * `spec` - Benchmark configuration specifying iterations and warmup
695/// * `setup` - Function that creates fresh input for each iteration (not timed)
696/// * `f` - Benchmark closure that takes ownership of setup result
697///
698/// # Example
699///
700/// ```ignore
701/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup_per_iter};
702///
703/// fn generate_random_vec() -> Vec<i32> {
704///     (0..1000).map(|_| rand::random()).collect()
705/// }
706///
707/// let spec = BenchSpec::new("sort_benchmark", 100, 10)?;
708/// let report = run_closure_with_setup_per_iter(spec, generate_random_vec, |mut data| {
709///     data.sort();
710///     std::hint::black_box(data);
711///     Ok(())
712/// })?;
713/// ```
714pub fn run_closure_with_setup_per_iter<S, T, F>(
715    spec: BenchSpec,
716    mut setup: S,
717    mut f: F,
718) -> Result<BenchReport, TimingError>
719where
720    S: FnMut() -> T,
721    F: FnMut(T) -> Result<(), TimingError>,
722{
723    if spec.iterations == 0 {
724        return Err(TimingError::NoIterations {
725            count: spec.iterations,
726        });
727    }
728
729    reset_semantic_phase_collection();
730
731    // Warmup phase
732    for _ in 0..spec.warmup {
733        let input = setup();
734        f(input)?;
735    }
736
737    // Measurement phase
738    begin_semantic_phase_collection();
739    let mut samples = Vec::with_capacity(spec.iterations as usize);
740    for _ in 0..spec.iterations {
741        let input = setup(); // Not timed
742
743        let start = Instant::now();
744        if let Err(err) = f(input) {
745            let _ = finish_semantic_phase_collection();
746            return Err(err);
747        }
748        samples.push(BenchSample::from_duration(start.elapsed()));
749    }
750    let phases = finish_semantic_phase_collection();
751
752    Ok(BenchReport {
753        spec,
754        samples,
755        phases,
756    })
757}
758
759/// Runs a benchmark with setup and teardown.
760///
761/// Setup runs once before all iterations, teardown runs once after all
762/// iterations complete. Neither is included in timing.
763///
764/// # Arguments
765///
766/// * `spec` - Benchmark configuration specifying iterations and warmup
767/// * `setup` - Function that creates the input data (called once, not timed)
768/// * `f` - Benchmark closure that receives a reference to setup result
769/// * `teardown` - Function that cleans up the input (called once, not timed)
770///
771/// # Example
772///
773/// ```ignore
774/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup_teardown};
775///
776/// fn setup_db() -> Database { Database::connect("test.db") }
777/// fn cleanup_db(db: Database) { db.close(); std::fs::remove_file("test.db").ok(); }
778///
779/// let spec = BenchSpec::new("db_benchmark", 100, 10)?;
780/// let report = run_closure_with_setup_teardown(
781///     spec,
782///     setup_db,
783///     |db| { db.query("SELECT *"); Ok(()) },
784///     cleanup_db,
785/// )?;
786/// ```
787pub fn run_closure_with_setup_teardown<S, T, F, D>(
788    spec: BenchSpec,
789    setup: S,
790    mut f: F,
791    teardown: D,
792) -> Result<BenchReport, TimingError>
793where
794    S: FnOnce() -> T,
795    F: FnMut(&T) -> Result<(), TimingError>,
796    D: FnOnce(T),
797{
798    if spec.iterations == 0 {
799        return Err(TimingError::NoIterations {
800            count: spec.iterations,
801        });
802    }
803
804    reset_semantic_phase_collection();
805
806    // Setup phase - not timed
807    let input = setup();
808
809    // Warmup phase
810    for _ in 0..spec.warmup {
811        f(&input)?;
812    }
813
814    // Measurement phase
815    begin_semantic_phase_collection();
816    let mut samples = Vec::with_capacity(spec.iterations as usize);
817    for _ in 0..spec.iterations {
818        let start = Instant::now();
819        if let Err(err) = f(&input) {
820            let _ = finish_semantic_phase_collection();
821            return Err(err);
822        }
823        samples.push(BenchSample::from_duration(start.elapsed()));
824    }
825    let phases = finish_semantic_phase_collection();
826
827    // Teardown phase - not timed
828    teardown(input);
829
830    Ok(BenchReport {
831        spec,
832        samples,
833        phases,
834    })
835}
836
#[cfg(test)]
mod tests {
    //! Unit tests for the timing harness, the semantic phase collector, and
    //! the setup/teardown runner variants.

    use super::*;

    /// The harness must produce exactly `iterations` samples and echo the spec.
    #[test]
    fn runs_benchmark_collects_requested_samples() {
        let spec = BenchSpec::new("noop", 3, 1).unwrap();
        let report = run_closure(spec, || Ok(())).unwrap();

        assert_eq!(report.samples.len(), 3);
        assert_eq!(report.spec.name, "noop");
        assert_eq!(report.spec.iterations, 3);
    }

    /// `BenchSpec::new` must reject a zero iteration count with `NoIterations`.
    #[test]
    fn rejects_zero_iterations() {
        let result = BenchSpec::new("test", 0, 10);
        assert!(matches!(
            result,
            Err(TimingError::NoIterations { count: 0 })
        ));
    }

    /// Warmup is optional: zero warmup iterations is a valid configuration.
    #[test]
    fn allows_zero_warmup() {
        let spec = BenchSpec::new("test", 5, 0).unwrap();
        assert_eq!(spec.warmup, 0);

        let report = run_closure(spec, || Ok(())).unwrap();
        assert_eq!(report.samples.len(), 5);
    }

    /// A report (including phases) must round-trip through serde JSON.
    #[test]
    fn serializes_to_json() {
        let spec = BenchSpec::new("test", 10, 2).unwrap();
        let report = run_closure(spec, || {
            // Sleep so the recorded phase duration is reliably non-zero.
            profile_phase("prove", || std::thread::sleep(Duration::from_millis(1)));
            Ok(())
        })
        .unwrap();

        let json = serde_json::to_string(&report).unwrap();
        let restored: BenchReport = serde_json::from_str(&json).unwrap();

        assert_eq!(restored.spec.name, "test");
        assert_eq!(restored.samples.len(), 10);
        assert_eq!(restored.phases.len(), 1);
        assert_eq!(restored.phases[0].name, "prove");
        assert!(restored.phases[0].duration_ns > 0);
    }

    /// Phases recorded during the warmup call must not appear in the report;
    /// the first closure invocation (call_index 0) is the single warmup run.
    #[test]
    fn profile_phase_records_only_measured_iterations() {
        let spec = BenchSpec::new("semantic", 2, 1).unwrap();
        let mut call_index = 0u32;
        let report = run_closure(spec, || {
            let phase_name = if call_index == 0 {
                "warmup-only"
            } else {
                "prove"
            };
            call_index += 1;
            profile_phase(phase_name, || std::thread::sleep(Duration::from_millis(1)));
            Ok(())
        })
        .unwrap();

        assert!(
            !report
                .phases
                .iter()
                .any(|phase| phase.name == "warmup-only"),
            "warmup phases should not be recorded"
        );
        let prove = report
            .phases
            .iter()
            .find(|phase| phase.name == "prove")
            .expect("prove phase");
        assert!(prove.duration_ns > 0);
    }

    /// Nested `profile_phase` calls must collapse into the outer phase:
    /// only top-level (depth 0) phases produce entries in the v1 flat model.
    #[test]
    fn profile_phase_keeps_the_v1_model_flat() {
        let spec = BenchSpec::new("semantic-flat", 1, 0).unwrap();
        let report = run_closure(spec, || {
            profile_phase("prove", || {
                std::thread::sleep(Duration::from_millis(1));
                profile_phase("inner", || std::thread::sleep(Duration::from_millis(1)));
            });
            Ok(())
        })
        .unwrap();

        assert!(report.phases.iter().any(|phase| phase.name == "prove"));
        assert!(
            !report.phases.iter().any(|phase| phase.name == "inner"),
            "nested phases should not create a second flat phase entry"
        );
    }

    /// Once-only setup: the setup closure runs a single time while the
    /// benchmark closure runs warmup + measured times.
    #[test]
    fn run_with_setup_calls_setup_once() {
        use std::sync::atomic::{AtomicU32, Ordering};

        // Function-local statics, so counts are isolated to this test.
        static SETUP_COUNT: AtomicU32 = AtomicU32::new(0);
        static RUN_COUNT: AtomicU32 = AtomicU32::new(0);

        let spec = BenchSpec::new("test", 5, 2).unwrap();
        let report = run_closure_with_setup(
            spec,
            || {
                SETUP_COUNT.fetch_add(1, Ordering::SeqCst);
                vec![1, 2, 3]
            },
            |data| {
                RUN_COUNT.fetch_add(1, Ordering::SeqCst);
                std::hint::black_box(data.len());
                Ok(())
            },
        )
        .unwrap();

        assert_eq!(SETUP_COUNT.load(Ordering::SeqCst), 1); // Setup called once
        assert_eq!(RUN_COUNT.load(Ordering::SeqCst), 7); // 2 warmup + 5 iterations
        assert_eq!(report.samples.len(), 5);
    }

    /// Per-iteration setup: the setup closure runs once per warmup and
    /// once per measured iteration.
    #[test]
    fn run_with_setup_per_iter_calls_setup_each_time() {
        use std::sync::atomic::{AtomicU32, Ordering};

        static SETUP_COUNT: AtomicU32 = AtomicU32::new(0);

        let spec = BenchSpec::new("test", 3, 1).unwrap();
        let report = run_closure_with_setup_per_iter(
            spec,
            || {
                SETUP_COUNT.fetch_add(1, Ordering::SeqCst);
                vec![1, 2, 3]
            },
            |data| {
                std::hint::black_box(data);
                Ok(())
            },
        )
        .unwrap();

        assert_eq!(SETUP_COUNT.load(Ordering::SeqCst), 4); // 1 warmup + 3 iterations
        assert_eq!(report.samples.len(), 3);
    }

    /// Setup and teardown each run exactly once around a successful run.
    #[test]
    fn run_with_setup_teardown_calls_both() {
        use std::sync::atomic::{AtomicU32, Ordering};

        static SETUP_COUNT: AtomicU32 = AtomicU32::new(0);
        static TEARDOWN_COUNT: AtomicU32 = AtomicU32::new(0);

        let spec = BenchSpec::new("test", 3, 1).unwrap();
        let report = run_closure_with_setup_teardown(
            spec,
            || {
                SETUP_COUNT.fetch_add(1, Ordering::SeqCst);
                "resource"
            },
            |_resource| Ok(()),
            |_resource| {
                TEARDOWN_COUNT.fetch_add(1, Ordering::SeqCst);
            },
        )
        .unwrap();

        assert_eq!(SETUP_COUNT.load(Ordering::SeqCst), 1);
        assert_eq!(TEARDOWN_COUNT.load(Ordering::SeqCst), 1);
        assert_eq!(report.samples.len(), 3);
    }
}