// mobench_sdk/timing.rs
1//! Lightweight benchmarking harness for mobile platforms.
2//!
3//! This module provides the core timing infrastructure for the mobench ecosystem.
4//! It was previously a separate crate (`mobench-runner`) but has been consolidated
5//! into `mobench-sdk` for a simpler dependency graph.
6//!
7//! The module is designed to be minimal and portable, with no platform-specific
8//! dependencies, making it suitable for compilation to Android and iOS targets.
9//!
10//! ## Overview
11//!
12//! The timing module executes benchmark functions with:
13//! - Configurable warmup iterations
14//! - Precise nanosecond-resolution timing
15//! - Simple, serializable results
16//!
17//! ## Usage
18//!
19//! Most users should use this via the higher-level [`crate::run_benchmark`] function
20//! or [`crate::BenchmarkBuilder`]. Direct usage is for custom integrations:
21//!
22//! ```
23//! use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
24//!
25//! // Define a benchmark specification
26//! let spec = BenchSpec::new("my_benchmark", 100, 10)?;
27//!
28//! // Run the benchmark
29//! let report = run_closure(spec, || {
30//!     // Your benchmark code
31//!     let sum: u64 = (0..1000).sum();
32//!     std::hint::black_box(sum);
33//!     Ok(())
34//! })?;
35//!
36//! // Analyze results
37//! let mean_ns = report.samples.iter()
38//!     .map(|s| s.duration_ns)
39//!     .sum::<u64>() / report.samples.len() as u64;
40//!
41//! println!("Mean: {} ns", mean_ns);
42//! # Ok::<(), TimingError>(())
43//! ```
44//!
45//! ## Types
46//!
47//! | Type | Description |
48//! |------|-------------|
49//! | [`BenchSpec`] | Benchmark configuration (name, iterations, warmup) |
50//! | [`BenchSample`] | Single timing measurement in nanoseconds |
51//! | [`BenchReport`] | Complete results with all samples |
52//! | [`TimingError`] | Error conditions during benchmarking |
53//!
54//! ## Feature Flags
55//!
56//! This module is always available. When using `mobench-sdk` with default features,
57//! you also get build automation and template generation. For minimal binary size
58//! (e.g., on mobile targets), use the `runner-only` feature:
59//!
60//! ```toml
61//! [dependencies]
62//! mobench-sdk = { version = "0.1", default-features = false, features = ["runner-only"] }
63//! ```
64
65use serde::{Deserialize, Serialize};
66use std::cell::RefCell;
67use std::time::{Duration, Instant};
68use thiserror::Error;
69
/// Benchmark specification defining what and how to benchmark.
///
/// Contains the benchmark name, number of measurement iterations, and
/// warmup iterations to perform before measuring.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::BenchSpec;
///
/// // Create a spec for 100 iterations with 10 warmup runs
/// let spec = BenchSpec::new("sorting_benchmark", 100, 10)?;
///
/// assert_eq!(spec.name, "sorting_benchmark");
/// assert_eq!(spec.iterations, 100);
/// assert_eq!(spec.warmup, 10);
/// # Ok::<(), mobench_sdk::timing::TimingError>(())
/// ```
///
/// # Serialization
///
/// `BenchSpec` implements `Serialize` and `Deserialize` for JSON persistence:
///
/// ```
/// use mobench_sdk::timing::BenchSpec;
///
/// let spec = BenchSpec {
///     name: "my_bench".to_string(),
///     iterations: 50,
///     warmup: 5,
/// };
///
/// let json = serde_json::to_string(&spec)?;
/// let restored: BenchSpec = serde_json::from_str(&json)?;
///
/// assert_eq!(spec.name, restored.name);
/// # Ok::<(), serde_json::Error>(())
/// ```
///
/// # Invariants
///
/// The fields are public, so the `iterations > 0` invariant is only enforced
/// when constructing via [`BenchSpec::new`]. Specs built with struct literal
/// syntax (as in the serialization example above) can bypass it; the
/// `run_closure*` functions re-validate and reject zero-iteration specs at
/// run time.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSpec {
    /// Name of the benchmark, typically the fully-qualified function name.
    ///
    /// Examples: `"my_crate::fibonacci"`, `"sorting_benchmark"`
    pub name: String,

    /// Number of iterations to measure.
    ///
    /// Each iteration produces one [`BenchSample`]. Must be greater than zero.
    pub iterations: u32,

    /// Number of warmup iterations before measurement.
    ///
    /// Warmup iterations are not recorded. They allow CPU caches to warm
    /// and any JIT compilation to complete. Can be zero.
    pub warmup: u32,
}
126
127impl BenchSpec {
128    /// Creates a new benchmark specification.
129    ///
130    /// # Arguments
131    ///
132    /// * `name` - Name identifier for the benchmark
133    /// * `iterations` - Number of measured iterations (must be > 0)
134    /// * `warmup` - Number of warmup iterations (can be 0)
135    ///
136    /// # Errors
137    ///
138    /// Returns [`TimingError::NoIterations`] if `iterations` is zero.
139    ///
140    /// # Example
141    ///
142    /// ```
143    /// use mobench_sdk::timing::BenchSpec;
144    ///
145    /// let spec = BenchSpec::new("test", 100, 10)?;
146    /// assert_eq!(spec.iterations, 100);
147    ///
148    /// // Zero iterations is an error
149    /// let err = BenchSpec::new("test", 0, 10);
150    /// assert!(err.is_err());
151    /// # Ok::<(), mobench_sdk::timing::TimingError>(())
152    /// ```
153    pub fn new(name: impl Into<String>, iterations: u32, warmup: u32) -> Result<Self, TimingError> {
154        if iterations == 0 {
155            return Err(TimingError::NoIterations { count: iterations });
156        }
157
158        Ok(Self {
159            name: name.into(),
160            iterations,
161            warmup,
162        })
163    }
164}
165
/// A single timing sample from a benchmark iteration.
///
/// Contains the elapsed time in nanoseconds for one execution of the
/// benchmark function.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::BenchSample;
///
/// let sample = BenchSample { duration_ns: 1_500_000 };
///
/// // Convert to milliseconds
/// let ms = sample.duration_ns as f64 / 1_000_000.0;
/// assert_eq!(ms, 1.5);
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSample {
    /// Duration of the iteration in nanoseconds.
    ///
    /// Measured using [`std::time::Instant`] for monotonic, high-resolution timing.
    /// A `u64` nanosecond count covers roughly 584 years, far beyond any
    /// realistic single iteration.
    pub duration_ns: u64,
}
189
190impl BenchSample {
191    /// Creates a sample from a [`Duration`].
192    fn from_duration(duration: Duration) -> Self {
193        Self {
194            duration_ns: duration.as_nanos() as u64,
195        }
196    }
197}
198
/// Complete benchmark report with all timing samples.
///
/// Contains the original specification and all collected samples.
/// Can be serialized to JSON for storage or transmission.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::{BenchSpec, run_closure};
///
/// let spec = BenchSpec::new("example", 50, 5)?;
/// let report = run_closure(spec, || {
///     std::hint::black_box(42);
///     Ok(())
/// })?;
///
/// // Calculate statistics
/// let samples: Vec<u64> = report.samples.iter()
///     .map(|s| s.duration_ns)
///     .collect();
///
/// let min = samples.iter().min().unwrap();
/// let max = samples.iter().max().unwrap();
/// let mean = samples.iter().sum::<u64>() / samples.len() as u64;
///
/// println!("Min: {} ns, Max: {} ns, Mean: {} ns", min, max, mean);
/// # Ok::<(), mobench_sdk::timing::TimingError>(())
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchReport {
    /// The specification used for this benchmark run.
    pub spec: BenchSpec,

    /// All collected timing samples.
    ///
    /// The length equals `spec.iterations`. Samples are in execution order.
    pub samples: Vec<BenchSample>,

    /// Optional semantic phase timings captured during measured iterations.
    ///
    /// Populated via [`profile_phase`]; empty if the benchmark closure never
    /// calls it.
    pub phases: Vec<SemanticPhase>,

    /// Exact harness timeline spans in execution order.
    ///
    /// Span labels emitted by this module are `"setup"`, `"warmup-benchmark"`,
    /// `"fixture-setup"`, `"measured-benchmark"`, and `"teardown"`, depending
    /// on which `run_closure*` variant was used.
    pub timeline: Vec<HarnessTimelineSpan>,
}
243
/// One exact span on the harness timeline.
///
/// Offsets are nanoseconds relative to the harness origin, i.e. the instant
/// captured at the start of the `run_closure*` call that produced the report.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct HarnessTimelineSpan {
    /// Phase label, e.g. `"setup"`, `"warmup-benchmark"`, `"fixture-setup"`,
    /// `"measured-benchmark"`, or `"teardown"`.
    pub phase: String,
    /// Span start, in nanoseconds since the harness origin.
    pub start_offset_ns: u64,
    /// Span end, in nanoseconds since the harness origin.
    pub end_offset_ns: u64,
    /// Iteration index for per-iteration phases; `None` for one-shot phases
    /// such as setup and teardown.
    pub iteration: Option<u32>,
}
251
252impl BenchReport {
253    /// Returns the mean (average) duration in nanoseconds.
254    #[must_use]
255    pub fn mean_ns(&self) -> f64 {
256        if self.samples.is_empty() {
257            return 0.0;
258        }
259        let sum: u64 = self.samples.iter().map(|s| s.duration_ns).sum();
260        sum as f64 / self.samples.len() as f64
261    }
262
263    /// Returns the median duration in nanoseconds.
264    #[must_use]
265    pub fn median_ns(&self) -> f64 {
266        if self.samples.is_empty() {
267            return 0.0;
268        }
269        let mut sorted: Vec<u64> = self.samples.iter().map(|s| s.duration_ns).collect();
270        sorted.sort_unstable();
271        let len = sorted.len();
272        if len % 2 == 0 {
273            (sorted[len / 2 - 1] + sorted[len / 2]) as f64 / 2.0
274        } else {
275            sorted[len / 2] as f64
276        }
277    }
278
279    /// Returns the standard deviation in nanoseconds (sample std dev, n-1).
280    #[must_use]
281    pub fn std_dev_ns(&self) -> f64 {
282        if self.samples.len() < 2 {
283            return 0.0;
284        }
285        let mean = self.mean_ns();
286        let variance: f64 = self
287            .samples
288            .iter()
289            .map(|s| {
290                let diff = s.duration_ns as f64 - mean;
291                diff * diff
292            })
293            .sum::<f64>()
294            / (self.samples.len() - 1) as f64;
295        variance.sqrt()
296    }
297
298    /// Returns the given percentile (0-100) in nanoseconds.
299    #[must_use]
300    pub fn percentile_ns(&self, p: f64) -> f64 {
301        if self.samples.is_empty() {
302            return 0.0;
303        }
304        let mut sorted: Vec<u64> = self.samples.iter().map(|s| s.duration_ns).collect();
305        sorted.sort_unstable();
306        let p = p.clamp(0.0, 100.0) / 100.0;
307        let index = (p * (sorted.len() - 1) as f64).round() as usize;
308        sorted[index.min(sorted.len() - 1)] as f64
309    }
310
311    /// Returns the minimum duration in nanoseconds.
312    #[must_use]
313    pub fn min_ns(&self) -> u64 {
314        self.samples
315            .iter()
316            .map(|s| s.duration_ns)
317            .min()
318            .unwrap_or(0)
319    }
320
321    /// Returns the maximum duration in nanoseconds.
322    #[must_use]
323    pub fn max_ns(&self) -> u64 {
324        self.samples
325            .iter()
326            .map(|s| s.duration_ns)
327            .max()
328            .unwrap_or(0)
329    }
330
331    /// Returns a statistical summary of the benchmark results.
332    #[must_use]
333    pub fn summary(&self) -> BenchSummary {
334        BenchSummary {
335            name: self.spec.name.clone(),
336            iterations: self.samples.len() as u32,
337            warmup: self.spec.warmup,
338            mean_ns: self.mean_ns(),
339            median_ns: self.median_ns(),
340            std_dev_ns: self.std_dev_ns(),
341            min_ns: self.min_ns(),
342            max_ns: self.max_ns(),
343            p95_ns: self.percentile_ns(95.0),
344            p99_ns: self.percentile_ns(99.0),
345        }
346    }
347}
348
/// Converts `instant` to a nanosecond offset from `origin`, saturating at
/// `u64::MAX` rather than truncating.
fn instant_offset_ns(origin: Instant, instant: Instant) -> u64 {
    let elapsed = instant.duration_since(origin);
    elapsed.as_nanos().min(u128::from(u64::MAX)) as u64
}
355
356fn push_timeline_span(
357    timeline: &mut Vec<HarnessTimelineSpan>,
358    origin: Instant,
359    phase: &str,
360    started_at: Instant,
361    ended_at: Instant,
362    iteration: Option<u32>,
363) {
364    timeline.push(HarnessTimelineSpan {
365        phase: phase.to_string(),
366        start_offset_ns: instant_offset_ns(origin, started_at),
367        end_offset_ns: instant_offset_ns(origin, ended_at),
368        iteration,
369    });
370}
371
/// Statistical summary of benchmark results.
///
/// Produced by [`BenchReport::summary`]; all statistics are derived from the
/// report's samples.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSummary {
    /// Name of the benchmark.
    pub name: String,
    /// Number of measured iterations (length of the sample list).
    pub iterations: u32,
    /// Number of warmup iterations.
    pub warmup: u32,
    /// Mean duration in nanoseconds.
    pub mean_ns: f64,
    /// Median duration in nanoseconds.
    pub median_ns: f64,
    /// Standard deviation in nanoseconds (sample std dev, n-1).
    pub std_dev_ns: f64,
    /// Minimum duration in nanoseconds.
    pub min_ns: u64,
    /// Maximum duration in nanoseconds.
    pub max_ns: u64,
    /// 95th percentile in nanoseconds (see [`BenchReport::percentile_ns`]).
    pub p95_ns: f64,
    /// 99th percentile in nanoseconds (see [`BenchReport::percentile_ns`]).
    pub p99_ns: f64,
}
396
/// Flat semantic phase timing captured during a benchmark run.
///
/// Produced via [`profile_phase`]; calls with the same name during measured
/// iterations are aggregated into a single entry.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct SemanticPhase {
    /// Phase label, as passed to [`profile_phase`].
    pub name: String,
    /// Total time spent in this phase, summed across measured iterations,
    /// in nanoseconds.
    pub duration_ns: u64,
}
403
/// Thread-local accumulator state behind [`profile_phase`].
///
/// Collection only happens while `enabled` is set (during the measured
/// iterations of a `run_closure*` call); `depth` tracks phase nesting so
/// that only top-level phases are recorded.
#[derive(Default)]
struct SemanticPhaseCollector {
    /// True only between `begin_measurement()` and `finish()`/`reset()`.
    enabled: bool,
    /// Current `profile_phase` nesting depth; 0 means not inside a phase.
    depth: usize,
    /// Aggregated top-level phases, in first-seen order.
    phases: Vec<SemanticPhase>,
}
410
411impl SemanticPhaseCollector {
412    fn reset(&mut self) {
413        self.enabled = false;
414        self.depth = 0;
415        self.phases.clear();
416    }
417
418    fn begin_measurement(&mut self) {
419        self.reset();
420        self.enabled = true;
421    }
422
423    fn finish(&mut self) -> Vec<SemanticPhase> {
424        self.enabled = false;
425        self.depth = 0;
426        std::mem::take(&mut self.phases)
427    }
428
429    fn enter_phase(&mut self) -> Option<bool> {
430        if !self.enabled {
431            return None;
432        }
433        let top_level = self.depth == 0;
434        self.depth += 1;
435        Some(top_level)
436    }
437
438    fn exit_phase(&mut self, name: &str, top_level: bool, elapsed: Duration) {
439        self.depth = self.depth.saturating_sub(1);
440        if !self.enabled || !top_level {
441            return;
442        }
443
444        let duration_ns = elapsed.as_nanos().min(u128::from(u64::MAX)) as u64;
445        if let Some(phase) = self.phases.iter_mut().find(|phase| phase.name == name) {
446            phase.duration_ns = phase.duration_ns.saturating_add(duration_ns);
447        } else {
448            self.phases.push(SemanticPhase {
449                name: name.to_string(),
450                duration_ns,
451            });
452        }
453    }
454}
455
thread_local! {
    /// Per-thread collector backing [`profile_phase`]. Thread-local storage
    /// avoids locking: a `run_closure*` call invokes the benchmark closure
    /// inline, so measured iterations and the phases they record share one
    /// thread.
    static SEMANTIC_PHASE_COLLECTOR: RefCell<SemanticPhaseCollector> =
        RefCell::new(SemanticPhaseCollector::default());
}
460
/// Internal guard created by [`profile_phase`].
///
/// Its `Drop` impl reports the elapsed time to the thread-local collector.
/// `started_at == None` marks an inactive guard (collection was disabled at
/// entry), whose drop is a no-op.
struct SemanticPhaseGuard {
    /// Phase label; empty string for inactive guards.
    name: String,
    /// Start time; `None` when collection was disabled at entry.
    started_at: Option<Instant>,
    /// Whether this phase was entered at nesting depth zero.
    top_level: bool,
}
466
467impl Drop for SemanticPhaseGuard {
468    fn drop(&mut self) {
469        let Some(started_at) = self.started_at else {
470            return;
471        };
472
473        let elapsed = started_at.elapsed();
474        SEMANTIC_PHASE_COLLECTOR.with(|collector| {
475            collector
476                .borrow_mut()
477                .exit_phase(&self.name, self.top_level, elapsed);
478        });
479    }
480}
481
/// Clears and disables the current thread's semantic phase collector.
/// Called at the start of every `run_closure*` variant.
fn reset_semantic_phase_collection() {
    SEMANTIC_PHASE_COLLECTOR.with(|collector| collector.borrow_mut().reset());
}
485
/// Arms the current thread's semantic phase collector; called just before
/// the measured iterations begin so warmup/setup phases are ignored.
fn begin_semantic_phase_collection() {
    SEMANTIC_PHASE_COLLECTOR.with(|collector| collector.borrow_mut().begin_measurement());
}
489
/// Disarms the current thread's collector and returns the phases gathered
/// during the measurement pass (empty if collection was never armed).
fn finish_semantic_phase_collection() -> Vec<SemanticPhase> {
    SEMANTIC_PHASE_COLLECTOR.with(|collector| collector.borrow_mut().finish())
}
493
494/// Records a flat semantic phase when called inside an active benchmark measurement loop.
495///
496/// Phases are aggregated across measured iterations and ignored during warmup/setup.
497/// Nested phases are intentionally collapsed in v1 to keep the output flat.
498pub fn profile_phase<T>(name: &str, f: impl FnOnce() -> T) -> T {
499    let guard = SEMANTIC_PHASE_COLLECTOR.with(|collector| {
500        let mut collector = collector.borrow_mut();
501        match collector.enter_phase() {
502            Some(top_level) => SemanticPhaseGuard {
503                name: name.to_string(),
504                started_at: Some(Instant::now()),
505                top_level,
506            },
507            None => SemanticPhaseGuard {
508                name: String::new(),
509                started_at: None,
510                top_level: false,
511            },
512        }
513    });
514
515    let result = f();
516    drop(guard);
517    result
518}
519
/// Errors that can occur during benchmark execution.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::{BenchSpec, TimingError};
///
/// // Zero iterations produces an error
/// let result = BenchSpec::new("test", 0, 10);
/// assert!(matches!(result, Err(TimingError::NoIterations { .. })));
/// ```
#[derive(Debug, Error)]
pub enum TimingError {
    /// The iteration count was zero or invalid.
    ///
    /// At least one iteration is required to produce a measurement.
    /// The error includes the actual value provided for diagnostic purposes.
    #[error("iterations must be greater than zero (got {count}). Minimum recommended: 10")]
    NoIterations {
        /// The invalid iteration count that was provided.
        count: u32,
    },

    /// The benchmark function failed during execution.
    ///
    /// This is the variant benchmark closures return (or map their own
    /// errors into) to abort a run; it carries a human-readable description
    /// of the failure.
    #[error("benchmark function failed: {0}")]
    Execution(String),
}
549
550/// Runs a benchmark by executing a closure repeatedly.
551///
552/// This is the core benchmarking function. It:
553///
554/// 1. Executes the closure `spec.warmup` times without recording
555/// 2. Executes the closure `spec.iterations` times, recording each duration
556/// 3. Returns a [`BenchReport`] with all samples
557///
558/// # Arguments
559///
560/// * `spec` - Benchmark configuration specifying iterations and warmup
561/// * `f` - Closure to benchmark; must return `Result<(), TimingError>`
562///
563/// # Returns
564///
565/// A [`BenchReport`] containing all timing samples, or a [`TimingError`] if
566/// the benchmark fails.
567///
568/// # Example
569///
570/// ```
571/// use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
572///
573/// let spec = BenchSpec::new("sum_benchmark", 100, 10)?;
574///
575/// let report = run_closure(spec, || {
576///     let sum: u64 = (0..1000).sum();
577///     std::hint::black_box(sum);
578///     Ok(())
579/// })?;
580///
581/// assert_eq!(report.samples.len(), 100);
582///
583/// // Calculate mean duration
584/// let total_ns: u64 = report.samples.iter().map(|s| s.duration_ns).sum();
585/// let mean_ns = total_ns / report.samples.len() as u64;
586/// println!("Mean: {} ns", mean_ns);
587/// # Ok::<(), TimingError>(())
588/// ```
589///
590/// # Error Handling
591///
592/// If the closure returns an error, the benchmark stops immediately:
593///
594/// ```
595/// use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
596///
597/// let spec = BenchSpec::new("failing_bench", 100, 0)?;
598///
599/// let result = run_closure(spec, || {
600///     Err(TimingError::Execution("simulated failure".into()))
601/// });
602///
603/// assert!(result.is_err());
604/// # Ok::<(), TimingError>(())
605/// ```
606///
607/// # Timing Precision
608///
609/// Uses [`std::time::Instant`] for timing, which provides monotonic,
610/// nanosecond-resolution measurements on most platforms.
611pub fn run_closure<F>(spec: BenchSpec, mut f: F) -> Result<BenchReport, TimingError>
612where
613    F: FnMut() -> Result<(), TimingError>,
614{
615    if spec.iterations == 0 {
616        return Err(TimingError::NoIterations {
617            count: spec.iterations,
618        });
619    }
620
621    reset_semantic_phase_collection();
622    let harness_origin = Instant::now();
623    let mut timeline = Vec::new();
624
625    // Warmup phase - not measured
626    for iteration in 0..spec.warmup {
627        let phase_start = Instant::now();
628        f()?;
629        push_timeline_span(
630            &mut timeline,
631            harness_origin,
632            "warmup-benchmark",
633            phase_start,
634            Instant::now(),
635            Some(iteration),
636        );
637    }
638
639    // Measurement phase
640    begin_semantic_phase_collection();
641    let mut samples = Vec::with_capacity(spec.iterations as usize);
642    for iteration in 0..spec.iterations {
643        let start = Instant::now();
644        if let Err(err) = f() {
645            let _ = finish_semantic_phase_collection();
646            return Err(err);
647        }
648        let end = Instant::now();
649        samples.push(BenchSample::from_duration(end.duration_since(start)));
650        push_timeline_span(
651            &mut timeline,
652            harness_origin,
653            "measured-benchmark",
654            start,
655            end,
656            Some(iteration),
657        );
658    }
659    let phases = finish_semantic_phase_collection();
660
661    Ok(BenchReport {
662        spec,
663        samples,
664        phases,
665        timeline,
666    })
667}
668
669/// Runs a benchmark with setup that executes once before all iterations.
670///
671/// The setup function is called once before timing begins, then the benchmark
672/// runs multiple times using a reference to the setup result. This is useful
673/// for expensive initialization that shouldn't be included in timing.
674///
675/// # Arguments
676///
677/// * `spec` - Benchmark configuration specifying iterations and warmup
678/// * `setup` - Function that creates the input data (called once, not timed)
679/// * `f` - Benchmark closure that receives a reference to setup result
680///
681/// # Example
682///
683/// ```ignore
684/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup};
685///
686/// fn setup_data() -> Vec<u8> {
687///     vec![0u8; 1_000_000]  // Expensive allocation not measured
688/// }
689///
690/// let spec = BenchSpec::new("hash_benchmark", 100, 10)?;
691/// let report = run_closure_with_setup(spec, setup_data, |data| {
692///     std::hint::black_box(compute_hash(data));
693///     Ok(())
694/// })?;
695/// ```
696pub fn run_closure_with_setup<S, T, F>(
697    spec: BenchSpec,
698    setup: S,
699    mut f: F,
700) -> Result<BenchReport, TimingError>
701where
702    S: FnOnce() -> T,
703    F: FnMut(&T) -> Result<(), TimingError>,
704{
705    if spec.iterations == 0 {
706        return Err(TimingError::NoIterations {
707            count: spec.iterations,
708        });
709    }
710
711    reset_semantic_phase_collection();
712    let harness_origin = Instant::now();
713    let mut timeline = Vec::new();
714
715    // Setup phase - not timed
716    let setup_start = Instant::now();
717    let input = setup();
718    push_timeline_span(
719        &mut timeline,
720        harness_origin,
721        "setup",
722        setup_start,
723        Instant::now(),
724        None,
725    );
726
727    // Warmup phase - not recorded
728    for iteration in 0..spec.warmup {
729        let phase_start = Instant::now();
730        f(&input)?;
731        push_timeline_span(
732            &mut timeline,
733            harness_origin,
734            "warmup-benchmark",
735            phase_start,
736            Instant::now(),
737            Some(iteration),
738        );
739    }
740
741    // Measurement phase
742    begin_semantic_phase_collection();
743    let mut samples = Vec::with_capacity(spec.iterations as usize);
744    for iteration in 0..spec.iterations {
745        let start = Instant::now();
746        if let Err(err) = f(&input) {
747            let _ = finish_semantic_phase_collection();
748            return Err(err);
749        }
750        let end = Instant::now();
751        samples.push(BenchSample::from_duration(end.duration_since(start)));
752        push_timeline_span(
753            &mut timeline,
754            harness_origin,
755            "measured-benchmark",
756            start,
757            end,
758            Some(iteration),
759        );
760    }
761    let phases = finish_semantic_phase_collection();
762
763    Ok(BenchReport {
764        spec,
765        samples,
766        phases,
767        timeline,
768    })
769}
770
771/// Runs a benchmark with per-iteration setup.
772///
773/// Setup runs before each iteration and is not timed. The benchmark takes
774/// ownership of the setup result, making this suitable for benchmarks that
775/// mutate their input (e.g., sorting).
776///
777/// # Arguments
778///
779/// * `spec` - Benchmark configuration specifying iterations and warmup
780/// * `setup` - Function that creates fresh input for each iteration (not timed)
781/// * `f` - Benchmark closure that takes ownership of setup result
782///
783/// # Example
784///
785/// ```ignore
786/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup_per_iter};
787///
788/// fn generate_random_vec() -> Vec<i32> {
789///     (0..1000).map(|_| rand::random()).collect()
790/// }
791///
792/// let spec = BenchSpec::new("sort_benchmark", 100, 10)?;
793/// let report = run_closure_with_setup_per_iter(spec, generate_random_vec, |mut data| {
794///     data.sort();
795///     std::hint::black_box(data);
796///     Ok(())
797/// })?;
798/// ```
799pub fn run_closure_with_setup_per_iter<S, T, F>(
800    spec: BenchSpec,
801    mut setup: S,
802    mut f: F,
803) -> Result<BenchReport, TimingError>
804where
805    S: FnMut() -> T,
806    F: FnMut(T) -> Result<(), TimingError>,
807{
808    if spec.iterations == 0 {
809        return Err(TimingError::NoIterations {
810            count: spec.iterations,
811        });
812    }
813
814    reset_semantic_phase_collection();
815    let harness_origin = Instant::now();
816    let mut timeline = Vec::new();
817
818    // Warmup phase
819    for iteration in 0..spec.warmup {
820        let setup_start = Instant::now();
821        let input = setup();
822        push_timeline_span(
823            &mut timeline,
824            harness_origin,
825            "fixture-setup",
826            setup_start,
827            Instant::now(),
828            Some(iteration),
829        );
830        let phase_start = Instant::now();
831        f(input)?;
832        push_timeline_span(
833            &mut timeline,
834            harness_origin,
835            "warmup-benchmark",
836            phase_start,
837            Instant::now(),
838            Some(iteration),
839        );
840    }
841
842    // Measurement phase
843    begin_semantic_phase_collection();
844    let mut samples = Vec::with_capacity(spec.iterations as usize);
845    for iteration in 0..spec.iterations {
846        let setup_start = Instant::now();
847        let input = setup(); // Not timed
848        push_timeline_span(
849            &mut timeline,
850            harness_origin,
851            "fixture-setup",
852            setup_start,
853            Instant::now(),
854            Some(iteration),
855        );
856
857        let start = Instant::now();
858        if let Err(err) = f(input) {
859            let _ = finish_semantic_phase_collection();
860            return Err(err);
861        }
862        let end = Instant::now();
863        samples.push(BenchSample::from_duration(end.duration_since(start)));
864        push_timeline_span(
865            &mut timeline,
866            harness_origin,
867            "measured-benchmark",
868            start,
869            end,
870            Some(iteration),
871        );
872    }
873    let phases = finish_semantic_phase_collection();
874
875    Ok(BenchReport {
876        spec,
877        samples,
878        phases,
879        timeline,
880    })
881}
882
883/// Runs a benchmark with setup and teardown.
884///
885/// Setup runs once before all iterations, teardown runs once after all
886/// iterations complete. Neither is included in timing.
887///
888/// # Arguments
889///
890/// * `spec` - Benchmark configuration specifying iterations and warmup
891/// * `setup` - Function that creates the input data (called once, not timed)
892/// * `f` - Benchmark closure that receives a reference to setup result
893/// * `teardown` - Function that cleans up the input (called once, not timed)
894///
895/// # Example
896///
897/// ```ignore
898/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup_teardown};
899///
900/// fn setup_db() -> Database { Database::connect("test.db") }
901/// fn cleanup_db(db: Database) { db.close(); std::fs::remove_file("test.db").ok(); }
902///
903/// let spec = BenchSpec::new("db_benchmark", 100, 10)?;
904/// let report = run_closure_with_setup_teardown(
905///     spec,
906///     setup_db,
907///     |db| { db.query("SELECT *"); Ok(()) },
908///     cleanup_db,
909/// )?;
910/// ```
911pub fn run_closure_with_setup_teardown<S, T, F, D>(
912    spec: BenchSpec,
913    setup: S,
914    mut f: F,
915    teardown: D,
916) -> Result<BenchReport, TimingError>
917where
918    S: FnOnce() -> T,
919    F: FnMut(&T) -> Result<(), TimingError>,
920    D: FnOnce(T),
921{
922    if spec.iterations == 0 {
923        return Err(TimingError::NoIterations {
924            count: spec.iterations,
925        });
926    }
927
928    reset_semantic_phase_collection();
929    let harness_origin = Instant::now();
930    let mut timeline = Vec::new();
931
932    // Setup phase - not timed
933    let setup_start = Instant::now();
934    let input = setup();
935    push_timeline_span(
936        &mut timeline,
937        harness_origin,
938        "setup",
939        setup_start,
940        Instant::now(),
941        None,
942    );
943
944    // Warmup phase
945    for iteration in 0..spec.warmup {
946        let phase_start = Instant::now();
947        f(&input)?;
948        push_timeline_span(
949            &mut timeline,
950            harness_origin,
951            "warmup-benchmark",
952            phase_start,
953            Instant::now(),
954            Some(iteration),
955        );
956    }
957
958    // Measurement phase
959    begin_semantic_phase_collection();
960    let mut samples = Vec::with_capacity(spec.iterations as usize);
961    for iteration in 0..spec.iterations {
962        let start = Instant::now();
963        if let Err(err) = f(&input) {
964            let _ = finish_semantic_phase_collection();
965            return Err(err);
966        }
967        let end = Instant::now();
968        samples.push(BenchSample::from_duration(end.duration_since(start)));
969        push_timeline_span(
970            &mut timeline,
971            harness_origin,
972            "measured-benchmark",
973            start,
974            end,
975            Some(iteration),
976        );
977    }
978    let phases = finish_semantic_phase_collection();
979
980    // Teardown phase - not timed
981    let teardown_start = Instant::now();
982    teardown(input);
983    push_timeline_span(
984        &mut timeline,
985        harness_origin,
986        "teardown",
987        teardown_start,
988        Instant::now(),
989        None,
990    );
991
992    Ok(BenchReport {
993        spec,
994        samples,
995        phases,
996        timeline,
997    })
998}
999
1000#[cfg(test)]
1001mod tests {
1002    use super::*;
1003
    /// End-to-end smoke test: a successful run yields exactly `iterations`
    /// samples and echoes the spec it was run with.
    #[test]
    fn runs_benchmark_collects_requested_samples() {
        let spec = BenchSpec::new("noop", 3, 1).unwrap();
        let report = run_closure(spec, || Ok(())).unwrap();

        assert_eq!(report.samples.len(), 3);
        assert_eq!(report.spec.name, "noop");
        assert_eq!(report.spec.iterations, 3);
    }
1013
    /// `BenchSpec::new` must reject an iteration count of zero and report
    /// the offending value in the error.
    #[test]
    fn rejects_zero_iterations() {
        let result = BenchSpec::new("test", 0, 10);
        assert!(matches!(
            result,
            Err(TimingError::NoIterations { count: 0 })
        ));
    }
1022
1023    #[test]
1024    fn allows_zero_warmup() {
1025        let spec = BenchSpec::new("test", 5, 0).unwrap();
1026        assert_eq!(spec.warmup, 0);
1027
1028        let report = run_closure(spec, || Ok(())).unwrap();
1029        assert_eq!(report.samples.len(), 5);
1030    }
1031
1032    #[test]
1033    fn serializes_to_json() {
1034        let spec = BenchSpec::new("test", 10, 2).unwrap();
1035        let report = run_closure(spec, || {
1036            profile_phase("prove", || std::thread::sleep(Duration::from_millis(1)));
1037            Ok(())
1038        })
1039        .unwrap();
1040
1041        let json = serde_json::to_string(&report).unwrap();
1042        let restored: BenchReport = serde_json::from_str(&json).unwrap();
1043
1044        assert_eq!(restored.spec.name, "test");
1045        assert_eq!(restored.samples.len(), 10);
1046        assert_eq!(restored.phases.len(), 1);
1047        assert_eq!(restored.phases[0].name, "prove");
1048        assert!(restored.phases[0].duration_ns > 0);
1049    }
1050
1051    #[test]
1052    fn profile_phase_records_only_measured_iterations() {
1053        let spec = BenchSpec::new("semantic", 2, 1).unwrap();
1054        let mut call_index = 0u32;
1055        let report = run_closure(spec, || {
1056            let phase_name = if call_index == 0 {
1057                "warmup-only"
1058            } else {
1059                "prove"
1060            };
1061            call_index += 1;
1062            profile_phase(phase_name, || std::thread::sleep(Duration::from_millis(1)));
1063            Ok(())
1064        })
1065        .unwrap();
1066
1067        assert!(
1068            !report
1069                .phases
1070                .iter()
1071                .any(|phase| phase.name == "warmup-only"),
1072            "warmup phases should not be recorded"
1073        );
1074        let prove = report
1075            .phases
1076            .iter()
1077            .find(|phase| phase.name == "prove")
1078            .expect("prove phase");
1079        assert!(prove.duration_ns > 0);
1080    }
1081
1082    #[test]
1083    fn profile_phase_keeps_the_v1_model_flat() {
1084        let spec = BenchSpec::new("semantic-flat", 1, 0).unwrap();
1085        let report = run_closure(spec, || {
1086            profile_phase("prove", || {
1087                std::thread::sleep(Duration::from_millis(1));
1088                profile_phase("inner", || std::thread::sleep(Duration::from_millis(1)));
1089            });
1090            Ok(())
1091        })
1092        .unwrap();
1093
1094        assert!(report.phases.iter().any(|phase| phase.name == "prove"));
1095        assert!(
1096            !report.phases.iter().any(|phase| phase.name == "inner"),
1097            "nested phases should not create a second flat phase entry"
1098        );
1099    }
1100
1101    #[test]
1102    fn run_with_setup_calls_setup_once() {
1103        use std::sync::atomic::{AtomicU32, Ordering};
1104
1105        static SETUP_COUNT: AtomicU32 = AtomicU32::new(0);
1106        static RUN_COUNT: AtomicU32 = AtomicU32::new(0);
1107
1108        let spec = BenchSpec::new("test", 5, 2).unwrap();
1109        let report = run_closure_with_setup(
1110            spec,
1111            || {
1112                SETUP_COUNT.fetch_add(1, Ordering::SeqCst);
1113                vec![1, 2, 3]
1114            },
1115            |data| {
1116                RUN_COUNT.fetch_add(1, Ordering::SeqCst);
1117                std::hint::black_box(data.len());
1118                Ok(())
1119            },
1120        )
1121        .unwrap();
1122
1123        assert_eq!(SETUP_COUNT.load(Ordering::SeqCst), 1); // Setup called once
1124        assert_eq!(RUN_COUNT.load(Ordering::SeqCst), 7); // 2 warmup + 5 iterations
1125        assert_eq!(report.samples.len(), 5);
1126    }
1127
1128    #[test]
1129    fn run_with_setup_per_iter_calls_setup_each_time() {
1130        use std::sync::atomic::{AtomicU32, Ordering};
1131
1132        static SETUP_COUNT: AtomicU32 = AtomicU32::new(0);
1133
1134        let spec = BenchSpec::new("test", 3, 1).unwrap();
1135        let report = run_closure_with_setup_per_iter(
1136            spec,
1137            || {
1138                SETUP_COUNT.fetch_add(1, Ordering::SeqCst);
1139                vec![1, 2, 3]
1140            },
1141            |data| {
1142                std::hint::black_box(data);
1143                Ok(())
1144            },
1145        )
1146        .unwrap();
1147
1148        assert_eq!(SETUP_COUNT.load(Ordering::SeqCst), 4); // 1 warmup + 3 iterations
1149        assert_eq!(report.samples.len(), 3);
1150    }
1151
1152    #[test]
1153    fn run_with_setup_teardown_calls_both() {
1154        use std::sync::atomic::{AtomicU32, Ordering};
1155
1156        static SETUP_COUNT: AtomicU32 = AtomicU32::new(0);
1157        static TEARDOWN_COUNT: AtomicU32 = AtomicU32::new(0);
1158
1159        let spec = BenchSpec::new("test", 3, 1).unwrap();
1160        let report = run_closure_with_setup_teardown(
1161            spec,
1162            || {
1163                SETUP_COUNT.fetch_add(1, Ordering::SeqCst);
1164                "resource"
1165            },
1166            |_resource| Ok(()),
1167            |_resource| {
1168                TEARDOWN_COUNT.fetch_add(1, Ordering::SeqCst);
1169            },
1170        )
1171        .unwrap();
1172
1173        assert_eq!(SETUP_COUNT.load(Ordering::SeqCst), 1);
1174        assert_eq!(TEARDOWN_COUNT.load(Ordering::SeqCst), 1);
1175        assert_eq!(report.samples.len(), 3);
1176    }
1177
1178    #[test]
1179    fn bench_report_serializes_exact_harness_timeline() {
1180        let spec = BenchSpec::new("timeline", 2, 1).unwrap();
1181        let report = run_closure_with_setup_teardown(
1182            spec,
1183            || {
1184                std::thread::sleep(Duration::from_millis(1));
1185                "resource"
1186            },
1187            |_resource| {
1188                std::thread::sleep(Duration::from_millis(1));
1189                Ok(())
1190            },
1191            |_resource| {
1192                std::thread::sleep(Duration::from_millis(1));
1193            },
1194        )
1195        .unwrap();
1196
1197        let json = serde_json::to_value(&report).unwrap();
1198        assert_eq!(json["timeline"][0]["phase"], "setup");
1199        assert_eq!(json["timeline"][1]["phase"], "warmup-benchmark");
1200        assert_eq!(json["timeline"][2]["phase"], "measured-benchmark");
1201        assert_eq!(json["timeline"][3]["phase"], "measured-benchmark");
1202        assert_eq!(json["timeline"][4]["phase"], "teardown");
1203    }
1204}