// mobench_sdk/timing.rs

//! Lightweight benchmarking harness for mobile platforms.
//!
//! This module provides the core timing infrastructure for the mobench ecosystem.
//! It was previously a separate crate (`mobench-runner`) but has been consolidated
//! into `mobench-sdk` for a simpler dependency graph.
//!
//! The module is designed to be minimal and portable, with no platform-specific
//! dependencies, making it suitable for compilation to Android and iOS targets.
//!
//! ## Overview
//!
//! The timing module executes benchmark functions with:
//! - Configurable warmup iterations
//! - Precise nanosecond-resolution timing
//! - Simple, serializable results
//!
//! ## Usage
//!
//! Most users should use this via the higher-level [`crate::run_benchmark`] function
//! or [`crate::BenchmarkBuilder`]. Direct usage is for custom integrations:
//!
//! ```
//! use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
//!
//! // Define a benchmark specification
//! let spec = BenchSpec::new("my_benchmark", 100, 10)?;
//!
//! // Run the benchmark
//! let report = run_closure(spec, || {
//!     // Your benchmark code
//!     let sum: u64 = (0..1000).sum();
//!     std::hint::black_box(sum);
//!     Ok(())
//! })?;
//!
//! // Analyze results
//! let mean_ns = report.samples.iter()
//!     .map(|s| s.duration_ns)
//!     .sum::<u64>() / report.samples.len() as u64;
//!
//! println!("Mean: {} ns", mean_ns);
//! # Ok::<(), TimingError>(())
//! ```
//!
//! ## Types
//!
//! | Type | Description |
//! |------|-------------|
//! | [`BenchSpec`] | Benchmark configuration (name, iterations, warmup) |
//! | [`BenchSample`] | Single timing measurement in nanoseconds |
//! | [`BenchReport`] | Complete results with all samples |
//! | [`TimingError`] | Error conditions during benchmarking |
//!
//! ## Feature Flags
//!
//! This module is always available. When using `mobench-sdk` with default features,
//! you also get build automation and template generation. For minimal binary size
//! (e.g., on mobile targets), use the `runner-only` feature:
//!
//! ```toml
//! [dependencies]
//! mobench-sdk = { version = "0.1", default-features = false, features = ["runner-only"] }
//! ```

use std::time::{Duration, Instant};

use serde::{Deserialize, Serialize};
use thiserror::Error;

/// Benchmark specification defining what and how to benchmark.
///
/// Contains the benchmark name, number of measurement iterations, and
/// warmup iterations to perform before measuring.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::BenchSpec;
///
/// // Create a spec for 100 iterations with 10 warmup runs
/// let spec = BenchSpec::new("sorting_benchmark", 100, 10)?;
///
/// assert_eq!(spec.name, "sorting_benchmark");
/// assert_eq!(spec.iterations, 100);
/// assert_eq!(spec.warmup, 10);
/// # Ok::<(), mobench_sdk::timing::TimingError>(())
/// ```
///
/// # Serialization
///
/// `BenchSpec` implements `Serialize` and `Deserialize` for JSON persistence:
///
/// ```
/// use mobench_sdk::timing::BenchSpec;
///
/// let spec = BenchSpec {
///     name: "my_bench".to_string(),
///     iterations: 50,
///     warmup: 5,
/// };
///
/// let json = serde_json::to_string(&spec)?;
/// let restored: BenchSpec = serde_json::from_str(&json)?;
///
/// assert_eq!(spec.name, restored.name);
/// # Ok::<(), serde_json::Error>(())
/// ```
///
/// Note: because the fields are public, a struct literal (as above) bypasses
/// the zero-iteration validation performed by [`BenchSpec::new`]; the
/// `run_closure*` functions re-validate `iterations` before running.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSpec {
    /// Name of the benchmark, typically the fully-qualified function name.
    ///
    /// Examples: `"my_crate::fibonacci"`, `"sorting_benchmark"`
    pub name: String,

    /// Number of iterations to measure.
    ///
    /// Each iteration produces one [`BenchSample`]. Must be greater than zero.
    pub iterations: u32,

    /// Number of warmup iterations before measurement.
    ///
    /// Warmup iterations are not recorded. They allow CPU caches to warm
    /// and any JIT compilation to complete. Can be zero.
    pub warmup: u32,
}

126impl BenchSpec {
127    /// Creates a new benchmark specification.
128    ///
129    /// # Arguments
130    ///
131    /// * `name` - Name identifier for the benchmark
132    /// * `iterations` - Number of measured iterations (must be > 0)
133    /// * `warmup` - Number of warmup iterations (can be 0)
134    ///
135    /// # Errors
136    ///
137    /// Returns [`TimingError::NoIterations`] if `iterations` is zero.
138    ///
139    /// # Example
140    ///
141    /// ```
142    /// use mobench_sdk::timing::BenchSpec;
143    ///
144    /// let spec = BenchSpec::new("test", 100, 10)?;
145    /// assert_eq!(spec.iterations, 100);
146    ///
147    /// // Zero iterations is an error
148    /// let err = BenchSpec::new("test", 0, 10);
149    /// assert!(err.is_err());
150    /// # Ok::<(), mobench_sdk::timing::TimingError>(())
151    /// ```
152    pub fn new(name: impl Into<String>, iterations: u32, warmup: u32) -> Result<Self, TimingError> {
153        if iterations == 0 {
154            return Err(TimingError::NoIterations { count: iterations });
155        }
156
157        Ok(Self {
158            name: name.into(),
159            iterations,
160            warmup,
161        })
162    }
163}
164
/// A single timing sample from a benchmark iteration.
///
/// Contains the elapsed time in nanoseconds for one execution of the
/// benchmark function.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::BenchSample;
///
/// let sample = BenchSample { duration_ns: 1_500_000 };
///
/// // Convert to milliseconds
/// let ms = sample.duration_ns as f64 / 1_000_000.0;
/// assert_eq!(ms, 1.5);
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSample {
    /// Duration of the iteration in nanoseconds.
    ///
    /// Measured using [`std::time::Instant`] for monotonic, high-resolution timing.
    pub duration_ns: u64,
}

189impl BenchSample {
190    /// Creates a sample from a [`Duration`].
191    fn from_duration(duration: Duration) -> Self {
192        Self {
193            duration_ns: duration.as_nanos() as u64,
194        }
195    }
196}
197
/// Complete benchmark report with all timing samples.
///
/// Contains the original specification and all collected samples.
/// Can be serialized to JSON for storage or transmission.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::{BenchSpec, run_closure};
///
/// let spec = BenchSpec::new("example", 50, 5)?;
/// let report = run_closure(spec, || {
///     std::hint::black_box(42);
///     Ok(())
/// })?;
///
/// // Calculate statistics
/// let samples: Vec<u64> = report.samples.iter()
///     .map(|s| s.duration_ns)
///     .collect();
///
/// let min = samples.iter().min().unwrap();
/// let max = samples.iter().max().unwrap();
/// let mean = samples.iter().sum::<u64>() / samples.len() as u64;
///
/// println!("Min: {} ns, Max: {} ns, Mean: {} ns", min, max, mean);
/// # Ok::<(), mobench_sdk::timing::TimingError>(())
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchReport {
    /// The specification used for this benchmark run.
    pub spec: BenchSpec,

    /// All collected timing samples.
    ///
    /// The length equals `spec.iterations`. Samples are in execution order.
    pub samples: Vec<BenchSample>,
}

237impl BenchReport {
238    /// Returns the mean (average) duration in nanoseconds.
239    #[must_use]
240    pub fn mean_ns(&self) -> f64 {
241        if self.samples.is_empty() {
242            return 0.0;
243        }
244        let sum: u64 = self.samples.iter().map(|s| s.duration_ns).sum();
245        sum as f64 / self.samples.len() as f64
246    }
247
248    /// Returns the median duration in nanoseconds.
249    #[must_use]
250    pub fn median_ns(&self) -> f64 {
251        if self.samples.is_empty() {
252            return 0.0;
253        }
254        let mut sorted: Vec<u64> = self.samples.iter().map(|s| s.duration_ns).collect();
255        sorted.sort_unstable();
256        let len = sorted.len();
257        if len % 2 == 0 {
258            (sorted[len / 2 - 1] + sorted[len / 2]) as f64 / 2.0
259        } else {
260            sorted[len / 2] as f64
261        }
262    }
263
264    /// Returns the standard deviation in nanoseconds (sample std dev, n-1).
265    #[must_use]
266    pub fn std_dev_ns(&self) -> f64 {
267        if self.samples.len() < 2 {
268            return 0.0;
269        }
270        let mean = self.mean_ns();
271        let variance: f64 = self
272            .samples
273            .iter()
274            .map(|s| {
275                let diff = s.duration_ns as f64 - mean;
276                diff * diff
277            })
278            .sum::<f64>()
279            / (self.samples.len() - 1) as f64;
280        variance.sqrt()
281    }
282
283    /// Returns the given percentile (0-100) in nanoseconds.
284    #[must_use]
285    pub fn percentile_ns(&self, p: f64) -> f64 {
286        if self.samples.is_empty() {
287            return 0.0;
288        }
289        let mut sorted: Vec<u64> = self.samples.iter().map(|s| s.duration_ns).collect();
290        sorted.sort_unstable();
291        let p = p.clamp(0.0, 100.0) / 100.0;
292        let index = (p * (sorted.len() - 1) as f64).round() as usize;
293        sorted[index.min(sorted.len() - 1)] as f64
294    }
295
296    /// Returns the minimum duration in nanoseconds.
297    #[must_use]
298    pub fn min_ns(&self) -> u64 {
299        self.samples
300            .iter()
301            .map(|s| s.duration_ns)
302            .min()
303            .unwrap_or(0)
304    }
305
306    /// Returns the maximum duration in nanoseconds.
307    #[must_use]
308    pub fn max_ns(&self) -> u64 {
309        self.samples
310            .iter()
311            .map(|s| s.duration_ns)
312            .max()
313            .unwrap_or(0)
314    }
315
316    /// Returns a statistical summary of the benchmark results.
317    #[must_use]
318    pub fn summary(&self) -> BenchSummary {
319        BenchSummary {
320            name: self.spec.name.clone(),
321            iterations: self.samples.len() as u32,
322            warmup: self.spec.warmup,
323            mean_ns: self.mean_ns(),
324            median_ns: self.median_ns(),
325            std_dev_ns: self.std_dev_ns(),
326            min_ns: self.min_ns(),
327            max_ns: self.max_ns(),
328            p95_ns: self.percentile_ns(95.0),
329            p99_ns: self.percentile_ns(99.0),
330        }
331    }
332}
333
/// Statistical summary of benchmark results.
///
/// Produced by [`BenchReport::summary`]; serializable for storage or
/// transmission alongside the full report.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchSummary {
    /// Name of the benchmark.
    pub name: String,
    /// Number of measured iterations.
    pub iterations: u32,
    /// Number of warmup iterations.
    pub warmup: u32,
    /// Mean duration in nanoseconds.
    pub mean_ns: f64,
    /// Median duration in nanoseconds.
    pub median_ns: f64,
    /// Standard deviation in nanoseconds (sample std dev, n-1).
    pub std_dev_ns: f64,
    /// Minimum duration in nanoseconds.
    pub min_ns: u64,
    /// Maximum duration in nanoseconds.
    pub max_ns: u64,
    /// 95th percentile in nanoseconds.
    pub p95_ns: f64,
    /// 99th percentile in nanoseconds.
    pub p99_ns: f64,
}

/// Errors that can occur during benchmark execution.
///
/// # Example
///
/// ```
/// use mobench_sdk::timing::{BenchSpec, TimingError};
///
/// // Zero iterations produces an error
/// let result = BenchSpec::new("test", 0, 10);
/// assert!(matches!(result, Err(TimingError::NoIterations { .. })));
/// ```
#[derive(Debug, Error)]
pub enum TimingError {
    /// The iteration count was zero or invalid.
    ///
    /// At least one iteration is required to produce a measurement.
    /// The error includes the actual value provided for diagnostic purposes.
    #[error("iterations must be greater than zero (got {count}). Minimum recommended: 10")]
    NoIterations {
        /// The invalid iteration count that was provided.
        count: u32,
    },

    /// The benchmark function failed during execution.
    ///
    /// Contains a description of the failure.
    #[error("benchmark function failed: {0}")]
    Execution(String),
}

389/// Runs a benchmark by executing a closure repeatedly.
390///
391/// This is the core benchmarking function. It:
392///
393/// 1. Executes the closure `spec.warmup` times without recording
394/// 2. Executes the closure `spec.iterations` times, recording each duration
395/// 3. Returns a [`BenchReport`] with all samples
396///
397/// # Arguments
398///
399/// * `spec` - Benchmark configuration specifying iterations and warmup
400/// * `f` - Closure to benchmark; must return `Result<(), TimingError>`
401///
402/// # Returns
403///
404/// A [`BenchReport`] containing all timing samples, or a [`TimingError`] if
405/// the benchmark fails.
406///
407/// # Example
408///
409/// ```
410/// use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
411///
412/// let spec = BenchSpec::new("sum_benchmark", 100, 10)?;
413///
414/// let report = run_closure(spec, || {
415///     let sum: u64 = (0..1000).sum();
416///     std::hint::black_box(sum);
417///     Ok(())
418/// })?;
419///
420/// assert_eq!(report.samples.len(), 100);
421///
422/// // Calculate mean duration
423/// let total_ns: u64 = report.samples.iter().map(|s| s.duration_ns).sum();
424/// let mean_ns = total_ns / report.samples.len() as u64;
425/// println!("Mean: {} ns", mean_ns);
426/// # Ok::<(), TimingError>(())
427/// ```
428///
429/// # Error Handling
430///
431/// If the closure returns an error, the benchmark stops immediately:
432///
433/// ```
434/// use mobench_sdk::timing::{BenchSpec, run_closure, TimingError};
435///
436/// let spec = BenchSpec::new("failing_bench", 100, 0)?;
437///
438/// let result = run_closure(spec, || {
439///     Err(TimingError::Execution("simulated failure".into()))
440/// });
441///
442/// assert!(result.is_err());
443/// # Ok::<(), TimingError>(())
444/// ```
445///
446/// # Timing Precision
447///
448/// Uses [`std::time::Instant`] for timing, which provides monotonic,
449/// nanosecond-resolution measurements on most platforms.
450pub fn run_closure<F>(spec: BenchSpec, mut f: F) -> Result<BenchReport, TimingError>
451where
452    F: FnMut() -> Result<(), TimingError>,
453{
454    if spec.iterations == 0 {
455        return Err(TimingError::NoIterations {
456            count: spec.iterations,
457        });
458    }
459
460    // Warmup phase - not measured
461    for _ in 0..spec.warmup {
462        f()?;
463    }
464
465    // Measurement phase
466    let mut samples = Vec::with_capacity(spec.iterations as usize);
467    for _ in 0..spec.iterations {
468        let start = Instant::now();
469        f()?;
470        samples.push(BenchSample::from_duration(start.elapsed()));
471    }
472
473    Ok(BenchReport { spec, samples })
474}
475
476/// Runs a benchmark with setup that executes once before all iterations.
477///
478/// The setup function is called once before timing begins, then the benchmark
479/// runs multiple times using a reference to the setup result. This is useful
480/// for expensive initialization that shouldn't be included in timing.
481///
482/// # Arguments
483///
484/// * `spec` - Benchmark configuration specifying iterations and warmup
485/// * `setup` - Function that creates the input data (called once, not timed)
486/// * `f` - Benchmark closure that receives a reference to setup result
487///
488/// # Example
489///
490/// ```ignore
491/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup};
492///
493/// fn setup_data() -> Vec<u8> {
494///     vec![0u8; 1_000_000]  // Expensive allocation not measured
495/// }
496///
497/// let spec = BenchSpec::new("hash_benchmark", 100, 10)?;
498/// let report = run_closure_with_setup(spec, setup_data, |data| {
499///     std::hint::black_box(compute_hash(data));
500///     Ok(())
501/// })?;
502/// ```
503pub fn run_closure_with_setup<S, T, F>(
504    spec: BenchSpec,
505    setup: S,
506    mut f: F,
507) -> Result<BenchReport, TimingError>
508where
509    S: FnOnce() -> T,
510    F: FnMut(&T) -> Result<(), TimingError>,
511{
512    if spec.iterations == 0 {
513        return Err(TimingError::NoIterations {
514            count: spec.iterations,
515        });
516    }
517
518    // Setup phase - not timed
519    let input = setup();
520
521    // Warmup phase - not recorded
522    for _ in 0..spec.warmup {
523        f(&input)?;
524    }
525
526    // Measurement phase
527    let mut samples = Vec::with_capacity(spec.iterations as usize);
528    for _ in 0..spec.iterations {
529        let start = Instant::now();
530        f(&input)?;
531        samples.push(BenchSample::from_duration(start.elapsed()));
532    }
533
534    Ok(BenchReport { spec, samples })
535}
536
537/// Runs a benchmark with per-iteration setup.
538///
539/// Setup runs before each iteration and is not timed. The benchmark takes
540/// ownership of the setup result, making this suitable for benchmarks that
541/// mutate their input (e.g., sorting).
542///
543/// # Arguments
544///
545/// * `spec` - Benchmark configuration specifying iterations and warmup
546/// * `setup` - Function that creates fresh input for each iteration (not timed)
547/// * `f` - Benchmark closure that takes ownership of setup result
548///
549/// # Example
550///
551/// ```ignore
552/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup_per_iter};
553///
554/// fn generate_random_vec() -> Vec<i32> {
555///     (0..1000).map(|_| rand::random()).collect()
556/// }
557///
558/// let spec = BenchSpec::new("sort_benchmark", 100, 10)?;
559/// let report = run_closure_with_setup_per_iter(spec, generate_random_vec, |mut data| {
560///     data.sort();
561///     std::hint::black_box(data);
562///     Ok(())
563/// })?;
564/// ```
565pub fn run_closure_with_setup_per_iter<S, T, F>(
566    spec: BenchSpec,
567    mut setup: S,
568    mut f: F,
569) -> Result<BenchReport, TimingError>
570where
571    S: FnMut() -> T,
572    F: FnMut(T) -> Result<(), TimingError>,
573{
574    if spec.iterations == 0 {
575        return Err(TimingError::NoIterations {
576            count: spec.iterations,
577        });
578    }
579
580    // Warmup phase
581    for _ in 0..spec.warmup {
582        let input = setup();
583        f(input)?;
584    }
585
586    // Measurement phase
587    let mut samples = Vec::with_capacity(spec.iterations as usize);
588    for _ in 0..spec.iterations {
589        let input = setup(); // Not timed
590
591        let start = Instant::now();
592        f(input)?; // Only this is timed
593        samples.push(BenchSample::from_duration(start.elapsed()));
594    }
595
596    Ok(BenchReport { spec, samples })
597}
598
599/// Runs a benchmark with setup and teardown.
600///
601/// Setup runs once before all iterations, teardown runs once after all
602/// iterations complete. Neither is included in timing.
603///
604/// # Arguments
605///
606/// * `spec` - Benchmark configuration specifying iterations and warmup
607/// * `setup` - Function that creates the input data (called once, not timed)
608/// * `f` - Benchmark closure that receives a reference to setup result
609/// * `teardown` - Function that cleans up the input (called once, not timed)
610///
611/// # Example
612///
613/// ```ignore
614/// use mobench_sdk::timing::{BenchSpec, run_closure_with_setup_teardown};
615///
616/// fn setup_db() -> Database { Database::connect("test.db") }
617/// fn cleanup_db(db: Database) { db.close(); std::fs::remove_file("test.db").ok(); }
618///
619/// let spec = BenchSpec::new("db_benchmark", 100, 10)?;
620/// let report = run_closure_with_setup_teardown(
621///     spec,
622///     setup_db,
623///     |db| { db.query("SELECT *"); Ok(()) },
624///     cleanup_db,
625/// )?;
626/// ```
627pub fn run_closure_with_setup_teardown<S, T, F, D>(
628    spec: BenchSpec,
629    setup: S,
630    mut f: F,
631    teardown: D,
632) -> Result<BenchReport, TimingError>
633where
634    S: FnOnce() -> T,
635    F: FnMut(&T) -> Result<(), TimingError>,
636    D: FnOnce(T),
637{
638    if spec.iterations == 0 {
639        return Err(TimingError::NoIterations {
640            count: spec.iterations,
641        });
642    }
643
644    // Setup phase - not timed
645    let input = setup();
646
647    // Warmup phase
648    for _ in 0..spec.warmup {
649        f(&input)?;
650    }
651
652    // Measurement phase
653    let mut samples = Vec::with_capacity(spec.iterations as usize);
654    for _ in 0..spec.iterations {
655        let start = Instant::now();
656        f(&input)?;
657        samples.push(BenchSample::from_duration(start.elapsed()));
658    }
659
660    // Teardown phase - not timed
661    teardown(input);
662
663    Ok(BenchReport { spec, samples })
664}
665
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn runs_benchmark() {
        let spec = BenchSpec::new("noop", 3, 1).unwrap();
        let report = run_closure(spec, || Ok(())).unwrap();

        // One sample per measured iteration.
        assert_eq!(report.samples.len(), 3);
        // At least one iteration should register a non-zero elapsed time.
        assert!(report.samples.iter().any(|s| s.duration_ns > 0));
    }

    #[test]
    fn rejects_zero_iterations() {
        assert!(matches!(
            BenchSpec::new("test", 0, 10),
            Err(TimingError::NoIterations { count: 0 })
        ));
    }

    #[test]
    fn allows_zero_warmup() {
        let spec = BenchSpec::new("test", 5, 0).unwrap();
        assert_eq!(spec.warmup, 0);

        // Zero warmup still produces all measured samples.
        let report = run_closure(spec, || Ok(())).unwrap();
        assert_eq!(report.samples.len(), 5);
    }

    #[test]
    fn serializes_to_json() {
        let spec = BenchSpec::new("test", 10, 2).unwrap();
        let report = run_closure(spec, || Ok(())).unwrap();

        // Round-trip through JSON and verify the payload survives.
        let json = serde_json::to_string(&report).unwrap();
        let decoded: BenchReport = serde_json::from_str(&json).unwrap();

        assert_eq!(decoded.spec.name, "test");
        assert_eq!(decoded.samples.len(), 10);
    }

    #[test]
    fn run_with_setup_calls_setup_once() {
        use std::sync::atomic::{AtomicU32, Ordering};

        static SETUPS: AtomicU32 = AtomicU32::new(0);
        static RUNS: AtomicU32 = AtomicU32::new(0);

        let spec = BenchSpec::new("test", 5, 2).unwrap();
        let report = run_closure_with_setup(
            spec,
            || {
                SETUPS.fetch_add(1, Ordering::SeqCst);
                vec![1, 2, 3]
            },
            |data| {
                RUNS.fetch_add(1, Ordering::SeqCst);
                std::hint::black_box(data.len());
                Ok(())
            },
        )
        .unwrap();

        // Setup fires exactly once; the closure runs warmup + measured times.
        assert_eq!(SETUPS.load(Ordering::SeqCst), 1);
        assert_eq!(RUNS.load(Ordering::SeqCst), 7); // 2 warmup + 5 iterations
        assert_eq!(report.samples.len(), 5);
    }

    #[test]
    fn run_with_setup_per_iter_calls_setup_each_time() {
        use std::sync::atomic::{AtomicU32, Ordering};

        static SETUPS: AtomicU32 = AtomicU32::new(0);

        let spec = BenchSpec::new("test", 3, 1).unwrap();
        let report = run_closure_with_setup_per_iter(
            spec,
            || {
                SETUPS.fetch_add(1, Ordering::SeqCst);
                vec![1, 2, 3]
            },
            |data| {
                std::hint::black_box(data);
                Ok(())
            },
        )
        .unwrap();

        // Setup runs before every iteration, warmup included.
        assert_eq!(SETUPS.load(Ordering::SeqCst), 4); // 1 warmup + 3 iterations
        assert_eq!(report.samples.len(), 3);
    }

    #[test]
    fn run_with_setup_teardown_calls_both() {
        use std::sync::atomic::{AtomicU32, Ordering};

        static SETUPS: AtomicU32 = AtomicU32::new(0);
        static TEARDOWNS: AtomicU32 = AtomicU32::new(0);

        let spec = BenchSpec::new("test", 3, 1).unwrap();
        let report = run_closure_with_setup_teardown(
            spec,
            || {
                SETUPS.fetch_add(1, Ordering::SeqCst);
                "resource"
            },
            |_resource| Ok(()),
            |_resource| {
                TEARDOWNS.fetch_add(1, Ordering::SeqCst);
            },
        )
        .unwrap();

        // Both lifecycle hooks fire exactly once for a successful run.
        assert_eq!(SETUPS.load(Ordering::SeqCst), 1);
        assert_eq!(TEARDOWNS.load(Ordering::SeqCst), 1);
        assert_eq!(report.samples.len(), 3);
    }
}