Skip to main content

scirs2_core/
bench_utils.rs

1//! # Benchmarking Utilities for SciRS2
2//!
3//! This module provides lightweight, pure-Rust benchmarking tools for measuring
4//! function performance, comparing implementations, and tracking throughput.
5//!
6//! ## Features
7//!
8//! - `Stopwatch` for manual timing with lap support
9//! - `benchmark_fn` for automated function benchmarking with statistics
10//! - `compare_implementations` for A/B testing with Welch's t-test
11//! - `throughput_bench` for measuring operations/second and bytes/second
12//! - `memory_bench` for estimating peak memory usage
13//! - Warm-up support (discard first N iterations)
14//! - CSV and JSON output for results
15
16use std::fmt;
17use std::time::{Duration, Instant};
18
19use crate::error::{CoreError, CoreResult, ErrorContext};
20
21// ---------------------------------------------------------------------------
22// Stopwatch
23// ---------------------------------------------------------------------------
24
/// A simple stopwatch for manual timing with lap support.
///
/// `lap` records the time since the previous lap (or since `start`), and the
/// total across all segments is still reported by [`Stopwatch::elapsed`].
///
/// # Example
///
/// ```
/// use scirs2_core::bench_utils::Stopwatch;
///
/// let mut sw = Stopwatch::new();
/// sw.start();
/// // ... work ...
/// let lap1 = sw.lap();
/// // ... more work ...
/// sw.stop();
/// let total = sw.elapsed();
/// ```
#[derive(Debug, Clone)]
pub struct Stopwatch {
    /// Start instant of the current timing segment (`None` while stopped).
    start_time: Option<Instant>,
    /// Time accumulated from all completed segments.
    elapsed: Duration,
    /// Recorded lap durations, in order.
    laps: Vec<Duration>,
    /// Whether a segment is currently being timed.
    running: bool,
}

impl Stopwatch {
    /// Create a new stopped stopwatch.
    pub fn new() -> Self {
        Self {
            start_time: None,
            elapsed: Duration::ZERO,
            laps: Vec::new(),
            running: false,
        }
    }

    /// Start (or resume) the stopwatch. No-op if already running.
    pub fn start(&mut self) {
        if !self.running {
            self.start_time = Some(Instant::now());
            self.running = true;
        }
    }

    /// Stop the stopwatch, folding the current segment into the total.
    /// No-op if already stopped.
    pub fn stop(&mut self) {
        if self.running {
            if let Some(start) = self.start_time.take() {
                self.elapsed += start.elapsed();
            }
            self.running = false;
        }
    }

    /// Record a lap time without stopping. Returns the lap duration.
    ///
    /// Returns (and records) `Duration::ZERO` when called while stopped.
    pub fn lap(&mut self) -> Duration {
        let now = Instant::now();
        let lap_duration = match self.start_time {
            Some(start) if self.running => now.duration_since(start),
            _ => Duration::ZERO,
        };
        if self.running {
            // Bug fix: fold the completed segment into the accumulated total
            // before restarting the segment clock. Previously `start_time`
            // was reset without updating `elapsed`, so after any lap a later
            // `stop()`/`elapsed()` only covered the final segment and all
            // pre-lap time was silently lost.
            self.elapsed += lap_duration;
            self.start_time = Some(now);
        }
        self.laps.push(lap_duration);
        lap_duration
    }

    /// Reset the stopwatch to zero and clear all recorded laps.
    pub fn reset(&mut self) {
        self.start_time = None;
        self.elapsed = Duration::ZERO;
        self.laps.clear();
        self.running = false;
    }

    /// Get total elapsed time. If still running, includes current segment.
    pub fn elapsed(&self) -> Duration {
        let mut total = self.elapsed;
        if self.running {
            if let Some(start) = self.start_time {
                total += start.elapsed();
            }
        }
        total
    }

    /// Get all recorded lap times.
    pub fn laps(&self) -> &[Duration] {
        &self.laps
    }

    /// Whether the stopwatch is currently running.
    pub fn is_running(&self) -> bool {
        self.running
    }
}

impl Default for Stopwatch {
    fn default() -> Self {
        Self::new()
    }
}
126
127// ---------------------------------------------------------------------------
128// BenchmarkConfig
129// ---------------------------------------------------------------------------
130
/// Configuration for a benchmark run.
#[derive(Debug, Clone)]
pub struct BenchmarkConfig {
    /// Number of warm-up iterations (discarded).
    pub warmup_iterations: usize,
    /// Number of measured iterations.
    pub iterations: usize,
}

impl BenchmarkConfig {
    /// Create config with given warm-up and iteration counts.
    pub fn new(warmup_iterations: usize, iterations: usize) -> Self {
        Self {
            warmup_iterations,
            iterations,
        }
    }
}

impl Default for BenchmarkConfig {
    /// Defaults to 5 warm-up iterations and 100 measured iterations.
    fn default() -> Self {
        Self::new(5, 100)
    }
}
158
159// ---------------------------------------------------------------------------
160// BenchmarkStats
161// ---------------------------------------------------------------------------
162
/// Statistics produced by a benchmark run.
///
/// Produced by [`benchmark_fn`] and friends; all fields are derived from the
/// sorted `samples` vector.
#[derive(Debug, Clone)]
pub struct BenchmarkStats {
    /// Minimum time observed.
    pub min: Duration,
    /// Arithmetic mean of all samples (truncating integer nanosecond division).
    pub max: Duration,
    /// Arithmetic mean of all samples.
    pub mean: Duration,
    /// Median (50th percentile; midpoint of the two central samples for even n).
    pub median: Duration,
    /// 99th percentile.
    pub p99: Duration,
    /// Population standard deviation in nanoseconds (divides by n, not n - 1).
    pub std_dev_nanos: f64,
    /// Number of samples.
    pub sample_count: usize,
    /// All sample durations (sorted ascending).
    pub samples: Vec<Duration>,
}
183
184impl BenchmarkStats {
185    /// Compute statistics from a **non-empty** vector of durations.
186    fn from_samples(mut durations: Vec<Duration>) -> CoreResult<Self> {
187        if durations.is_empty() {
188            return Err(CoreError::ValueError(ErrorContext::new(
189                "Cannot compute benchmark stats from zero samples",
190            )));
191        }
192        durations.sort();
193
194        let n = durations.len();
195        let min = durations[0];
196        let max = durations[n - 1];
197
198        let total_nanos: u128 = durations.iter().map(|d| d.as_nanos()).sum();
199        let mean_nanos = total_nanos / n as u128;
200        let mean = Duration::from_nanos(mean_nanos as u64);
201
202        let median = if n % 2 == 0 {
203            let a = durations[n / 2 - 1].as_nanos();
204            let b = durations[n / 2].as_nanos();
205            Duration::from_nanos(((a + b) / 2) as u64)
206        } else {
207            durations[n / 2]
208        };
209
210        let p99_idx = ((n as f64) * 0.99).ceil() as usize;
211        let p99 = durations[p99_idx.min(n - 1)];
212
213        // Std dev
214        let mean_f = mean_nanos as f64;
215        let variance: f64 = durations
216            .iter()
217            .map(|d| {
218                let diff = d.as_nanos() as f64 - mean_f;
219                diff * diff
220            })
221            .sum::<f64>()
222            / (n.max(1) as f64);
223        let std_dev_nanos = variance.sqrt();
224
225        Ok(Self {
226            min,
227            max,
228            mean,
229            median,
230            p99,
231            std_dev_nanos,
232            sample_count: n,
233            samples: durations,
234        })
235    }
236
237    /// Format stats as a CSV row (header: "min_ns,max_ns,mean_ns,median_ns,p99_ns,std_dev_ns,n").
238    pub fn to_csv_row(&self) -> String {
239        format!(
240            "{},{},{},{},{},{:.2},{}",
241            self.min.as_nanos(),
242            self.max.as_nanos(),
243            self.mean.as_nanos(),
244            self.median.as_nanos(),
245            self.p99.as_nanos(),
246            self.std_dev_nanos,
247            self.sample_count,
248        )
249    }
250
251    /// CSV header matching `to_csv_row`.
252    pub fn csv_header() -> &'static str {
253        "min_ns,max_ns,mean_ns,median_ns,p99_ns,std_dev_ns,n"
254    }
255
256    /// Format stats as a JSON string.
257    pub fn to_json(&self) -> String {
258        format!(
259            r#"{{"min_ns":{},"max_ns":{},"mean_ns":{},"median_ns":{},"p99_ns":{},"std_dev_ns":{:.2},"n":{}}}"#,
260            self.min.as_nanos(),
261            self.max.as_nanos(),
262            self.mean.as_nanos(),
263            self.median.as_nanos(),
264            self.p99.as_nanos(),
265            self.std_dev_nanos,
266            self.sample_count,
267        )
268    }
269}
270
271impl fmt::Display for BenchmarkStats {
272    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
273        write!(
274            f,
275            "min={:?}  max={:?}  mean={:?}  median={:?}  p99={:?}  std_dev={:.0}ns  n={}",
276            self.min,
277            self.max,
278            self.mean,
279            self.median,
280            self.p99,
281            self.std_dev_nanos,
282            self.sample_count,
283        )
284    }
285}
286
287// ---------------------------------------------------------------------------
288// benchmark_fn
289// ---------------------------------------------------------------------------
290
291/// Run a function `N` times (after warm-up) and return performance statistics.
292///
293/// The function under test receives no arguments and may return any type
294/// (the result is discarded via `std::hint::black_box`).
295///
296/// # Example
297///
298/// ```
299/// use scirs2_core::bench_utils::{benchmark_fn, BenchmarkConfig};
300///
301/// let stats = benchmark_fn(
302///     &BenchmarkConfig::new(3, 50),
303///     || {
304///         let v: Vec<f64> = (0..1000).map(|i| (i as f64).sqrt()).collect();
305///         v
306///     },
307/// ).expect("benchmark should succeed");
308///
309/// assert!(stats.sample_count == 50);
310/// ```
311pub fn benchmark_fn<F, R>(config: &BenchmarkConfig, mut func: F) -> CoreResult<BenchmarkStats>
312where
313    F: FnMut() -> R,
314{
315    if config.iterations == 0 {
316        return Err(CoreError::ValueError(ErrorContext::new(
317            "iterations must be > 0",
318        )));
319    }
320
321    // Warm-up
322    for _ in 0..config.warmup_iterations {
323        std::hint::black_box(func());
324    }
325
326    // Measured runs
327    let mut durations = Vec::with_capacity(config.iterations);
328    for _ in 0..config.iterations {
329        let start = Instant::now();
330        std::hint::black_box(func());
331        durations.push(start.elapsed());
332    }
333
334    BenchmarkStats::from_samples(durations)
335}
336
337// ---------------------------------------------------------------------------
338// compare_implementations (Welch's t-test)
339// ---------------------------------------------------------------------------
340
/// Result of comparing two implementations.
#[derive(Debug, Clone)]
pub struct ComparisonResult {
    /// Stats for implementation A.
    pub stats_a: BenchmarkStats,
    /// Stats for implementation B.
    pub stats_b: BenchmarkStats,
    /// Welch's t-statistic, computed as (mean_a - mean_b) / SE
    /// (positive means B is faster).
    pub t_statistic: f64,
    /// Approximate two-sided p-value (normal approximation; accurate for
    /// large sample counts).
    pub p_value: f64,
    /// Speedup ratio (mean_a / mean_b). > 1 means B is faster.
    /// NaN when mean_b is zero.
    pub speedup: f64,
    /// Whether the difference is statistically significant at alpha = 0.05.
    pub significant: bool,
}
357
358impl fmt::Display for ComparisonResult {
359    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
360        let faster = if self.speedup > 1.0 { "B" } else { "A" };
361        let ratio = if self.speedup > 1.0 {
362            self.speedup
363        } else if self.speedup > 0.0 {
364            1.0 / self.speedup
365        } else {
366            f64::NAN
367        };
368        write!(
369            f,
370            "{faster} is {ratio:.2}x faster  t={:.3}  p={:.4}  sig={}",
371            self.t_statistic, self.p_value, self.significant
372        )
373    }
374}
375
376/// Compare two implementations using Welch's t-test on their execution times.
377///
378/// Returns statistics for both plus a significance assessment.
379pub fn compare_implementations<FA, FB, RA, RB>(
380    config: &BenchmarkConfig,
381    mut func_a: FA,
382    mut func_b: FB,
383) -> CoreResult<ComparisonResult>
384where
385    FA: FnMut() -> RA,
386    FB: FnMut() -> RB,
387{
388    let stats_a = benchmark_fn(config, &mut func_a)?;
389    let stats_b = benchmark_fn(config, &mut func_b)?;
390
391    let n_a = stats_a.sample_count as f64;
392    let n_b = stats_b.sample_count as f64;
393    let mean_a = stats_a.mean.as_nanos() as f64;
394    let mean_b = stats_b.mean.as_nanos() as f64;
395    let var_a = stats_a.std_dev_nanos * stats_a.std_dev_nanos;
396    let var_b = stats_b.std_dev_nanos * stats_b.std_dev_nanos;
397
398    let se = ((var_a / n_a) + (var_b / n_b)).sqrt();
399    let t_statistic = if se > 0.0 {
400        (mean_a - mean_b) / se
401    } else {
402        0.0
403    };
404
405    // Approximate p-value using the normal distribution (valid for large N).
406    let p_value = approx_two_sided_p(t_statistic);
407
408    let speedup = if mean_b > 0.0 {
409        mean_a / mean_b
410    } else {
411        f64::NAN
412    };
413
414    Ok(ComparisonResult {
415        stats_a,
416        stats_b,
417        t_statistic,
418        p_value,
419        speedup,
420        significant: p_value < 0.05,
421    })
422}
423
/// Approximate two-sided p-value from a t-statistic using the standard
/// normal CDF (good approximation when df > 30).
///
/// Uses the Abramowitz & Stegun polynomial approximation (26.2.17) for the
/// normal tail probability; the result is clamped to [0, 1].
fn approx_two_sided_p(t: f64) -> f64 {
    // A&S 26.2.17 coefficients b1..b5 and the rational-argument constant p.
    const B: [f64; 5] = [
        0.319_381_530,
        -0.356_563_782,
        1.781_477_937,
        -1.821_255_978,
        1.330_274_429,
    ];
    const P_COEFF: f64 = 0.231_641_9;

    let x = t.abs();
    let u1 = 1.0 / (1.0 + P_COEFF * x);
    let u2 = u1 * u1;
    let u3 = u2 * u1;
    let u4 = u3 * u1;
    let u5 = u4 * u1;

    let pdf = (-x * x / 2.0).exp() / (2.0 * std::f64::consts::PI).sqrt();
    let cdf = 1.0 - pdf * (B[0] * u1 + B[1] * u2 + B[2] * u3 + B[3] * u4 + B[4] * u5);

    // Two-sided p-value: double the upper tail, clamped to a valid range.
    let one_tail = 1.0 - cdf;
    (2.0 * one_tail).clamp(0.0, 1.0)
}
447
448// ---------------------------------------------------------------------------
449// throughput_bench
450// ---------------------------------------------------------------------------
451
/// Result from a throughput benchmark.
#[derive(Debug, Clone)]
pub struct ThroughputResult {
    /// Underlying timing statistics.
    pub stats: BenchmarkStats,
    /// Operations per second (reciprocal of the mean iteration time;
    /// infinity when the mean rounds to zero).
    pub ops_per_sec: f64,
    /// Bytes processed per second (`None` unless `bytes_per_op` was provided).
    pub bytes_per_sec: Option<f64>,
}
462
463impl fmt::Display for ThroughputResult {
464    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
465        write!(f, "{:.2} ops/sec", self.ops_per_sec)?;
466        if let Some(bps) = self.bytes_per_sec {
467            let (val, unit) = humanize_bytes_per_sec(bps);
468            write!(f, "  {val:.2} {unit}")?;
469        }
470        Ok(())
471    }
472}
473
/// Scale a raw bytes/second figure into a human-friendly (value, unit) pair.
///
/// Picks the largest decimal unit (GB/s, MB/s, KB/s) whose threshold the
/// input meets, falling back to plain B/s.
fn humanize_bytes_per_sec(bps: f64) -> (f64, &'static str) {
    const SCALES: [(f64, &str); 3] = [(1e9, "GB/s"), (1e6, "MB/s"), (1e3, "KB/s")];
    for &(threshold, unit) in SCALES.iter() {
        if bps >= threshold {
            return (bps / threshold, unit);
        }
    }
    (bps, "B/s")
}
485
486/// Measure throughput (ops/sec, optionally bytes/sec).
487///
488/// * `bytes_per_op` -- if `Some(n)`, also report bytes/sec based on n bytes per invocation.
489pub fn throughput_bench<F, R>(
490    config: &BenchmarkConfig,
491    func: F,
492    bytes_per_op: Option<usize>,
493) -> CoreResult<ThroughputResult>
494where
495    F: FnMut() -> R,
496{
497    let stats = benchmark_fn(config, func)?;
498    let mean_secs = stats.mean.as_secs_f64();
499    let ops_per_sec = if mean_secs > 0.0 {
500        1.0 / mean_secs
501    } else {
502        f64::INFINITY
503    };
504    let bytes_per_sec = bytes_per_op.map(|b| b as f64 * ops_per_sec);
505
506    Ok(ThroughputResult {
507        stats,
508        ops_per_sec,
509        bytes_per_sec,
510    })
511}
512
513// ---------------------------------------------------------------------------
514// memory_bench
515// ---------------------------------------------------------------------------
516
/// Result from a memory benchmark.
#[derive(Debug, Clone)]
pub struct MemoryBenchResult {
    /// Estimated peak memory increase (bytes) during execution.
    ///
    /// Note: this is a best-effort estimate using the system allocator info
    /// where available. On platforms without allocator introspection it
    /// returns `None`.
    // Derived from resident-set-size deltas in `memory_bench`; see that
    // function for the measurement strategy and its caveats.
    pub peak_memory_bytes: Option<usize>,
    /// Timing statistics.
    pub stats: BenchmarkStats,
}
529
/// Measure execution time and attempt to estimate peak memory usage for `func`.
///
/// Memory estimation is best-effort. On most platforms we measure the
/// difference in resident set size before and after execution. This is
/// inherently approximate because the OS may not reclaim freed pages
/// immediately.
///
/// # Errors
///
/// Returns an error when `config.iterations` is 0 (no samples to summarize).
pub fn memory_bench<F, R>(config: &BenchmarkConfig, mut func: F) -> CoreResult<MemoryBenchResult>
where
    F: FnMut() -> R,
{
    // Get baseline RSS
    // NOTE(review): taken *before* warm-up, so warm-up allocations inflate
    // the overall-delta fallback below — confirm this is intended.
    let baseline_rss = current_rss_bytes();

    // Warm-up
    for _ in 0..config.warmup_iterations {
        std::hint::black_box(func());
    }

    let mut durations = Vec::with_capacity(config.iterations);
    // Largest per-iteration RSS increase seen so far (None until the first
    // iteration where both RSS readings were available).
    let mut max_rss_delta: Option<usize> = None;

    for _ in 0..config.iterations {
        // Sample RSS immediately around the timed call; the RSS reads are
        // outside the timed region so they do not distort the durations.
        let before_rss = current_rss_bytes();
        let start = Instant::now();
        std::hint::black_box(func());
        let elapsed = start.elapsed();
        let after_rss = current_rss_bytes();

        durations.push(elapsed);

        if let (Some(before), Some(after)) = (before_rss, after_rss) {
            // saturating_sub: RSS can shrink between readings.
            let delta = after.saturating_sub(before);
            max_rss_delta = Some(max_rss_delta.map_or(delta, |prev: usize| prev.max(delta)));
        }
    }

    let stats = BenchmarkStats::from_samples(durations)?;

    // Fall back to overall delta if per-iteration deltas were zero
    let peak_memory_bytes = match max_rss_delta {
        Some(0) => {
            // Try overall measurement
            let end_rss = current_rss_bytes();
            match (baseline_rss, end_rss) {
                (Some(b), Some(e)) => {
                    let delta = e.saturating_sub(b);
                    if delta > 0 {
                        Some(delta)
                    } else {
                        Some(0)
                    }
                }
                _ => None,
            }
        }
        // Either a positive per-iteration delta (keep it) or None (RSS not
        // available on this platform).
        other => other,
    };

    Ok(MemoryBenchResult {
        peak_memory_bytes,
        stats,
    })
}
593
/// Try to obtain the current resident set size in bytes.
/// Returns `None` if not available on this platform.
///
/// Platform dispatch is purely compile-time via `cfg`: at most one of the
/// early-return blocks below is compiled in, and the trailing `None` is the
/// fallback for platforms with neither implementation.
fn current_rss_bytes() -> Option<usize> {
    // On macOS, use libc getrusage when the cross_platform feature is enabled
    #[cfg(all(target_os = "macos", feature = "cross_platform"))]
    {
        return macos_rss();
    }

    // On Linux, read /proc/self/statm (no external deps needed)
    #[cfg(target_os = "linux")]
    {
        return linux_rss();
    }

    // Fallback: not available
    // (the allow silences "unreachable" on targets where a cfg branch above
    // always returns)
    #[allow(unreachable_code)]
    None
}
613
#[cfg(all(target_os = "macos", feature = "cross_platform"))]
fn macos_rss() -> Option<usize> {
    // Use the rusage approach which is simpler and doesn't require mach bindings
    //
    // NOTE(review): getrusage's ru_maxrss is the *peak* RSS of the process,
    // not the current RSS, so successive readings are monotonically
    // non-decreasing — deltas computed from this value understate churn.
    // Confirm this is acceptable for memory_bench, or switch to mach
    // task_info for a true current-RSS reading.
    let mut usage: libc::rusage = unsafe { std::mem::zeroed() };
    // SAFETY: getrusage only writes into the zero-initialized struct we pass.
    let ret = unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut usage) };
    if ret == 0 {
        // ru_maxrss is in bytes on macOS
        Some(usage.ru_maxrss as usize)
    } else {
        None
    }
}
626
#[cfg(target_os = "linux")]
fn linux_rss() -> Option<usize> {
    use std::fs;

    // Prefer /proc/self/status: the VmRSS field is reported directly in kB,
    // so we do not have to assume a page size. The previous implementation
    // hard-coded 4096-byte pages, which under-reports on kernels configured
    // with larger pages (e.g. 16K/64K on many ARM systems).
    if let Ok(status) = fs::read_to_string("/proc/self/status") {
        for line in status.lines() {
            if let Some(rest) = line.strip_prefix("VmRSS:") {
                if let Some(kb) = rest
                    .split_whitespace()
                    .next()
                    .and_then(|v| v.parse::<usize>().ok())
                {
                    return Some(kb * 1024);
                }
            }
        }
    }

    // Fallback: /proc/self/statm reports RSS in pages; assume the common
    // 4 KiB page size since we have no dependency-free way to query it.
    let statm = fs::read_to_string("/proc/self/statm").ok()?;
    let rss_pages: usize = statm.split_whitespace().nth(1)?.parse().ok()?;
    Some(rss_pages * 4096)
}
635
636// ---------------------------------------------------------------------------
637// BenchmarkReport -- CSV / JSON output
638// ---------------------------------------------------------------------------
639
/// A named benchmark result for inclusion in a report.
#[derive(Debug, Clone)]
pub struct NamedBenchmark {
    /// Human-readable label (used verbatim in CSV/JSON output).
    pub name: String,
    /// The statistics.
    pub stats: BenchmarkStats,
}
648
/// A collection of named benchmark results with CSV / JSON export.
#[derive(Debug, Clone, Default)]
pub struct BenchmarkReport {
    /// All benchmark entries, in insertion order.
    pub entries: Vec<NamedBenchmark>,
}
655
656impl BenchmarkReport {
657    /// Create an empty report.
658    pub fn new() -> Self {
659        Self {
660            entries: Vec::new(),
661        }
662    }
663
664    /// Add a named result.
665    pub fn add(&mut self, name: impl Into<String>, stats: BenchmarkStats) {
666        self.entries.push(NamedBenchmark {
667            name: name.into(),
668            stats,
669        });
670    }
671
672    /// Serialize the report as CSV.
673    pub fn to_csv(&self) -> String {
674        let mut out = format!("name,{}\n", BenchmarkStats::csv_header());
675        for entry in &self.entries {
676            out.push_str(&format!("{},{}\n", entry.name, entry.stats.to_csv_row()));
677        }
678        out
679    }
680
681    /// Serialize the report as JSON.
682    pub fn to_json(&self) -> String {
683        let items: Vec<String> = self
684            .entries
685            .iter()
686            .map(|e| format!(r#"{{"name":"{}","stats":{}}}"#, e.name, e.stats.to_json()))
687            .collect();
688        format!("[{}]", items.join(","))
689    }
690}
691
692// ---------------------------------------------------------------------------
693// Tests
694// ---------------------------------------------------------------------------
695
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE: timing assertions throughout use generous margins (e.g. sleep
    // 10ms, assert >= 5ms) to tolerate scheduler jitter on CI machines.

    #[test]
    fn test_stopwatch_basic() {
        let mut sw = Stopwatch::new();
        assert!(!sw.is_running());

        sw.start();
        assert!(sw.is_running());

        std::thread::sleep(Duration::from_millis(10));
        sw.stop();

        assert!(!sw.is_running());
        assert!(sw.elapsed() >= Duration::from_millis(5));
    }

    #[test]
    fn test_stopwatch_lap() {
        let mut sw = Stopwatch::new();
        sw.start();
        std::thread::sleep(Duration::from_millis(5));
        let lap1 = sw.lap();
        assert!(lap1 >= Duration::from_millis(1));
        std::thread::sleep(Duration::from_millis(5));
        let _lap2 = sw.lap();
        sw.stop();
        assert_eq!(sw.laps().len(), 2);
    }

    #[test]
    fn test_stopwatch_reset() {
        let mut sw = Stopwatch::new();
        sw.start();
        std::thread::sleep(Duration::from_millis(5));
        sw.stop();
        assert!(sw.elapsed() > Duration::ZERO);
        sw.reset();
        assert_eq!(sw.elapsed(), Duration::ZERO);
        assert!(sw.laps().is_empty());
    }

    #[test]
    fn test_benchmark_fn_basic() {
        let config = BenchmarkConfig::new(2, 20);
        let stats = benchmark_fn(&config, || {
            let mut sum = 0u64;
            for i in 0..100 {
                sum += i;
            }
            sum
        })
        .expect("benchmark_fn should succeed");

        // Ordering invariants: min <= mean <= max must always hold.
        assert_eq!(stats.sample_count, 20);
        assert!(stats.min <= stats.mean);
        assert!(stats.mean <= stats.max);
        assert!(stats.median <= stats.max);
    }

    #[test]
    fn test_benchmark_fn_zero_iterations_error() {
        // Zero measured iterations is a configuration error, not a panic.
        let config = BenchmarkConfig::new(0, 0);
        let result = benchmark_fn(&config, || 42);
        assert!(result.is_err());
    }

    #[test]
    fn test_benchmark_stats_csv_json() {
        let config = BenchmarkConfig::new(0, 10);
        let stats =
            benchmark_fn(&config, || std::hint::black_box(42)).expect("benchmark should succeed");

        let csv = stats.to_csv_row();
        assert!(csv.contains(','));

        let json = stats.to_json();
        assert!(json.starts_with('{'));
        assert!(json.contains("min_ns"));
    }

    #[test]
    fn test_compare_implementations() {
        let config = BenchmarkConfig::new(2, 30);
        let result = compare_implementations(
            &config,
            || {
                let mut v = 0u64;
                for i in 0..100 {
                    v += i;
                }
                v
            },
            || {
                let mut v = 0u64;
                for i in 0..100 {
                    v += i;
                }
                v
            },
        )
        .expect("compare should succeed");

        assert_eq!(result.stats_a.sample_count, 30);
        assert_eq!(result.stats_b.sample_count, 30);
        // For nearly identical functions, speedup should be close to 1
        assert!(result.speedup > 0.0);
    }

    #[test]
    fn test_throughput_bench() {
        let config = BenchmarkConfig::new(2, 20);
        let result = throughput_bench(
            &config,
            || {
                let v: Vec<u8> = vec![0u8; 1024];
                std::hint::black_box(v);
            },
            Some(1024),
        )
        .expect("throughput bench should succeed");

        assert!(result.ops_per_sec > 0.0);
        assert!(result.bytes_per_sec.is_some());
    }

    #[test]
    fn test_memory_bench() {
        let config = BenchmarkConfig::new(1, 5);
        let result = memory_bench(&config, || {
            let v: Vec<u8> = vec![0u8; 1024 * 1024]; // 1 MiB
            std::hint::black_box(v);
        })
        .expect("memory bench should succeed");

        assert_eq!(result.stats.sample_count, 5);
        // peak_memory_bytes may or may not be available depending on platform
    }

    #[test]
    fn test_benchmark_report() {
        let config = BenchmarkConfig::new(0, 5);
        let stats =
            benchmark_fn(&config, || std::hint::black_box(42)).expect("benchmark should succeed");

        let mut report = BenchmarkReport::new();
        report.add("test_func", stats);

        let csv = report.to_csv();
        assert!(csv.contains("test_func"));
        assert!(csv.contains("min_ns"));

        let json = report.to_json();
        assert!(json.contains("test_func"));
    }

    #[test]
    fn test_approx_two_sided_p() {
        // For t=0, p should be ~1.0
        let p0 = approx_two_sided_p(0.0);
        assert!((p0 - 1.0).abs() < 0.1);

        // For large |t|, p should be very small
        let p_large = approx_two_sided_p(5.0);
        assert!(p_large < 0.001);
    }

    #[test]
    fn test_throughput_display() {
        let config = BenchmarkConfig::new(0, 5);
        let result = throughput_bench(&config, || std::hint::black_box(42), Some(1024))
            .expect("should succeed");

        let display = format!("{result}");
        assert!(display.contains("ops/sec"));
    }

    #[test]
    fn test_stopwatch_resume() {
        // Stopping and restarting must accumulate, never reset, the total.
        let mut sw = Stopwatch::new();
        sw.start();
        std::thread::sleep(Duration::from_millis(5));
        sw.stop();
        let e1 = sw.elapsed();

        sw.start(); // resume
        std::thread::sleep(Duration::from_millis(5));
        sw.stop();
        let e2 = sw.elapsed();

        assert!(e2 >= e1);
    }
}