//! Lightweight benchmarking utilities: wall-clock timers, summary
//! statistics over timing samples, and simple measurement harnesses.
#![allow(clippy::must_use_candidate)]
use std::time::{Duration, Instant};
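/// A wall-clock timer built on [`Instant`], with an optional label for
/// identifying the measured operation.
///
/// A minimal usage sketch (`your_crate` is a placeholder for the actual
/// crate name):
///
/// ```ignore
/// use your_crate::benchmark::BenchmarkTimer;
///
/// let timer = BenchmarkTimer::start_labeled("matrix multiply");
/// // ... do the work being measured ...
/// println!("{}: {:?}", timer.label().unwrap_or("unnamed"), timer.stop());
/// ```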
#[derive(Debug)]
pub struct BenchmarkTimer {
start: Instant,
label: Option<String>,
}
impl BenchmarkTimer {
    /// Starts an unlabeled timer at the current instant.
    pub fn start() -> Self {
        Self {
            start: Instant::now(),
            label: None,
        }
    }
    /// Starts a timer tagged with `label`.
    pub fn start_labeled(label: impl Into<String>) -> Self {
        Self {
            start: Instant::now(),
            label: Some(label.into()),
        }
    }
    /// Returns the elapsed time since the timer started. The timer is not
    /// consumed, so successive readings can be taken from the same timer.
    pub fn stop(&self) -> Duration {
        self.start.elapsed()
    }
    /// Elapsed time in milliseconds.
    pub fn stop_ms(&self) -> f64 {
        self.start.elapsed().as_secs_f64() * 1000.0
    }
    /// Elapsed time in microseconds.
    pub fn stop_us(&self) -> f64 {
        self.start.elapsed().as_secs_f64() * 1_000_000.0
    }
    /// Elapsed time in whole nanoseconds.
    pub fn stop_ns(&self) -> u128 {
        self.start.elapsed().as_nanos()
    }
    /// The label given at construction, if any.
    pub fn label(&self) -> Option<&str> {
        self.label.as_deref()
    }
}
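/// Collects timing samples for a named benchmark and derives summary
/// statistics (mean, median, min/max, percentiles, throughput).
///
/// A minimal usage sketch (`your_crate` is a placeholder for the actual
/// crate name):
///
/// ```ignore
/// use std::time::Duration;
/// use your_crate::benchmark::BenchmarkStats;
///
/// let mut stats = BenchmarkStats::new("example");
/// stats.record(Duration::from_millis(10));
/// stats.record(Duration::from_millis(20));
/// assert_eq!(stats.mean(), Some(Duration::from_millis(15)));
/// println!("{}", stats.report());
/// ```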
#[derive(Debug, Clone)]
pub struct BenchmarkStats {
pub name: String,
samples: Vec<Duration>,
ops_per_sample: usize,
}
impl BenchmarkStats {
    /// Creates an empty collector for a benchmark with the given name.
    pub fn new(name: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            samples: Vec::new(),
            ops_per_sample: 1,
        }
    }
    /// Sets how many logical operations each recorded sample covers;
    /// used by [`Self::throughput`]. Should be nonzero.
    pub const fn set_ops_per_sample(&mut self, ops: usize) {
        self.ops_per_sample = ops;
    }
    /// Records one timing sample.
    pub fn record(&mut self, duration: Duration) {
        self.samples.push(duration);
    }
    /// Records a sample given in milliseconds.
    ///
    /// # Panics
    ///
    /// Panics if `ms` is negative, not finite, or too large to represent,
    /// per [`Duration::from_secs_f64`].
    pub fn record_ms(&mut self, ms: f64) {
        self.samples.push(Duration::from_secs_f64(ms / 1000.0));
    }
    /// Number of recorded samples.
    pub fn count(&self) -> usize {
        self.samples.len()
    }
    /// Arithmetic mean of the samples, or `None` if no samples exist.
    pub fn mean(&self) -> Option<Duration> {
        if self.samples.is_empty() {
            return None;
        }
        let total: Duration = self.samples.iter().sum();
        Some(total / self.samples.len() as u32)
    }
    /// Median sample (the mean of the two middle samples for an even count).
    pub fn median(&self) -> Option<Duration> {
        if self.samples.is_empty() {
            return None;
        }
        let mut sorted = self.samples.clone();
        sorted.sort_unstable();
        let mid = sorted.len() / 2;
        if sorted.len() % 2 == 0 {
            Some((sorted[mid - 1] + sorted[mid]) / 2)
        } else {
            Some(sorted[mid])
        }
    }
    /// Smallest recorded sample.
    pub fn min(&self) -> Option<Duration> {
        self.samples.iter().min().copied()
    }
    /// Largest recorded sample.
    pub fn max(&self) -> Option<Duration> {
        self.samples.iter().max().copied()
    }
    /// Sample standard deviation (Bessel-corrected, dividing by `n - 1`).
    /// Returns `None` with fewer than two samples.
    pub fn std_dev(&self) -> Option<Duration> {
        if self.samples.len() < 2 {
            return None;
        }
        let mean = self.mean()?.as_secs_f64();
        let variance: f64 = self
            .samples
            .iter()
            .map(|d| {
                let diff = d.as_secs_f64() - mean;
                diff * diff
            })
            .sum::<f64>()
            / (self.samples.len() - 1) as f64;
        Some(Duration::from_secs_f64(variance.sqrt()))
    }
    /// Mean throughput in operations per second, computed from
    /// `ops_per_sample` and the mean sample time.
    pub fn throughput(&self) -> Option<f64> {
        let mean = self.mean()?;
        let ops_per_sec = self.ops_per_sample as f64 / mean.as_secs_f64();
        Some(ops_per_sec)
    }
    /// Percentile for `p` in `0.0..=100.0`, taken as the sample at the
    /// index nearest to `p / 100 * (len - 1)`. Returns `None` for an
    /// out-of-range `p` or when no samples exist.
    pub fn percentile(&self, p: f64) -> Option<Duration> {
        if self.samples.is_empty() || !(0.0..=100.0).contains(&p) {
            return None;
        }
        let mut sorted = self.samples.clone();
        sorted.sort_unstable();
        let idx = ((p / 100.0) * (sorted.len() - 1) as f64).round() as usize;
        Some(sorted[idx])
    }
    /// Renders a human-readable multi-line summary of the recorded samples.
    pub fn report(&self) -> String {
use std::fmt::Write;
if self.samples.is_empty() {
return format!("Benchmark '{}': No samples recorded", self.name);
}
let mut report = String::new();
let _ = writeln!(report, "Benchmark: {}", self.name);
let _ = writeln!(report, " Samples: {}", self.count());
if let Some(mean) = self.mean() {
let _ = writeln!(report, " Mean: {mean:?}");
}
if let Some(median) = self.median() {
let _ = writeln!(report, " Median: {median:?}");
}
if let Some(std_dev) = self.std_dev() {
let _ = writeln!(report, " Std Dev: {std_dev:?}");
}
if let Some(min) = self.min() {
let _ = writeln!(report, " Min: {min:?}");
}
if let Some(max) = self.max() {
let _ = writeln!(report, " Max: {max:?}");
}
if let Some(p99) = self.percentile(99.0) {
let _ = writeln!(report, " P99: {p99:?}");
}
if let Some(throughput) = self.throughput() {
if throughput > 1000.0 {
let _ = writeln!(report, " Throughput: {:.2} K ops/s", throughput / 1000.0);
} else {
let _ = writeln!(report, " Throughput: {throughput:.2} ops/s");
}
}
report
}
    /// Discards all recorded samples; the name and ops-per-sample setting
    /// are kept.
    pub fn clear(&mut self) {
        self.samples.clear();
    }
}
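/// Times a single invocation of `f`, returning its result alongside the
/// elapsed wall-clock time.
///
/// A minimal usage sketch (`your_crate` is a placeholder for the actual
/// crate name):
///
/// ```ignore
/// use your_crate::benchmark::measure;
///
/// let (sum, elapsed) = measure(|| (0..1_000u64).sum::<u64>());
/// assert_eq!(sum, 499_500);
/// println!("took {elapsed:?}");
/// ```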
pub fn measure<F, R>(f: F) -> (R, Duration)
where
F: FnOnce() -> R,
{
let timer = BenchmarkTimer::start();
let result = f();
(result, timer.stop())
}
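/// Times `f` over `iterations` runs and collects the samples into a
/// [`BenchmarkStats`] named "benchmark".
///
/// A minimal sketch (`your_crate` is a placeholder):
///
/// ```ignore
/// use your_crate::benchmark::measure_iterations;
///
/// let stats = measure_iterations(100, || vec![0u8; 4096]);
/// assert_eq!(stats.count(), 100);
/// println!("{}", stats.report());
/// ```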
pub fn measure_iterations<F, R>(iterations: usize, mut f: F) -> BenchmarkStats
where
F: FnMut() -> R,
{
let mut stats = BenchmarkStats::new("benchmark");
for _ in 0..iterations {
let timer = BenchmarkTimer::start();
let _ = f();
stats.record(timer.stop());
}
stats
}
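/// Like [`measure_iterations`], but first runs `f` `warmup` times without
/// recording, so one-time costs (allocation, cache fill) do not skew the
/// samples.
///
/// A minimal sketch (`your_crate` is a placeholder):
///
/// ```ignore
/// use your_crate::benchmark::measure_with_warmup;
///
/// let stats = measure_with_warmup(10, 100, || vec![0u8; 4096]);
/// assert_eq!(stats.count(), 100);
/// ```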
pub fn measure_with_warmup<F, R>(warmup: usize, iterations: usize, mut f: F) -> BenchmarkStats
where
F: FnMut() -> R,
{
for _ in 0..warmup {
let _ = f();
}
measure_iterations(iterations, f)
}
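/// A byte count with convenience conversions to binary units (KiB, MiB,
/// GiB, despite the `kb`/`mb`/`gb` method names).
///
/// A minimal sketch (`your_crate` is a placeholder):
///
/// ```ignore
/// use your_crate::benchmark::MemoryUsage;
///
/// let mem = MemoryUsage::from_bytes(3 * 1024 * 1024);
/// assert!((mem.mb() - 3.0).abs() < 1e-9);
/// ```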
#[derive(Debug, Clone, Copy)]
pub struct MemoryUsage {
pub bytes: usize,
}
impl MemoryUsage {
    /// Wraps a raw byte count.
    pub const fn from_bytes(bytes: usize) -> Self {
        Self { bytes }
    }
    /// Size in binary kilobytes (KiB, 1024 bytes).
    pub fn kb(&self) -> f64 {
        self.bytes as f64 / 1024.0
    }
    /// Size in binary megabytes (MiB).
    pub fn mb(&self) -> f64 {
        self.bytes as f64 / (1024.0 * 1024.0)
    }
    /// Size in binary gigabytes (GiB).
    pub fn gb(&self) -> f64 {
        self.bytes as f64 / (1024.0 * 1024.0 * 1024.0)
    }
    /// Formats the size with an appropriate unit via
    /// [`crate::utils::format_memory`].
    pub fn format(&self) -> String {
        crate::utils::format_memory(self.bytes)
    }
}
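/// Estimates the memory required for a dense statevector of `num_qubits`
/// qubits, delegating to [`crate::utils::estimate_statevector_memory`].
/// The 10-qubit case in the tests comes to 16384 bytes, i.e. 2^10
/// amplitudes at 16 bytes each.
///
/// A minimal sketch (`your_crate` is a placeholder):
///
/// ```ignore
/// use your_crate::benchmark::estimate_statevector_memory;
///
/// let mem = estimate_statevector_memory(10);
/// assert!((mem.kb() - 16.0).abs() < 1e-9);
/// ```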
pub const fn estimate_statevector_memory(num_qubits: u32) -> MemoryUsage {
MemoryUsage::from_bytes(crate::utils::estimate_statevector_memory(num_qubits))
}
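/// Warmup and measurement settings for running a closure as a benchmark.
///
/// A minimal usage sketch (`your_crate` is a placeholder):
///
/// ```ignore
/// use your_crate::benchmark::BenchmarkConfig;
///
/// let stats = BenchmarkConfig::quick().run("vec_push", || {
///     let mut v = Vec::with_capacity(1024);
///     for i in 0..1024 {
///         v.push(i);
///     }
///     v
/// });
/// println!("{}", stats.report());
/// ```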
#[derive(Debug, Clone)]
pub struct BenchmarkConfig {
    /// Untimed iterations run first to absorb one-time costs.
    pub warmup_iterations: usize,
    /// Iterations that are actually timed.
    pub measure_iterations: usize,
    /// Logical operations performed per iteration, used for throughput.
    pub ops_per_iteration: usize,
    /// When true, progress and the final report are printed to stderr.
    pub verbose: bool,
}
impl Default for BenchmarkConfig {
fn default() -> Self {
Self {
warmup_iterations: 10,
measure_iterations: 100,
ops_per_iteration: 1,
verbose: false,
}
}
}
impl BenchmarkConfig {
    /// A fast configuration: 5 warmup and 20 measured iterations.
    pub const fn quick() -> Self {
        Self {
            warmup_iterations: 5,
            measure_iterations: 20,
            ops_per_iteration: 1,
            verbose: false,
        }
    }
    /// A high-precision configuration: 50 warmup and 1000 measured iterations.
    pub const fn thorough() -> Self {
        Self {
            warmup_iterations: 50,
            measure_iterations: 1000,
            ops_per_iteration: 1,
            verbose: false,
        }
    }
    /// Runs `f` with this configuration's warmup and measurement loops,
    /// returning the collected statistics.
    pub fn run<F, R>(&self, name: &str, mut f: F) -> BenchmarkStats
    where
        F: FnMut() -> R,
    {
if self.verbose {
eprintln!(
"Running benchmark '{}' with {} warmup + {} iterations...",
name, self.warmup_iterations, self.measure_iterations
);
}
for _ in 0..self.warmup_iterations {
let _ = f();
}
let mut stats = BenchmarkStats::new(name);
stats.set_ops_per_sample(self.ops_per_iteration);
for _ in 0..self.measure_iterations {
let timer = BenchmarkTimer::start();
let _ = f();
stats.record(timer.stop());
}
if self.verbose {
eprintln!("{}", stats.report());
}
stats
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread;
#[test]
fn test_benchmark_timer() {
let timer = BenchmarkTimer::start();
thread::sleep(Duration::from_millis(10));
let elapsed = timer.stop();
assert!(elapsed >= Duration::from_millis(9));
assert!(elapsed < Duration::from_millis(50));
}
#[test]
fn test_benchmark_timer_labeled() {
let timer = BenchmarkTimer::start_labeled("test_op");
assert_eq!(timer.label(), Some("test_op"));
let _ = timer.stop();
}
#[test]
fn test_benchmark_stats() {
let mut stats = BenchmarkStats::new("test");
stats.record(Duration::from_millis(10));
stats.record(Duration::from_millis(20));
stats.record(Duration::from_millis(30));
assert_eq!(stats.count(), 3);
assert_eq!(stats.mean(), Some(Duration::from_millis(20)));
assert_eq!(stats.median(), Some(Duration::from_millis(20)));
assert_eq!(stats.min(), Some(Duration::from_millis(10)));
assert_eq!(stats.max(), Some(Duration::from_millis(30)));
}
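    // Sanity check for the sample standard deviation: samples of 10/20/30 ms
    // have variance ((-10)^2 + 0 + 10^2) / (3 - 1) = 100 ms^2, i.e. 10 ms.
    #[test]
    fn test_benchmark_stats_std_dev() {
        let mut stats = BenchmarkStats::new("std_dev_test");
        stats.record(Duration::from_millis(10));
        stats.record(Duration::from_millis(20));
        stats.record(Duration::from_millis(30));
        let sd = stats.std_dev().expect("std_dev needs at least 2 samples");
        assert!((sd.as_secs_f64() - 0.010).abs() < 1e-6);
        // Fewer than two samples yields None.
        let mut single = BenchmarkStats::new("single");
        single.record(Duration::from_millis(10));
        assert_eq!(single.std_dev(), None);
    }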
#[test]
fn test_benchmark_stats_empty() {
let stats = BenchmarkStats::new("empty");
assert_eq!(stats.count(), 0);
assert_eq!(stats.mean(), None);
assert_eq!(stats.median(), None);
}
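    // record_ms should convert milliseconds into an equivalent Duration.
    #[test]
    fn test_record_ms() {
        let mut stats = BenchmarkStats::new("record_ms_test");
        stats.record_ms(10.0);
        assert_eq!(stats.count(), 1);
        let mean = stats.mean().expect("one sample recorded");
        assert!((mean.as_secs_f64() - 0.010).abs() < 1e-9);
    }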
#[test]
fn test_benchmark_stats_throughput() {
let mut stats = BenchmarkStats::new("throughput_test");
stats.set_ops_per_sample(100);
stats.record(Duration::from_secs(1));
let throughput = stats
.throughput()
.expect("throughput should be calculable with one sample");
assert!((throughput - 100.0).abs() < 0.1);
}
#[test]
fn test_measure() {
let (result, duration) = measure(|| {
thread::sleep(Duration::from_millis(5));
42
});
assert_eq!(result, 42);
assert!(duration >= Duration::from_millis(4));
}
#[test]
fn test_measure_iterations() {
let stats = measure_iterations(10, || {
thread::sleep(Duration::from_millis(1));
});
assert_eq!(stats.count(), 10);
}
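    // measure_with_warmup should record only the measured iterations,
    // not the warmup runs.
    #[test]
    fn test_measure_with_warmup() {
        let mut calls = 0usize;
        let stats = measure_with_warmup(3, 5, || calls += 1);
        assert_eq!(stats.count(), 5);
        assert_eq!(calls, 8); // 3 warmup + 5 measured
    }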
#[test]
fn test_memory_usage() {
let mem = MemoryUsage::from_bytes(1024 * 1024);
assert!((mem.kb() - 1024.0).abs() < 0.01);
assert!((mem.mb() - 1.0).abs() < 0.01);
}
#[test]
fn test_estimate_statevector_memory() {
let mem = estimate_statevector_memory(10);
assert_eq!(mem.bytes, 16384);
}
#[test]
fn test_benchmark_config() {
let config = BenchmarkConfig::quick();
let stats = config.run("quick_test", || {
let _ = 1 + 1;
});
assert_eq!(stats.count(), 20);
}
#[test]
fn test_percentile() {
let mut stats = BenchmarkStats::new("percentile_test");
for i in 1..=100 {
stats.record(Duration::from_millis(i));
}
let p50 = stats
.percentile(50.0)
.expect("p50 should be calculable with 100 samples");
assert!(p50 >= Duration::from_millis(49) && p50 <= Duration::from_millis(51));
let p99 = stats
.percentile(99.0)
.expect("p99 should be calculable with 100 samples");
assert!(p99 >= Duration::from_millis(98));
}
#[test]
fn test_benchmark_report() {
let mut stats = BenchmarkStats::new("report_test");
stats.record(Duration::from_millis(10));
stats.record(Duration::from_millis(20));
let report = stats.report();
assert!(report.contains("report_test"));
assert!(report.contains("Samples: 2"));
assert!(report.contains("Mean:"));
}
}