1use serde::{Deserialize, Serialize};
26use std::time::Duration;
27
28pub mod baseline;
29pub mod changepoint;
30pub mod config;
31pub mod cpu_analysis;
32pub mod cpu_monitor;
33pub mod measurement;
34pub mod output;
35pub mod statistics;
36
37pub use baseline::*;
38pub use changepoint::*;
39pub use config::*;
40pub use cpu_analysis::*;
41pub use cpu_monitor::*;
42pub use measurement::*;
43pub use output::*;
44pub use statistics::*;
45
46pub use inventory;
48
/// Latency percentiles and mean computed from one benchmark's timing samples.
///
/// Produced by `calculate_percentiles`; serialized as part of `BenchResult`.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct Percentiles {
    /// 50th percentile (median) timing.
    pub p50: Duration,
    /// 90th percentile timing.
    pub p90: Duration,
    /// 99th percentile timing.
    pub p99: Duration,
    /// Arithmetic mean of all timings (integer-nanosecond precision).
    pub mean: Duration,
}
63
/// Summary statistics over raw integer samples, produced by
/// `calculate_statistics`.
///
/// Sample values are opaque `u128` magnitudes — presumably nanoseconds, to
/// match `calculate_percentiles` — TODO confirm at the call sites.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Statistics {
    /// Integer (truncating) mean of all samples.
    pub mean: u128,
    /// 50th percentile, via truncating index `len * 50 / 100`.
    pub median: u128,
    /// 90th percentile.
    pub p90: u128,
    /// 99th percentile.
    pub p99: u128,
    /// Population standard deviation (sqrt of `variance`).
    pub std_dev: f64,
    /// Population variance (divides by N, not N-1).
    pub variance: f64,
    /// Smallest sample.
    pub min: u128,
    /// Largest sample.
    pub max: u128,
    /// Number of samples the statistics were computed from.
    pub sample_count: usize,
}
88
/// Complete result of measuring a single benchmark, including every raw
/// timing so downstream analysis can recompute its own statistics.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BenchResult {
    /// Benchmark name as registered with `inventory`.
    pub name: String,
    /// Rust module path of the benchmark (first segment is used as the
    /// baseline grouping key — see `run_and_stream_benchmarks`).
    pub module: String,
    /// Iterations executed per sample.
    pub iterations: usize,
    /// Number of samples collected.
    pub samples: usize,
    /// Percentiles/mean derived from `all_timings`.
    pub percentiles: Percentiles,
    /// Raw per-sample timings.
    pub all_timings: Vec<Duration>,
    // `serde(default)` lets older serialized results without these fields
    // still deserialize.
    #[serde(default)]
    /// CPU usage snapshots taken during measurement — presumably one per
    /// sample; confirm in the measurement module.
    pub cpu_samples: Vec<CpuSnapshot>,
    #[serde(default)]
    /// Warmup duration — presumably milliseconds, per the field name; `None`
    /// when warmup was not recorded.
    pub warmup_ms: Option<u128>,
    #[serde(default)]
    /// Iterations executed during warmup, if recorded.
    pub warmup_iterations: Option<u64>,
}
116
/// Comparison of a current benchmark run against stored baseline data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Comparison {
    /// Mean timing of the current run.
    pub current_mean: Duration,
    /// Mean timing of the baseline window.
    pub baseline_mean: Duration,
    /// Relative change vs. baseline — presumably positive means slower;
    /// confirm in the baseline module.
    pub percentage_change: f64,
    /// How many baseline runs were compared against; `serde(default)` keeps
    /// older serialized comparisons loadable.
    #[serde(default)]
    pub baseline_count: usize,
    /// Z-score of the current mean vs. the baseline distribution, when the
    /// statistical comparison produced one (omitted from JSON otherwise).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub z_score: Option<f64>,
    /// Confidence interval bounds, when available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub confidence_interval: Option<(f64, f64)>,
    /// Change-point detection probability, when available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub change_probability: Option<f64>,
}
141
/// A registered benchmark: a name, its module path, and the function to run.
///
/// Instances are submitted via `inventory::submit!` in benchmark crates and
/// enumerated here with `inventory::iter::<SimpleBench>`.
pub struct SimpleBench {
    /// Unique benchmark name (matched exactly / by substring in the runners).
    pub name: &'static str,
    /// Module path where the benchmark was defined.
    pub module: &'static str,
    /// The zero-argument benchmark body; timed by the measurement layer.
    pub func: fn(),
}

// Registers SimpleBench as a collectible type with the `inventory` crate.
inventory::collect!(SimpleBench);
/// Lightweight name/module pair used by `list_benchmarks_json` to describe a
/// registered benchmark without running it.
#[derive(Debug, Serialize, Deserialize)]
pub struct BenchmarkInfo {
    /// Benchmark name.
    pub name: String,
    /// Module path of the benchmark.
    pub module: String,
}
167
168pub fn list_benchmarks_json() {
172 let benchmarks: Vec<BenchmarkInfo> = inventory::iter::<SimpleBench>()
173 .map(|b| BenchmarkInfo {
174 name: b.name.to_string(),
175 module: b.module.to_string(),
176 })
177 .collect();
178 println!("{}", serde_json::to_string(&benchmarks).unwrap());
179}
180
181pub fn run_single_benchmark_json(config: &crate::config::BenchmarkConfig) {
186 let bench_name = std::env::var("SIMPLEBENCH_BENCH_FILTER")
187 .expect("SIMPLEBENCH_BENCH_FILTER must be set for single benchmark execution");
188
189 let pin_core: usize = std::env::var("SIMPLEBENCH_PIN_CORE")
190 .ok()
191 .and_then(|s| s.parse().ok())
192 .unwrap_or(1); if let Err(e) = affinity::set_thread_affinity([pin_core]) {
196 eprintln!(
197 "Warning: Failed to set affinity to core {}: {:?}",
198 pin_core, e
199 );
200 }
201
202 for bench in inventory::iter::<SimpleBench>() {
204 if bench.name == bench_name {
205 let result = measure_with_warmup(
206 bench.name.to_string(),
207 bench.module.to_string(),
208 bench.func,
209 config.measurement.iterations,
210 config.measurement.samples,
211 config.measurement.warmup_duration_secs,
212 );
213 println!("{}", serde_json::to_string(&result).unwrap());
214 return;
215 }
216 }
217
218 eprintln!("ERROR: Benchmark '{}' not found", bench_name);
219 std::process::exit(1);
220}
221
222pub(crate) fn calculate_percentiles(timings: &[Duration]) -> Percentiles {
223 let mut sorted_timings = timings.to_vec();
224 sorted_timings.sort();
225
226 let len = sorted_timings.len();
227 let p50_idx = (len * 50) / 100;
228 let p90_idx = (len * 90) / 100;
229 let p99_idx = (len * 99) / 100;
230
231 let sum_nanos: u128 = timings.iter().map(|d| d.as_nanos()).sum();
233 let mean_nanos = sum_nanos / (len as u128);
234 let mean = Duration::from_nanos(mean_nanos as u64);
235
236 Percentiles {
237 p50: sorted_timings[p50_idx.min(len - 1)],
238 p90: sorted_timings[p90_idx.min(len - 1)],
239 p99: sorted_timings[p99_idx.min(len - 1)],
240 mean,
241 }
242}
243
244pub fn calculate_statistics(samples: &[u128]) -> Statistics {
246 let sample_count = samples.len();
247
248 if sample_count == 0 {
249 return Statistics {
250 mean: 0,
251 median: 0,
252 p90: 0,
253 p99: 0,
254 std_dev: 0.0,
255 variance: 0.0,
256 min: 0,
257 max: 0,
258 sample_count: 0,
259 };
260 }
261
262 let mut sorted = samples.to_vec();
264 sorted.sort();
265
266 let p50_idx = (sample_count * 50) / 100;
268 let p90_idx = (sample_count * 90) / 100;
269 let p99_idx = (sample_count * 99) / 100;
270
271 let median = sorted[p50_idx.min(sample_count - 1)];
272 let p90 = sorted[p90_idx.min(sample_count - 1)];
273 let p99 = sorted[p99_idx.min(sample_count - 1)];
274
275 let sum: u128 = samples.iter().sum();
277 let mean = sum / (sample_count as u128);
278
279 let mean_f64 = mean as f64;
281 let variance: f64 = samples
282 .iter()
283 .map(|&s| {
284 let diff = s as f64 - mean_f64;
285 diff * diff
286 })
287 .sum::<f64>()
288 / (sample_count as f64);
289
290 let std_dev = variance.sqrt();
291
292 let min = *sorted.first().unwrap();
294 let max = *sorted.last().unwrap();
295
296 Statistics {
297 mean,
298 median,
299 p90,
300 p99,
301 std_dev,
302 variance,
303 min,
304 max,
305 sample_count,
306 }
307}
308
/// Run every registered benchmark — optionally narrowed by the
/// `SIMPLEBENCH_BENCH_FILTER` env var (substring match) — streaming one
/// result line per benchmark, comparing each against stored baselines, and
/// returning all collected results.
pub fn run_and_stream_benchmarks(config: &crate::config::BenchmarkConfig) -> Vec<BenchResult> {
    use crate::baseline::{BaselineManager, ComparisonResult};
    use crate::output::{
        print_benchmark_result_line, print_comparison_line, print_new_baseline_line,
        print_streaming_summary,
    };
    use colored::*;

    // Pin to core 0 to reduce scheduler-induced timing noise; failure is
    // reported but not fatal.
    match affinity::set_thread_affinity([0]) {
        Ok(_) => println!(
            "{} {}\n",
            "Set affinity to core".green().bold(),
            "0".cyan().bold()
        ),
        Err(e) => println!("Failed to set core affinity {e:?}"),
    };

    crate::cpu_monitor::verify_benchmark_environment(0);

    let mut results = Vec::new();
    let mut comparisons = Vec::new();

    // Baseline storage is optional: if it can't be initialized we still run
    // the benchmarks, just without historical comparison.
    let baseline_manager = match BaselineManager::new() {
        Ok(bm) => Some(bm),
        Err(e) => {
            eprintln!("Warning: Could not initialize baseline manager: {}", e);
            eprintln!("Running without baseline comparison.");
            None
        }
    };

    let bench_filter = std::env::var("SIMPLEBENCH_BENCH_FILTER").ok();

    // Counts used only for the summary/filter report lines below.
    let total_benchmarks: usize = inventory::iter::<SimpleBench>().count();
    let filtered_count = if let Some(ref filter) = bench_filter {
        inventory::iter::<SimpleBench>()
            .filter(|b| b.name.contains(filter))
            .count()
    } else {
        total_benchmarks
    };

    println!(
        "{} {} {} {} {}",
        "Running benchmarks with".green().bold(),
        config.measurement.samples,
        "samples ×".green().bold(),
        config.measurement.iterations,
        "iterations".green().bold()
    );

    if let Some(ref filter) = bench_filter {
        println!(
            "{} {} ({} matched filter: \"{}\")\n",
            "Filtering to".dimmed(),
            filtered_count,
            if filtered_count == 1 {
                "benchmark"
            } else {
                "benchmarks"
            },
            filter
        );
    } else {
        println!();
    }

    for bench in inventory::iter::<SimpleBench> {
        // Substring match against the optional filter; non-matching
        // benchmarks are skipped entirely.
        if let Some(ref filter) = bench_filter {
            if !bench.name.contains(filter) {
                continue;
            }
        }
        let result = measure_with_warmup(
            bench.name.to_string(),
            bench.module.to_string(),
            bench.func,
            config.measurement.iterations,
            config.measurement.samples,
            config.measurement.warmup_duration_secs,
        );

        print_benchmark_result_line(&result);

        if let Some(ref bm) = baseline_manager {
            // Baselines are grouped by crate name: the first segment of the
            // benchmark's module path.
            let crate_name = result.module.split("::").next().unwrap_or("unknown");

            let mut is_regression = false;
            if let Ok(historical) =
                bm.load_recent_baselines(crate_name, &result.name, config.comparison.window_size)
            {
                if !historical.is_empty() {
                    // Compare against the history window using change-point
                    // detection plus the configured threshold/confidence.
                    let comparison_result = crate::baseline::detect_regression_with_cpd(
                        &result,
                        &historical,
                        config.comparison.threshold,
                        config.comparison.confidence_level,
                        config.comparison.cp_threshold,
                        config.comparison.hazard_rate,
                    );

                    is_regression = comparison_result.is_regression;

                    if let Some(ref comparison) = comparison_result.comparison {
                        print_comparison_line(
                            comparison,
                            &result.name,
                            comparison_result.is_regression,
                        );
                    }

                    comparisons.push(comparison_result);
                } else {
                    // No history yet: record this run as the first baseline.
                    print_new_baseline_line(&result.name);

                    comparisons.push(ComparisonResult {
                        benchmark_name: result.name.clone(),
                        comparison: None,
                        is_regression: false,
                    });
                }
            }

            // Persist this run so future invocations can compare against it.
            if let Err(e) = bm.save_baseline(crate_name, &result, is_regression) {
                eprintln!(
                    "Warning: Failed to save baseline for {}: {}",
                    result.name, e
                );
            }
        }

        results.push(result);
        println!();
    }

    if !comparisons.is_empty() {
        print_streaming_summary(&comparisons, &config.comparison);

        if let Some(ref filter) = bench_filter {
            println!(
                "\n{} {} of {} total benchmarks (filter: \"{}\")",
                "Ran".dimmed(),
                filtered_count,
                total_benchmarks,
                filter
            );
        }
    }

    results
}
479
#[cfg(test)]
mod tests {
    use super::*;

    /// Percentiles over 1..=10 ms: the truncating indices select the 6th
    /// and 10th elements, and the mean is exactly 5.5 ms.
    #[test]
    fn test_calculate_percentiles() {
        let timings: Vec<Duration> = (1..=10).map(Duration::from_millis).collect();

        let percentiles = calculate_percentiles(&timings);

        assert_eq!(percentiles.p50, Duration::from_millis(6));
        assert_eq!(percentiles.p90, Duration::from_millis(10));
        assert_eq!(percentiles.p99, Duration::from_millis(10));
        assert_eq!(percentiles.mean, Duration::from_micros(5500));
    }

    /// A single sample is reported as every percentile and as the mean.
    #[test]
    fn test_calculate_percentiles_single_element() {
        let only = Duration::from_millis(5);

        let percentiles = calculate_percentiles(&[only]);

        assert_eq!(percentiles.p50, only);
        assert_eq!(percentiles.p90, only);
        assert_eq!(percentiles.p99, only);
        assert_eq!(percentiles.mean, only);
    }
}
519}