use serde::{Deserialize, Serialize};
use std::time::Duration;

pub mod baseline;
pub mod changepoint;
pub mod config;
pub mod cpu_analysis;
pub mod cpu_monitor;
pub mod measurement;
pub mod output;
pub mod statistics;

pub use baseline::*;
pub use changepoint::*;
pub use config::*;
pub use cpu_analysis::*;
pub use cpu_monitor::*;
pub use measurement::*;
pub use output::*;
pub use statistics::*;

pub use inventory;

/// Latency percentiles (plus the mean) computed from a set of per-sample
/// timings.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct Percentiles {
    pub p50: Duration,
    pub p90: Duration,
    pub p99: Duration,
    pub mean: Duration,
}

/// Descriptive statistics over a set of integer timing samples, in the same
/// units as the input (typically nanoseconds).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Statistics {
    pub mean: u128,
    pub median: u128,
    pub p90: u128,
    pub p99: u128,
    pub std_dev: f64,
    pub variance: f64,
    pub min: u128,
    pub max: u128,
    pub sample_count: usize,
}

/// The full result of a single benchmark run.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BenchResult {
    pub name: String,
    pub module: String,
    pub iterations: usize,
    pub samples: usize,
    pub percentiles: Percentiles,
    pub all_timings: Vec<Duration>,
    #[serde(default)]
    pub cpu_samples: Vec<CpuSnapshot>,
    #[serde(default)]
    pub warmup_ms: Option<u128>,
    #[serde(default)]
    pub warmup_iterations: Option<u64>,
}

/// A comparison of the current run's timings against recent baselines,
/// including optional statistical evidence for a change.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Comparison {
    pub current_mean: Duration,
    pub baseline_mean: Duration,
    pub percentage_change: f64,
    #[serde(default)]
    pub baseline_count: usize,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub z_score: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub confidence_interval: Option<(f64, f64)>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub change_probability: Option<f64>,
}

/// A benchmark registered in the global registry: its name, the module that
/// defined it, and the function that runs it against a configuration.
pub struct SimpleBench {
    pub name: &'static str,
    pub module: &'static str,
    pub run: fn(&crate::config::BenchmarkConfig) -> BenchResult,
}

inventory::collect!(SimpleBench);
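
// How a benchmark lands in the registry, as a minimal sketch: user code (or
// a registration macro, if the crate provides one) submits a `SimpleBench`
// from any module via `inventory::submit!`. The names here are hypothetical.
//
//     fn run_my_bench(config: &BenchmarkConfig) -> BenchResult {
//         // ...measure with `config` and assemble a BenchResult...
//     }
//
//     inventory::submit! {
//         SimpleBench {
//             name: "my_bench",
//             module: module_path!(),
//             run: run_my_bench,
//         }
//     }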

/// The name and module of a registered benchmark, as emitted by
/// `list_benchmarks_json`.
#[derive(Debug, Serialize, Deserialize)]
pub struct BenchmarkInfo {
    pub name: String,
    pub module: String,
}

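/// Prints every registered benchmark to stdout as one JSON array.
///
/// Illustrative output shape (the benchmark names are hypothetical):
///
/// ```json
/// [{"name":"parse_small","module":"my_crate::parsing"}]
/// ```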
pub fn list_benchmarks_json() {
    let benchmarks: Vec<BenchmarkInfo> = inventory::iter::<SimpleBench>()
        .map(|b| BenchmarkInfo {
            name: b.name.to_string(),
            module: b.module.to_string(),
        })
        .collect();
    println!("{}", serde_json::to_string(&benchmarks).unwrap());
}

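/// Runs a single benchmark, selected by the `SIMPLEBENCH_BENCH_FILTER`
/// environment variable, and prints its `BenchResult` as JSON on stdout.
/// `SIMPLEBENCH_PIN_CORE` chooses the core to pin to (default 1).
///
/// A sketch of how a parent runner might invoke this path (the binary name
/// is hypothetical):
///
/// ```sh
/// SIMPLEBENCH_BENCH_FILTER=my_bench SIMPLEBENCH_PIN_CORE=1 \
///     ./target/release/my_bench_binary
/// ```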
pub fn run_single_benchmark_json(config: &crate::config::BenchmarkConfig) {
    let bench_name = std::env::var("SIMPLEBENCH_BENCH_FILTER")
        .expect("SIMPLEBENCH_BENCH_FILTER must be set for single benchmark execution");

    let pin_core: usize = std::env::var("SIMPLEBENCH_PIN_CORE")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(1);

    if let Err(e) = affinity::set_thread_affinity([pin_core]) {
        eprintln!(
            "Warning: Failed to set affinity to core {}: {:?}",
            pin_core, e
        );
    }

    for bench in inventory::iter::<SimpleBench>() {
        if bench.name == bench_name {
            let result = (bench.run)(config);
            println!("{}", serde_json::to_string(&result).unwrap());
            return;
        }
    }

    eprintln!("ERROR: Benchmark '{}' not found", bench_name);
    std::process::exit(1);
}

/// Computes the percentile summary (p50/p90/p99 and mean) for a set of
/// timings.
pub(crate) fn calculate_percentiles(timings: &[Duration]) -> Percentiles {
    // Guard against divide-by-zero (and index underflow) on empty input.
    if timings.is_empty() {
        return Percentiles::default();
    }

    let mut sorted_timings = timings.to_vec();
    sorted_timings.sort();

    let len = sorted_timings.len();
    let p50_idx = (len * 50) / 100;
    let p90_idx = (len * 90) / 100;
    let p99_idx = (len * 99) / 100;

    let sum_nanos: u128 = timings.iter().map(|d| d.as_nanos()).sum();
    let mean_nanos = sum_nanos / (len as u128);
    let mean = Duration::from_nanos(mean_nanos as u64);

    Percentiles {
        p50: sorted_timings[p50_idx.min(len - 1)],
        p90: sorted_timings[p90_idx.min(len - 1)],
        p99: sorted_timings[p99_idx.min(len - 1)],
        mean,
    }
}

/// Computes descriptive statistics over a set of integer samples (typically
/// nanosecond timings). Returns an all-zero `Statistics` for empty input.
pub fn calculate_statistics(samples: &[u128]) -> Statistics {
    let sample_count = samples.len();

    if sample_count == 0 {
        return Statistics {
            mean: 0,
            median: 0,
            p90: 0,
            p99: 0,
            std_dev: 0.0,
            variance: 0.0,
            min: 0,
            max: 0,
            sample_count: 0,
        };
    }

    let mut sorted = samples.to_vec();
    sorted.sort();

    let p50_idx = (sample_count * 50) / 100;
    let p90_idx = (sample_count * 90) / 100;
    let p99_idx = (sample_count * 99) / 100;

    let median = sorted[p50_idx.min(sample_count - 1)];
    let p90 = sorted[p90_idx.min(sample_count - 1)];
    let p99 = sorted[p99_idx.min(sample_count - 1)];

    let sum: u128 = samples.iter().sum();
    let mean = sum / (sample_count as u128);

    let mean_f64 = mean as f64;
    // Population variance: mean squared deviation from the (integer) mean.
    let variance: f64 = samples
        .iter()
        .map(|&s| {
            let diff = s as f64 - mean_f64;
            diff * diff
        })
        .sum::<f64>()
        / (sample_count as f64);

    let std_dev = variance.sqrt();

    let min = *sorted.first().unwrap();
    let max = *sorted.last().unwrap();

    Statistics {
        mean,
        median,
        p90,
        p99,
        std_dev,
        variance,
        min,
        max,
        sample_count,
    }
}

/// Runs every registered benchmark (subject to `SIMPLEBENCH_BENCH_FILTER`),
/// streaming results and baseline comparisons to stdout as each completes.
pub fn run_and_stream_benchmarks(config: &crate::config::BenchmarkConfig) -> Vec<BenchResult> {
    use crate::baseline::{BaselineManager, ComparisonResult};
    use crate::output::{
        print_benchmark_result_line, print_comparison_line, print_new_baseline_line,
        print_streaming_summary,
    };
    use colored::*;

    match affinity::set_thread_affinity([0]) {
        Ok(_) => println!(
            "{} {}\n",
            "Set affinity to core".green().bold(),
            "0".cyan().bold()
        ),
        Err(e) => eprintln!("Warning: failed to set core affinity: {e:?}"),
    };

    crate::cpu_monitor::verify_benchmark_environment(0);

    let mut results = Vec::new();
    let mut comparisons = Vec::new();

    let baseline_manager = match BaselineManager::new() {
        Ok(bm) => Some(bm),
        Err(e) => {
            eprintln!("Warning: Could not initialize baseline manager: {}", e);
            eprintln!("Running without baseline comparison.");
            None
        }
    };

    let bench_filter = std::env::var("SIMPLEBENCH_BENCH_FILTER").ok();

    let total_benchmarks: usize = inventory::iter::<SimpleBench>().count();
    let filtered_count = if let Some(ref filter) = bench_filter {
        inventory::iter::<SimpleBench>()
            .filter(|b| b.name.contains(filter))
            .count()
    } else {
        total_benchmarks
    };

    println!(
        "{} {} {} {} {}",
        "Running benchmarks with".green().bold(),
        config.measurement.samples,
        "samples ×".green().bold(),
        config.measurement.iterations,
        "iterations".green().bold()
    );

    if let Some(ref filter) = bench_filter {
        println!(
            "{} {} {} (filter: \"{}\")\n",
            "Filtering to".dimmed(),
            filtered_count,
            if filtered_count == 1 {
                "benchmark"
            } else {
                "benchmarks"
            },
            filter
        );
    } else {
        println!();
    }

    for bench in inventory::iter::<SimpleBench>() {
        if let Some(ref filter) = bench_filter {
            if !bench.name.contains(filter) {
                continue;
            }
        }

        let result = (bench.run)(config);

        print_benchmark_result_line(&result);

        if let Some(ref bm) = baseline_manager {
            let crate_name = result.module.split("::").next().unwrap_or("unknown");

            let mut is_regression = false;
            if let Ok(historical) =
                bm.load_recent_baselines(crate_name, &result.name, config.comparison.window_size)
            {
                if !historical.is_empty() {
                    let comparison_result = crate::baseline::detect_regression_with_cpd(
                        &result,
                        &historical,
                        config.comparison.threshold,
                        config.comparison.confidence_level,
                        config.comparison.cp_threshold,
                        config.comparison.hazard_rate,
                    );

                    is_regression = comparison_result.is_regression;

                    if let Some(ref comparison) = comparison_result.comparison {
                        print_comparison_line(
                            comparison,
                            &result.name,
                            comparison_result.is_regression,
                        );
                    }

                    comparisons.push(comparison_result);
                } else {
                    print_new_baseline_line(&result.name);

                    comparisons.push(ComparisonResult {
                        benchmark_name: result.name.clone(),
                        comparison: None,
                        is_regression: false,
                    });
                }
            }

            if let Err(e) = bm.save_baseline(crate_name, &result, is_regression) {
                eprintln!(
                    "Warning: Failed to save baseline for {}: {}",
                    result.name, e
                );
            }
        }

        results.push(result);
        println!();
    }

    if !comparisons.is_empty() {
        print_streaming_summary(&comparisons, &config.comparison);

        if let Some(ref filter) = bench_filter {
            println!(
                "\n{} {} of {} total benchmarks (filter: \"{}\")",
                "Ran".dimmed(),
                filtered_count,
                total_benchmarks,
                filter
            );
        }
    }

    results
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_calculate_percentiles() {
        let timings = vec![
            Duration::from_millis(1),
            Duration::from_millis(2),
            Duration::from_millis(3),
            Duration::from_millis(4),
            Duration::from_millis(5),
            Duration::from_millis(6),
            Duration::from_millis(7),
            Duration::from_millis(8),
            Duration::from_millis(9),
            Duration::from_millis(10),
        ];

        let percentiles = calculate_percentiles(&timings);

        assert_eq!(percentiles.p50, Duration::from_millis(6));
        assert_eq!(percentiles.p90, Duration::from_millis(10));
        assert_eq!(percentiles.p99, Duration::from_millis(10));
        assert_eq!(percentiles.mean, Duration::from_micros(5500));
    }

    #[test]
    fn test_calculate_percentiles_single_element() {
        let timings = vec![Duration::from_millis(5)];
        let percentiles = calculate_percentiles(&timings);

        assert_eq!(percentiles.p50, Duration::from_millis(5));
        assert_eq!(percentiles.p90, Duration::from_millis(5));
        assert_eq!(percentiles.p99, Duration::from_millis(5));
        assert_eq!(percentiles.mean, Duration::from_millis(5));
    }
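
    // Added coverage (a minimal sketch): calculate_statistics on a small
    // sample whose statistics are known exactly (mean 5, population
    // variance 4, std_dev 2), plus the empty-input path.
    #[test]
    fn test_calculate_statistics() {
        let samples: Vec<u128> = vec![2, 4, 4, 4, 5, 5, 7, 9];
        let stats = calculate_statistics(&samples);

        assert_eq!(stats.mean, 5);
        assert_eq!(stats.median, 5);
        assert_eq!(stats.p90, 9);
        assert_eq!(stats.p99, 9);
        assert_eq!(stats.min, 2);
        assert_eq!(stats.max, 9);
        assert_eq!(stats.sample_count, 8);
        assert!((stats.variance - 4.0).abs() < f64::EPSILON);
        assert!((stats.std_dev - 2.0).abs() < f64::EPSILON);
    }

    #[test]
    fn test_calculate_statistics_empty() {
        let stats = calculate_statistics(&[]);

        assert_eq!(stats.sample_count, 0);
        assert_eq!(stats.mean, 0);
        assert_eq!(stats.min, 0);
        assert_eq!(stats.max, 0);
    }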
}