use serde::{Deserialize, Serialize};
use std::time::Duration;

pub mod baseline;
pub mod changepoint;
pub mod config;
pub mod cpu_analysis;
pub mod cpu_monitor;
pub mod measurement;
pub mod output;
pub mod progress;
pub mod statistics;

pub use baseline::*;
pub use changepoint::*;
pub use config::*;
pub use cpu_analysis::*;
pub use cpu_monitor::*;
pub use measurement::*;
pub use output::*;
pub use progress::*;
pub use statistics::*;

pub use inventory;

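/// Latency percentiles and the arithmetic mean for a set of timing samples;
/// see `calculate_percentiles`.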
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct Percentiles {
    pub p50: Duration,
    pub p90: Duration,
    pub p99: Duration,
    pub mean: Duration,
}

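/// Summary statistics over raw integer samples (typically nanoseconds, as
/// produced by `Duration::as_nanos`); see [`calculate_statistics`].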
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Statistics {
    pub mean: u128,
    pub median: u128,
    pub p90: u128,
    pub p99: u128,
    pub std_dev: f64,
    pub variance: f64,
    pub min: u128,
    pub max: u128,
    pub sample_count: usize,
}

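/// The result of one benchmark run: identifying metadata, the raw per-sample
/// timings, their percentiles, and optional CPU/warmup data. The
/// `#[serde(default)]` fields allow baselines serialized before those fields
/// existed to deserialize cleanly.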
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BenchResult {
    pub name: String,
    pub module: String,
    pub samples: usize,
    pub percentiles: Percentiles,
    pub all_timings: Vec<Duration>,
    #[serde(default)]
    pub cpu_samples: Vec<CpuSnapshot>,
    #[serde(default)]
    pub warmup_ms: Option<u128>,
    #[serde(default)]
    pub warmup_iterations: Option<u64>,
}

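/// A comparison of the current run against stored baselines. The optional
/// statistical fields (`z_score`, `confidence_interval`,
/// `change_probability`) are skipped during serialization when `None`.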
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Comparison {
    pub current_mean: Duration,
    pub baseline_mean: Duration,
    pub percentage_change: f64,
    #[serde(default)]
    pub baseline_count: usize,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub z_score: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub confidence_interval: Option<(f64, f64)>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub change_probability: Option<f64>,
}

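/// A benchmark registered at link time through the [`inventory`] crate and
/// discovered at runtime with `inventory::iter`.
///
/// A minimal registration sketch (this crate may provide a macro that
/// generates this boilerplate; `run_my_bench` here is a hypothetical runner):
///
/// ```ignore
/// fn run_my_bench(config: &BenchmarkConfig) -> BenchResult {
///     // Measure the workload `config.measurement.samples` times and
///     // assemble a BenchResult from the timings.
///     todo!()
/// }
///
/// inventory::submit! {
///     SimpleBench {
///         name: "my_bench",
///         module: module_path!(),
///         run: run_my_bench,
///     }
/// }
/// ```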
pub struct SimpleBench {
    pub name: &'static str,
    pub module: &'static str,
    pub run: fn(&crate::config::BenchmarkConfig) -> BenchResult,
}

inventory::collect!(SimpleBench);

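/// The name/module pair emitted per benchmark by [`list_benchmarks_json`].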
#[derive(Debug, Serialize, Deserialize)]
pub struct BenchmarkInfo {
    pub name: String,
    pub module: String,
}

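/// Prints all registered benchmarks to stdout as a JSON array, e.g.
/// `[{"name":"my_bench","module":"my_crate::benches"}]` (names shown are
/// illustrative).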
pub fn list_benchmarks_json() {
    let benchmarks: Vec<BenchmarkInfo> = inventory::iter::<SimpleBench>()
        .map(|b| BenchmarkInfo {
            name: b.name.to_string(),
            module: b.module.to_string(),
        })
        .collect();
    println!("{}", serde_json::to_string(&benchmarks).unwrap());
}

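/// Runs the single benchmark named by the `SIMPLEBENCH_BENCH_FILTER`
/// environment variable and prints its `BenchResult` as JSON on stdout;
/// typically invoked in a worker subprocess. The thread is pinned to the
/// core given by `SIMPLEBENCH_PIN_CORE` (defaulting to core 1), and the
/// process exits with status 1 if no registered benchmark matches.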
pub fn run_single_benchmark_json(config: &crate::config::BenchmarkConfig) {
    let bench_name = std::env::var("SIMPLEBENCH_BENCH_FILTER")
        .expect("SIMPLEBENCH_BENCH_FILTER must be set for single benchmark execution");

    // Pin to the requested core; default to core 1 if the variable is unset
    // or unparseable.
    let pin_core: usize = std::env::var("SIMPLEBENCH_PIN_CORE")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(1);
    if let Err(e) = affinity::set_thread_affinity([pin_core]) {
        eprintln!(
            "Warning: Failed to set affinity to core {}: {:?}",
            pin_core, e
        );
    }

    for bench in inventory::iter::<SimpleBench>() {
        if bench.name == bench_name {
            let result = (bench.run)(config);
            println!("{}", serde_json::to_string(&result).unwrap());
            return;
        }
    }

    eprintln!("ERROR: Benchmark '{}' not found", bench_name);
    std::process::exit(1);
}

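/// Computes p50/p90/p99 and the mean of `timings`. Percentiles use
/// nearest-rank style indexing, `(len * P) / 100`, clamped to the last
/// element; the mean is truncating integer nanosecond division.
///
/// # Panics
///
/// Panics on empty input (division by zero when computing the mean).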
pub(crate) fn calculate_percentiles(timings: &[Duration]) -> Percentiles {
    let mut sorted_timings = timings.to_vec();
    sorted_timings.sort();

    let len = sorted_timings.len();
    let p50_idx = (len * 50) / 100;
    let p90_idx = (len * 90) / 100;
    let p99_idx = (len * 99) / 100;

    let sum_nanos: u128 = timings.iter().map(|d| d.as_nanos()).sum();
    let mean_nanos = sum_nanos / (len as u128);
    let mean = Duration::from_nanos(mean_nanos as u64);

    Percentiles {
        p50: sorted_timings[p50_idx.min(len - 1)],
        p90: sorted_timings[p90_idx.min(len - 1)],
        p99: sorted_timings[p99_idx.min(len - 1)],
        mean,
    }
}

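/// Computes summary statistics over raw integer samples: mean, median (p50),
/// p90, p99, population variance and standard deviation (divisor `n`, not
/// `n - 1`), min, max, and the sample count. Empty input yields an all-zero
/// `Statistics`.
///
/// Expected values, sketched as an illustrative (not compiled) example:
///
/// ```ignore
/// let stats = calculate_statistics(&[10, 20, 30, 40]);
/// assert_eq!(stats.mean, 25);   // 100 / 4
/// assert_eq!(stats.median, 30); // sorted[(4 * 50) / 100] = sorted[2]
/// assert_eq!(stats.min, 10);
/// assert_eq!(stats.max, 40);
/// assert_eq!(stats.sample_count, 4);
/// ```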
pub fn calculate_statistics(samples: &[u128]) -> Statistics {
    let sample_count = samples.len();

    if sample_count == 0 {
        return Statistics {
            mean: 0,
            median: 0,
            p90: 0,
            p99: 0,
            std_dev: 0.0,
            variance: 0.0,
            min: 0,
            max: 0,
            sample_count: 0,
        };
    }

    let mut sorted = samples.to_vec();
    sorted.sort();

    let p50_idx = (sample_count * 50) / 100;
    let p90_idx = (sample_count * 90) / 100;
    let p99_idx = (sample_count * 99) / 100;

    let median = sorted[p50_idx.min(sample_count - 1)];
    let p90 = sorted[p90_idx.min(sample_count - 1)];
    let p99 = sorted[p99_idx.min(sample_count - 1)];

    let sum: u128 = samples.iter().sum();
    let mean = sum / (sample_count as u128);

    let mean_f64 = mean as f64;
    let variance: f64 = samples
        .iter()
        .map(|&s| {
            let diff = s as f64 - mean_f64;
            diff * diff
        })
        .sum::<f64>()
        / (sample_count as f64);

    let std_dev = variance.sqrt();

    let min = *sorted.first().unwrap();
    let max = *sorted.last().unwrap();

    Statistics {
        mean,
        median,
        p90,
        p99,
        std_dev,
        variance,
        min,
        max,
        sample_count,
    }
}

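/// Runs every registered benchmark (honoring `SIMPLEBENCH_BENCH_FILTER` as a
/// substring filter) and streams results as they complete: each result is
/// printed, compared against recent baselines when a `BaselineManager` is
/// available, and saved as a new baseline, with a summary printed at the
/// end. The thread is pinned to core 0 before measuring.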
pub fn run_and_stream_benchmarks(config: &crate::config::BenchmarkConfig) -> Vec<BenchResult> {
    use crate::baseline::{BaselineManager, ComparisonResult};
    use crate::output::{
        print_benchmark_result_line, print_comparison_line, print_new_baseline_line,
        print_streaming_summary,
    };
    use colored::*;

    match affinity::set_thread_affinity([0]) {
        Ok(_) => println!(
            "{} {}\n",
            "Set affinity to core".green().bold(),
            "0".cyan().bold()
        ),
        Err(e) => println!("Failed to set core affinity {e:?}"),
    };

    crate::cpu_monitor::verify_benchmark_environment(0);

    let mut results = Vec::new();
    let mut comparisons = Vec::new();

    let baseline_manager = match BaselineManager::new() {
        Ok(bm) => Some(bm),
        Err(e) => {
            eprintln!("Warning: Could not initialize baseline manager: {}", e);
            eprintln!("Running without baseline comparison.");
            None
        }
    };

    let bench_filter = std::env::var("SIMPLEBENCH_BENCH_FILTER").ok();

    let total_benchmarks: usize = inventory::iter::<SimpleBench>().count();
    let filtered_count = if let Some(ref filter) = bench_filter {
        inventory::iter::<SimpleBench>()
            .filter(|b| b.name.contains(filter))
            .count()
    } else {
        total_benchmarks
    };

    println!(
        "{} {} {}",
        "Running benchmarks with".green().bold(),
        config.measurement.samples,
        "samples".green().bold()
    );

    if let Some(ref filter) = bench_filter {
        println!(
            "{} {} {} (filter: \"{}\")\n",
            "Filtering to".dimmed(),
            filtered_count,
            if filtered_count == 1 {
                "benchmark"
            } else {
                "benchmarks"
            },
            filter
        );
    } else {
        println!();
    }

    for bench in inventory::iter::<SimpleBench>() {
        if let Some(ref filter) = bench_filter {
            if !bench.name.contains(filter) {
                continue;
            }
        }
        let result = (bench.run)(config);

        print_benchmark_result_line(&result);

        if let Some(ref bm) = baseline_manager {
            let crate_name = result.module.split("::").next().unwrap_or("unknown");

            let mut is_regression = false;
            if let Ok(historical) =
                bm.load_recent_baselines(crate_name, &result.name, config.comparison.window_size)
            {
                if !historical.is_empty() {
                    let comparison_result = crate::baseline::detect_regression_with_cpd(
                        &result,
                        &historical,
                        config.comparison.threshold,
                        config.comparison.confidence_level,
                        config.comparison.cp_threshold,
                        config.comparison.hazard_rate,
                    );

                    is_regression = comparison_result.is_regression;

                    if let Some(ref comparison) = comparison_result.comparison {
                        print_comparison_line(
                            comparison,
                            &result.name,
                            comparison_result.is_regression,
                        );
                    }

                    comparisons.push(comparison_result);
                } else {
                    print_new_baseline_line(&result.name);

                    comparisons.push(ComparisonResult {
                        benchmark_name: result.name.clone(),
                        comparison: None,
                        is_regression: false,
                    });
                }
            }

            if let Err(e) = bm.save_baseline(crate_name, &result, is_regression) {
                eprintln!(
                    "Warning: Failed to save baseline for {}: {}",
                    result.name, e
                );
            }
        }

        results.push(result);
        println!();
    }

    if !comparisons.is_empty() {
        print_streaming_summary(&comparisons, &config.comparison);

        if let Some(ref filter) = bench_filter {
            println!(
                "\n{} {} of {} total benchmarks (filter: \"{}\")",
                "Ran".dimmed(),
                filtered_count,
                total_benchmarks,
                filter
            );
        }
    }

    results
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_calculate_percentiles() {
        let timings = vec![
            Duration::from_millis(1),
            Duration::from_millis(2),
            Duration::from_millis(3),
            Duration::from_millis(4),
            Duration::from_millis(5),
            Duration::from_millis(6),
            Duration::from_millis(7),
            Duration::from_millis(8),
            Duration::from_millis(9),
            Duration::from_millis(10),
        ];

        let percentiles = calculate_percentiles(&timings);

        assert_eq!(percentiles.p50, Duration::from_millis(6));
        assert_eq!(percentiles.p90, Duration::from_millis(10));
        assert_eq!(percentiles.p99, Duration::from_millis(10));
        assert_eq!(percentiles.mean, Duration::from_micros(5500));
    }

    #[test]
    fn test_calculate_percentiles_single_element() {
        let timings = vec![Duration::from_millis(5)];
        let percentiles = calculate_percentiles(&timings);

        assert_eq!(percentiles.p50, Duration::from_millis(5));
        assert_eq!(percentiles.p90, Duration::from_millis(5));
        assert_eq!(percentiles.p99, Duration::from_millis(5));
        assert_eq!(percentiles.mean, Duration::from_millis(5));
    }
}