// simplebench_runtime/output.rs

1use crate::baseline::ComparisonResult;
2use crate::{BenchResult, Comparison};
3use colored::*;
4use serde_json;
5use std::fs;
6use std::path::Path;
7
8pub fn save_result_to_file<P: AsRef<Path>>(
9    result: &BenchResult,
10    path: P,
11) -> Result<(), Box<dyn std::error::Error>> {
12    let json = serde_json::to_string_pretty(result)?;
13    fs::write(path, json)?;
14    Ok(())
15}
16
17pub fn load_result_from_file<P: AsRef<Path>>(
18    path: P,
19) -> Result<BenchResult, Box<dyn std::error::Error>> {
20    let json = fs::read_to_string(path)?;
21    let result = serde_json::from_str(&json)?;
22    Ok(result)
23}
24
25pub fn save_results_to_file<P: AsRef<Path>>(
26    results: &[BenchResult],
27    path: P,
28) -> Result<(), Box<dyn std::error::Error>> {
29    let json = serde_json::to_string_pretty(results)?;
30    fs::write(path, json)?;
31    Ok(())
32}
33
34pub fn load_results_from_file<P: AsRef<Path>>(
35    path: P,
36) -> Result<Vec<BenchResult>, Box<dyn std::error::Error>> {
37    let json = fs::read_to_string(path)?;
38    let results = serde_json::from_str(&json)?;
39    Ok(results)
40}
41
/// Render a duration with an automatically chosen unit.
///
/// Sub-microsecond values print as whole nanoseconds (`"500ns"`); larger
/// values print with two decimals in the largest unit that keeps the
/// number at or above 1 (`"1.50μs"`, `"12.34ms"`, `"5.00s"`).
pub fn format_duration_human_readable(duration: std::time::Duration) -> String {
    let total_ns = duration.as_nanos();
    let ns = total_ns as f64;

    if total_ns < 1_000 {
        format!("{}ns", total_ns)
    } else if total_ns < 1_000_000 {
        format!("{:.2}μs", ns / 1e3)
    } else if total_ns < 1_000_000_000 {
        format!("{:.2}ms", ns / 1e6)
    } else {
        format!("{:.2}s", ns / 1e9)
    }
}
55
56pub fn format_benchmark_result(result: &BenchResult) -> String {
57    let bench_name = format!("{}::{}", result.module, result.name);
58    let mean_str = format_duration_human_readable(result.percentiles.mean);
59    let p50_str = format_duration_human_readable(result.percentiles.p50);
60    let p90_str = format_duration_human_readable(result.percentiles.p90);
61    let p99_str = format_duration_human_readable(result.percentiles.p99);
62
63    // Calculate coefficient of variation (CV) from raw timings if available
64    let cv_str = if !result.all_timings.is_empty() {
65        let samples_ns: Vec<u128> = result.all_timings.iter().map(|d| d.as_nanos()).collect();
66        let stats = crate::calculate_statistics(&samples_ns);
67        let cv_pct = if stats.mean > 0 {
68            (stats.std_dev / stats.mean as f64) * 100.0
69        } else {
70            0.0
71        };
72        format!(", CV: {:.1}%", cv_pct)
73    } else {
74        String::new()
75    };
76
77    format!(
78        "{} {} mean: {}{}, p50: {}, p90: {}, p99: {}",
79        "BENCH".green().bold(),
80        bench_name.cyan(),
81        mean_str.cyan().bold(),
82        if !cv_str.is_empty() {
83            format!(" ({})", cv_str.trim_start_matches(", "))
84                .dimmed()
85                .to_string()
86        } else {
87            String::new()
88        },
89        p50_str.dimmed(),
90        p90_str.dimmed(),
91        p99_str.dimmed()
92    )
93}
94
95/// Format CPU statistics from samples
96pub fn format_cpu_stats(cpu_samples: &[crate::CpuSnapshot]) -> Option<String> {
97    if cpu_samples.is_empty() {
98        return None;
99    }
100
101    let frequencies: Vec<f64> = cpu_samples
102        .iter()
103        .filter_map(|s| s.frequency_mhz())
104        .collect();
105
106    let temperatures: Vec<f64> = cpu_samples
107        .iter()
108        .filter_map(|s| s.temperature_celsius())
109        .collect();
110
111    let mut parts = Vec::new();
112
113    // Frequency stats
114    if !frequencies.is_empty() {
115        let min_freq = frequencies.iter().copied().fold(f64::INFINITY, f64::min);
116        let max_freq = frequencies
117            .iter()
118            .copied()
119            .fold(f64::NEG_INFINITY, f64::max);
120        let mean_freq = frequencies.iter().sum::<f64>() / frequencies.len() as f64;
121        parts.push(format!(
122            "CPU: {:.0}-{:.0} MHz (mean: {:.0} MHz)",
123            min_freq, max_freq, mean_freq
124        ));
125    }
126
127    // Temperature stats
128    if !temperatures.is_empty() {
129        let min_temp = temperatures.iter().copied().fold(f64::INFINITY, f64::min);
130        let max_temp = temperatures
131            .iter()
132            .copied()
133            .fold(f64::NEG_INFINITY, f64::max);
134        let temp_increase = max_temp - min_temp;
135        parts.push(format!(
136            "Temp: {:.0}-{:.0}°C (+{:.0}°C)",
137            min_temp, max_temp, temp_increase
138        ));
139    }
140
141    if parts.is_empty() {
142        None
143    } else {
144        Some(parts.join(", "))
145    }
146}
147
148pub fn format_comparison_result(
149    comparison: &Comparison,
150    _benchmark_name: &str,
151    is_regression: bool,
152) -> String {
153    let change_symbol = if comparison.percentage_change > 0.0 {
154        "↗"
155    } else {
156        "↘"
157    };
158    let percentage_str = format!("{:.1}%", comparison.percentage_change.abs());
159    let baseline_str = format_duration_human_readable(comparison.baseline_mean);
160    let current_str = format_duration_human_readable(comparison.current_mean);
161
162    // Build baseline count suffix if available
163    let baseline_suffix = if comparison.baseline_count > 1 {
164        format!(" (n={})", comparison.baseline_count)
165    } else {
166        String::new()
167    };
168
169    let base_line = if is_regression {
170        format!(
171            "        {} {} {} (mean: {} -> {}{})",
172            "REGRESS".red().bold(),
173            change_symbol,
174            percentage_str.red().bold(),
175            baseline_str.dimmed(),
176            current_str.red(),
177            baseline_suffix.dimmed()
178        )
179    } else if comparison.percentage_change < -5.0 {
180        // Show improvements of >5% in green
181        format!(
182            "        {} {} {} (mean: {} -> {}{})",
183            "IMPROVE".green().bold(),
184            change_symbol,
185            percentage_str.green(),
186            baseline_str.dimmed(),
187            current_str.green(),
188            baseline_suffix.dimmed()
189        )
190    } else {
191        // Minor changes in yellow
192        format!(
193            "        {} {} {} (mean: {} -> {}{})",
194            "STABLE".cyan(),
195            change_symbol,
196            percentage_str.dimmed(),
197            baseline_str.dimmed(),
198            current_str.dimmed(),
199            baseline_suffix.dimmed()
200        )
201    };
202
203    // Add statistical info if available
204    let mut stats_parts = Vec::new();
205
206    if let Some(z_score) = comparison.z_score {
207        stats_parts.push(format!("z={:.2}", z_score));
208    }
209
210    if let Some(cp_prob) = comparison.change_probability {
211        stats_parts.push(format!("cp={:.0}%", cp_prob * 100.0));
212    }
213
214    if !stats_parts.is_empty() {
215        format!("{}\n        {}", base_line, stats_parts.join(", ").dimmed())
216    } else {
217        base_line
218    }
219}
220
221pub fn print_benchmark_start(bench_name: &str, module: &str) {
222    println!(
223        "   {} {}::{}",
224        "Running".cyan().bold(),
225        module.dimmed(),
226        bench_name
227    );
228}
229
230/// Print a single benchmark result line (for streaming output)
231pub fn print_benchmark_result_line(result: &BenchResult) {
232    println!("{}", format_benchmark_result(result));
233
234    // Print warmup stats if available
235    if let (Some(warmup_ms), Some(warmup_iters)) = (result.warmup_ms, result.warmup_iterations) {
236        println!(
237            "        {} {}ms ({} iterations)",
238            "Warmup:".dimmed(),
239            warmup_ms,
240            warmup_iters
241        );
242    }
243
244    // Print CPU stats if available (Linux only)
245    if let Some(cpu_stats) = format_cpu_stats(&result.cpu_samples) {
246        println!("        {}", cpu_stats.dimmed());
247    }
248}
249
250/// Print a single comparison line (for streaming output)
251pub fn print_comparison_line(comparison: &Comparison, benchmark_name: &str, is_regression: bool) {
252    println!(
253        "{}",
254        format_comparison_result(comparison, benchmark_name, is_regression)
255    );
256}
257
258/// Print "NEW" message for first baseline
259pub fn print_new_baseline_line(benchmark_name: &str) {
260    println!(
261        "        {} {} (establishing baseline)",
262        "NEW".blue().bold(),
263        benchmark_name.bright_white()
264    );
265}
266
267/// Print summary footer for streaming mode
268pub fn print_streaming_summary(
269    comparisons: &[ComparisonResult],
270    config: &crate::config::ComparisonConfig,
271) {
272    let regressions = comparisons.iter().filter(|c| c.is_regression).count();
273    let improvements = comparisons
274        .iter()
275        .filter(|c| {
276            c.comparison
277                .as_ref()
278                .map(|comp| comp.percentage_change < -5.0)
279                .unwrap_or(false)
280        })
281        .count();
282    let new_benchmarks = comparisons
283        .iter()
284        .filter(|c| c.comparison.is_none())
285        .count();
286    let stable = comparisons.len() - regressions - improvements - new_benchmarks;
287
288    println!("{}", "─".repeat(80).dimmed());
289    println!(
290        "{} {} total: {} {}, {} {}, {} {}{}",
291        "Summary:".cyan().bold(),
292        comparisons.len(),
293        stable,
294        "stable".dimmed(),
295        improvements,
296        "improved".green(),
297        regressions,
298        if regressions > 0 {
299            "regressed".red().bold()
300        } else {
301            "regressed".dimmed()
302        },
303        if new_benchmarks > 0 {
304            format!(", {} {}", new_benchmarks, "new".blue())
305        } else {
306            String::new()
307        }
308    );
309
310    if regressions > 0 {
311        println!(
312            "{} {} regression(s) detected (threshold: {}%)",
313            "Warning:".yellow().bold(),
314            regressions,
315            config.threshold
316        );
317    }
318}
319
320pub fn print_summary(results: &[BenchResult], comparisons: Option<&[ComparisonResult]>) {
321    // Print header
322    println!(
323        "{} {} {}",
324        "Running".green().bold(),
325        results.len(),
326        "Benchmarks".green().bold()
327    );
328    println!();
329
330    // Print individual benchmark results
331    for (i, result) in results.iter().enumerate() {
332        println!("{}", format_benchmark_result(result));
333
334        if let Some(comparisons) = comparisons {
335            if i < comparisons.len() {
336                if let Some(comparison) = &comparisons[i].comparison {
337                    println!(
338                        "{}",
339                        format_comparison_result(
340                            comparison,
341                            &result.name,
342                            comparisons[i].is_regression
343                        )
344                    );
345                } else {
346                    // First run - no baseline to compare against
347                    println!(
348                        "        {} {} (establishing baseline)",
349                        "NEW".blue().bold(),
350                        result.name.bright_white()
351                    );
352                }
353            }
354        }
355    }
356
357    println!();
358
359    // Print summary footer
360    if let Some(comparisons) = comparisons {
361        let regressions = comparisons.iter().filter(|c| c.is_regression).count();
362        let improvements = comparisons
363            .iter()
364            .filter(|c| {
365                c.comparison
366                    .as_ref()
367                    .map(|comp| comp.percentage_change < -5.0)
368                    .unwrap_or(false)
369            })
370            .count();
371        let new_benchmarks = comparisons
372            .iter()
373            .filter(|c| c.comparison.is_none())
374            .count();
375        let stable = comparisons.len() - regressions - improvements - new_benchmarks;
376
377        println!(
378            "{} {} total: {} {}, {} {}, {} {}{}",
379            "Summary:".cyan().bold(),
380            results.len(),
381            stable,
382            "stable".dimmed(),
383            improvements,
384            "improved".green(),
385            regressions,
386            if regressions > 0 {
387                "regressed".red().bold()
388            } else {
389                "regressed".dimmed()
390            },
391            if new_benchmarks > 0 {
392                format!(", {} {}", new_benchmarks, "new".blue())
393            } else {
394                String::new()
395            }
396        );
397
398        if regressions > 0 {
399            println!(
400                "{} {} regression(s) detected",
401                "warning:".yellow().bold(),
402                regressions
403            );
404        }
405    } else {
406        println!(
407            "{} running {} benchmarks",
408            "Finished".green().bold(),
409            results.len()
410        );
411    }
412}
413
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Percentiles;
    use std::time::Duration;
    use tempfile::NamedTempFile;

    /// Build a minimal, fully-populated result for round-trip and
    /// formatting tests; unspecified fields fall back to `Default`.
    fn create_test_result() -> BenchResult {
        BenchResult {
            name: "test_bench".to_string(),
            module: "test_module".to_string(),
            samples: 10,
            percentiles: Percentiles {
                p50: Duration::from_millis(5),
                p90: Duration::from_millis(10),
                p99: Duration::from_millis(15),
                mean: Duration::from_millis(8),
            },
            all_timings: vec![Duration::from_millis(5); 10],
            cpu_samples: vec![],
            ..Default::default()
        }
    }

    #[test]
    fn test_save_and_load_result() {
        let original = create_test_result();
        let file = NamedTempFile::new().unwrap();

        save_result_to_file(&original, file.path()).unwrap();
        let restored = load_result_from_file(file.path()).unwrap();

        // JSON round trip must preserve the identifying fields.
        assert_eq!(original.name, restored.name);
        assert_eq!(original.module, restored.module);
        assert_eq!(original.samples, restored.samples);
    }

    #[test]
    fn test_save_and_load_results() {
        let originals = vec![create_test_result(), create_test_result()];
        let file = NamedTempFile::new().unwrap();

        save_results_to_file(&originals, file.path()).unwrap();
        let restored = load_results_from_file(file.path()).unwrap();

        assert_eq!(originals.len(), restored.len());
        assert_eq!(originals[0].name, restored[0].name);
    }

    #[test]
    fn test_format_duration_human_readable() {
        // One case per unit branch.
        let cases = [
            (Duration::from_nanos(500), "500ns"),
            (Duration::from_micros(500), "500.00μs"),
            (Duration::from_millis(500), "500.00ms"),
            (Duration::from_secs(5), "5.00s"),
        ];
        for (input, expected) in cases {
            assert_eq!(format_duration_human_readable(input), expected);
        }
    }

    #[test]
    fn test_format_benchmark_result() {
        let formatted = format_benchmark_result(&create_test_result());

        // The line must carry the qualified name and every stat label.
        for needle in ["test_module::test_bench", "mean:", "p50:", "p90:", "p99:"] {
            assert!(formatted.contains(needle));
        }
    }
}
494}