fluxbench_cli/executor/report.rs

use super::execution::{BenchExecutionResult, ExecutionConfig};
use super::metadata::build_report_meta;
use fluxbench_report::{
    BenchmarkMetrics, BenchmarkReportResult, BenchmarkStatus, FailureInfo, Report, ReportSummary,
};
use fluxbench_stats::{
    BootstrapConfig, SummaryStatistics, compute_bootstrap, compute_cycles_stats,
};
use rayon::prelude::*;

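/// Builds the final `Report` from per-benchmark execution results and their
/// summary statistics, attaching bootstrap confidence intervals, throughput,
/// allocation counters, and CPU-cycle metrics where statistics are available.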
pub fn build_report(
    results: &[BenchExecutionResult],
    stats: &[(String, Option<SummaryStatistics>)],
    config: &ExecutionConfig,
    total_duration_ms: f64,
) -> Report {
    let stats_map: std::collections::HashMap<_, _> = stats.iter().cloned().collect();

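    // Derive metrics for each benchmark in parallel; benchmarks without
    // summary statistics map to `None`.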
    let metrics_vec: Vec<_> = results
        .par_iter()
        .map(|result| {
            let stats_opt = stats_map.get(&result.benchmark_id).cloned().flatten();

            stats_opt.as_ref().map(|s| {
                let bootstrap_config = BootstrapConfig {
                    iterations: config.bootstrap_iterations,
                    confidence_level: config.confidence_level,
                    ..Default::default()
                };
                let bootstrap_result = compute_bootstrap(&result.samples, &bootstrap_config);

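                // Fall back to a degenerate interval at the sample mean if the bootstrap fails.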
                let (ci_lower, ci_upper) = match bootstrap_result {
                    Ok(br) => (br.confidence_interval.lower, br.confidence_interval.upper),
                    Err(_) => (s.mean, s.mean),
                };

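                // Throughput in ops/sec: the mean is in nanoseconds, so invert it against 1e9.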
                let throughput = if s.mean > 0.0 {
                    Some(1_000_000_000.0 / s.mean)
                } else {
                    None
                };

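                // Cycle statistics are computed from the raw CPU-cycle counts alongside the timing samples.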
                let cycles_stats = compute_cycles_stats(&result.cpu_cycles, &result.samples);

                BenchmarkMetrics {
                    samples: s.sample_count,
                    mean_ns: s.mean,
                    median_ns: s.median,
                    std_dev_ns: s.std_dev,
                    min_ns: s.min,
                    max_ns: s.max,
                    p50_ns: s.p50,
                    p90_ns: s.p90,
                    p95_ns: s.p95,
                    p99_ns: s.p99,
                    p999_ns: s.p999,
                    skewness: s.skewness,
                    kurtosis: s.kurtosis,
                    ci_lower_ns: ci_lower,
                    ci_upper_ns: ci_upper,
                    ci_level: config.confidence_level,
                    throughput_ops_sec: throughput,
                    alloc_bytes: result.alloc_bytes,
                    alloc_count: result.alloc_count,
                    mean_cycles: cycles_stats.mean_cycles,
                    median_cycles: cycles_stats.median_cycles,
                    min_cycles: cycles_stats.min_cycles,
                    max_cycles: cycles_stats.max_cycles,
                    cycles_per_ns: cycles_stats.cycles_per_ns,
                }
            })
        })
        .collect();

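    // Fold the results into per-benchmark report rows and the run-level summary.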
    let mut benchmark_results = Vec::with_capacity(results.len());
    let mut summary = ReportSummary {
        total_benchmarks: results.len(),
        total_duration_ms,
        ..Default::default()
    };

    for (result, metrics) in results.iter().zip(metrics_vec) {
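        // A recorded error message becomes failure info; the failure kind defaults to "panic".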
        let failure = result.error_message.as_ref().map(|msg| FailureInfo {
            kind: result
                .failure_kind
                .clone()
                .unwrap_or_else(|| "panic".to_string()),
            message: msg.clone(),
            backtrace: result.backtrace.clone(),
        });

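        // Tally this benchmark's status into the run summary counts.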
        match result.status {
            BenchmarkStatus::Passed => summary.passed += 1,
            BenchmarkStatus::Failed => summary.failed += 1,
            BenchmarkStatus::Crashed => summary.crashed += 1,
            BenchmarkStatus::Skipped => summary.skipped += 1,
        }

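        // Emit the per-benchmark row; no comparison is attached here.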
        benchmark_results.push(BenchmarkReportResult {
            id: result.benchmark_id.clone(),
            name: result.benchmark_name.clone(),
            group: result.group.clone(),
            status: result.status,
            severity: result.severity,
            file: result.file.clone(),
            line: result.line,
            metrics,
            comparison: None,
            failure,
        });
    }

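    // Assemble the final report; the comparison, synthetic, and verification sections start out empty.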
    Report {
        meta: build_report_meta(config),
        results: benchmark_results,
        comparisons: Vec::new(),
        comparison_series: Vec::new(),
        synthetics: Vec::new(),
        verifications: Vec::new(),
        summary,
    }
}