//! Rendering utilities for perfgate output.
//!
//! This crate provides functions for rendering performance comparison results
//! as markdown tables and GitHub Actions annotations.

use anyhow::Context;
use perfgate_types::{CompareReceipt, Direction, Metric, MetricStatistic, MetricStatus};
use serde_json::json;

10/// Render a [`CompareReceipt`] as a Markdown table for PR comments.
11pub fn render_markdown(compare: &CompareReceipt) -> String {
12    let mut out = String::new();
13
14    let header = match compare.verdict.status {
15        perfgate_types::VerdictStatus::Pass => "✅ perfgate: pass",
16        perfgate_types::VerdictStatus::Warn => "⚠️ perfgate: warn",
17        perfgate_types::VerdictStatus::Fail => "❌ perfgate: fail",
18        perfgate_types::VerdictStatus::Skip => "⏭️ perfgate: skip",
19    };
20
21    out.push_str(header);
22    out.push_str("\n\n");
23
24    out.push_str(&format!("**Bench:** `{}`\n\n", compare.bench.name));
25
26    out.push_str("| metric | baseline (median) | current (median) | delta | budget | status |\n");
27    out.push_str("|---|---:|---:|---:|---:|---|\n");
28
29    for (metric, delta) in &compare.deltas {
30        let budget = compare.budgets.get(metric);
31        let (budget_str, direction_str) = if let Some(b) = budget {
32            (
33                format!("{:.1}%", b.threshold * 100.0),
34                direction_str(b.direction),
35            )
36        } else {
37            ("".to_string(), "")
38        };
39
40        let mut status_icon = metric_status_icon(delta.status).to_string();
41
42        // If noisy, append noise info
43        if let (Some(cv), Some(limit)) = (delta.cv, delta.noise_threshold)
44            && cv > limit
45        {
46            status_icon.push_str(" (noisy)");
47        }
48
49        out.push_str(&format!(
50            "| `{metric}` | {b} {u} | {c} {u} | {pct} | {budget} ({dir}) | {status} |\n",
51            metric = format_metric_with_statistic(*metric, delta.statistic),
52            b = format_value(*metric, delta.baseline),
53            c = format_value(*metric, delta.current),
54            u = metric.display_unit(),
55            pct = format_pct(delta.pct),
56            budget = budget_str,
57            dir = direction_str,
58            status = status_icon,
59        ));
60    }
61
62    if !compare.verdict.reasons.is_empty() {
63        out.push_str("\n**Notes:**\n");
64        for r in &compare.verdict.reasons {
65            out.push_str(&render_reason_line(compare, r));
66        }
67    }
68
69    out
70}
71
72/// Render a [`CompareReceipt`] using a custom [Handlebars](https://docs.rs/handlebars) template.
73pub fn render_markdown_template(
74    compare: &CompareReceipt,
75    template: &str,
76) -> anyhow::Result<String> {
77    let mut handlebars = handlebars::Handlebars::new();
78    handlebars.set_strict_mode(true);
79    handlebars
80        .register_template_string("markdown", template)
81        .context("parse markdown template")?;
82
83    let context = markdown_template_context(compare);
84    handlebars
85        .render("markdown", &context)
86        .context("render markdown template")
87}
88
89/// Produce GitHub Actions annotation strings from a [`CompareReceipt`].
90pub fn github_annotations(compare: &CompareReceipt) -> Vec<String> {
91    let mut lines = Vec::new();
92
93    for (metric, delta) in &compare.deltas {
94        let prefix = match delta.status {
95            MetricStatus::Fail => "::error",
96            MetricStatus::Warn => "::warning",
97            MetricStatus::Pass | MetricStatus::Skip => continue,
98        };
99
100        let msg = format!(
101            "perfgate {bench} {metric}: {pct} (baseline {b}{u}, current {c}{u})",
102            bench = compare.bench.name,
103            metric = format_metric_with_statistic(*metric, delta.statistic),
104            pct = format_pct(delta.pct),
105            b = format_value(*metric, delta.baseline),
106            c = format_value(*metric, delta.current),
107            u = metric.display_unit(),
108        );
109
110        lines.push(format!("{prefix}::{msg}"));
111    }
112
113    lines
114}
115
/// Return the canonical string key for a [`Metric`] (e.g. `wall_ms`).
///
/// Thin alias over `Metric::as_str`, kept so callers of this crate have a
/// single formatting entry point alongside [`format_metric_with_statistic`].
pub fn format_metric(metric: Metric) -> &'static str {
    metric.as_str()
}
120
121/// Format a metric key, appending the statistic name when it is not the default (median).
122pub fn format_metric_with_statistic(metric: Metric, statistic: MetricStatistic) -> String {
123    if statistic == MetricStatistic::Median {
124        format_metric(metric).to_string()
125    } else {
126        format!("{} ({})", format_metric(metric), statistic.as_str())
127    }
128}
129
130/// Build the JSON context object used by [`render_markdown_template`].
131pub fn markdown_template_context(compare: &CompareReceipt) -> serde_json::Value {
132    let header = match compare.verdict.status {
133        perfgate_types::VerdictStatus::Pass => "✅ perfgate: pass",
134        perfgate_types::VerdictStatus::Warn => "⚠️ perfgate: warn",
135        perfgate_types::VerdictStatus::Fail => "❌ perfgate: fail",
136        perfgate_types::VerdictStatus::Skip => "⏭️ perfgate: skip",
137    };
138
139    let rows: Vec<serde_json::Value> = compare
140        .deltas
141        .iter()
142        .map(|(metric, delta)| {
143            let budget = compare.budgets.get(metric);
144            let (budget_threshold_pct, budget_direction) = budget
145                .map(|b| (b.threshold * 100.0, direction_str(b.direction).to_string()))
146                .unwrap_or((0.0, String::new()));
147
148            json!({
149                "metric": format_metric(*metric),
150                "metric_with_statistic": format_metric_with_statistic(*metric, delta.statistic),
151                "statistic": delta.statistic.as_str(),
152                "baseline": format_value(*metric, delta.baseline),
153                "current": format_value(*metric, delta.current),
154                "unit": metric.display_unit(),
155                "delta_pct": format_pct(delta.pct),
156                "budget_threshold_pct": budget_threshold_pct,
157                "budget_direction": budget_direction,
158                "status": metric_status_str(delta.status),
159                "status_icon": metric_status_icon(delta.status),
160                "raw": {
161                    "baseline": delta.baseline,
162                    "current": delta.current,
163                    "pct": delta.pct,
164                    "regression": delta.regression,
165                    "statistic": delta.statistic.as_str(),
166                    "significance": delta.significance
167                }
168            })
169        })
170        .collect();
171
172    json!({
173        "header": header,
174        "bench": compare.bench,
175        "verdict": compare.verdict,
176        "rows": rows,
177        "reasons": compare.verdict.reasons,
178        "compare": compare
179    })
180}
181
182/// Parse a verdict reason token like `"wall_ms_warn"` into its metric and status.
183pub fn parse_reason_token(token: &str) -> Option<(Metric, MetricStatus)> {
184    let (metric_part, status_part) = token.rsplit_once('_')?;
185
186    let status = match status_part {
187        "warn" => MetricStatus::Warn,
188        "fail" => MetricStatus::Fail,
189        "skip" => MetricStatus::Skip,
190        _ => return None,
191    };
192
193    let metric = Metric::parse_key(metric_part)?;
194
195    Some((metric, status))
196}
197
198/// Render a single verdict reason token as a human-readable bullet line.
199pub fn render_reason_line(compare: &CompareReceipt, token: &str) -> String {
200    let context = parse_reason_token(token).and_then(|(metric, status)| {
201        compare
202            .deltas
203            .get(&metric)
204            .zip(compare.budgets.get(&metric))
205            .map(|(delta, budget)| (status, delta, budget))
206    });
207
208    if let Some((status, delta, budget)) = context {
209        let pct = format_pct(delta.pct);
210        let warn_pct = budget.warn_threshold * 100.0;
211        let fail_pct = budget.threshold * 100.0;
212
213        return match status {
214            MetricStatus::Warn => {
215                let mut msg =
216                    format!("- {token}: {pct} (warn >= {warn_pct:.2}%, fail > {fail_pct:.2}%)");
217                if let (Some(cv), Some(limit)) = (delta.cv, delta.noise_threshold)
218                    && cv > limit
219                {
220                    msg.push_str(&format!(
221                        " [NOISY: CV {:.2}% > limit {:.2}%]",
222                        cv * 100.0,
223                        limit * 100.0
224                    ));
225                }
226                msg.push('\n');
227                msg
228            }
229            MetricStatus::Fail => {
230                format!("- {token}: {pct} (fail > {fail_pct:.2}%)\n")
231            }
232            MetricStatus::Skip => {
233                let mut msg = format!("- {token}: skipped");
234                if let (Some(cv), Some(limit)) = (delta.cv, delta.noise_threshold)
235                    && cv > limit
236                {
237                    msg.push_str(&format!(
238                        " [NOISY: CV {:.2}% > limit {:.2}%]",
239                        cv * 100.0,
240                        limit * 100.0
241                    ));
242                }
243                msg.push('\n');
244                msg
245            }
246            MetricStatus::Pass => String::new(),
247        };
248    }
249
250    format!("- {token}\n")
251}
252
253/// Format a metric value for display.
254pub fn format_value(metric: Metric, v: f64) -> String {
255    match metric {
256        Metric::BinaryBytes
257        | Metric::CpuMs
258        | Metric::CtxSwitches
259        | Metric::EnergyUj
260        | Metric::IoReadBytes
261        | Metric::IoWriteBytes
262        | Metric::MaxRssKb
263        | Metric::NetworkPackets
264        | Metric::PageFaults
265        | Metric::WallMs => format!("{:.0}", v),
266        Metric::ThroughputPerS => format!("{:.3}", v),
267    }
268}
269
/// Format a fractional change as a percentage string.
///
/// Positive values get an explicit `+` prefix; zero and negative values rely
/// on the default float formatting (negatives keep their `-`).
pub fn format_pct(pct: f64) -> String {
    let value = pct * 100.0;
    if pct > 0.0 {
        format!("+{value:.2}%")
    } else {
        format!("{value:.2}%")
    }
}
275
276/// Return a human-readable label for a budget [`Direction`].
277pub fn direction_str(direction: Direction) -> &'static str {
278    match direction {
279        Direction::Lower => "lower",
280        Direction::Higher => "higher",
281    }
282}
283
284/// Return an emoji icon for a [`MetricStatus`].
285pub fn metric_status_icon(status: MetricStatus) -> &'static str {
286    match status {
287        MetricStatus::Pass => "✅",
288        MetricStatus::Warn => "⚠️",
289        MetricStatus::Fail => "❌",
290        MetricStatus::Skip => "⏭️",
291    }
292}
293
294/// Return a lowercase string label for a [`MetricStatus`].
295pub fn metric_status_str(status: MetricStatus) -> &'static str {
296    match status {
297        MetricStatus::Pass => "pass",
298        MetricStatus::Warn => "warn",
299        MetricStatus::Fail => "fail",
300        MetricStatus::Skip => "skip",
301    }
302}
303
#[cfg(test)]
mod tests {
    use super::*;
    use perfgate_types::{
        BenchMeta, Budget, CompareRef, Delta, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
    };
    use std::collections::BTreeMap;

    /// Build a minimal receipt fixture: a single `wall_ms` delta (+15%) with
    /// the given per-metric `status`, a lower-is-better budget, and an overall
    /// `Warn` verdict carrying a `wall_ms_warn` reason token.
    fn make_compare_receipt(status: MetricStatus) -> CompareReceipt {
        let mut budgets = BTreeMap::new();
        // NOTE(review): assumes Budget::new(fail_threshold, warn_threshold,
        // direction) argument order — confirm against perfgate_types.
        budgets.insert(Metric::WallMs, Budget::new(0.2, 0.1, Direction::Lower));

        let mut deltas = BTreeMap::new();
        deltas.insert(
            Metric::WallMs,
            Delta {
                baseline: 100.0,
                current: 115.0,
                ratio: 1.15,
                pct: 0.15,
                regression: 0.15,
                statistic: MetricStatistic::Median,
                significance: None,
                // cv/noise_threshold left unset so no "(noisy)" marker appears.
                cv: None,
                noise_threshold: None,
                status,
            },
        );

        CompareReceipt {
            schema: perfgate_types::COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".into(),
                version: "0.1.0".into(),
            },
            bench: BenchMeta {
                name: "bench".into(),
                cwd: None,
                command: vec!["true".into()],
                repeat: 1,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: None,
                run_id: None,
            },
            current_ref: CompareRef {
                path: None,
                run_id: None,
            },
            budgets,
            deltas,
            verdict: Verdict {
                status: VerdictStatus::Warn,
                counts: VerdictCounts {
                    pass: 0,
                    warn: 1,
                    fail: 0,
                    skip: 0,
                },
                reasons: vec!["wall_ms_warn".to_string()],
            },
        }
    }

    // The markdown output should contain the table header and the metric key.
    #[test]
    fn markdown_renders_table() {
        let receipt = make_compare_receipt(MetricStatus::Pass);
        let md = render_markdown(&receipt);
        assert!(md.contains("| metric | baseline"));
        assert!(md.contains("wall_ms"));
    }

    // A custom Handlebars template should see header, bench, and per-row fields.
    #[test]
    fn markdown_template_renders_context_rows() {
        let compare = make_compare_receipt(MetricStatus::Warn);
        let template = "{{header}}\nbench={{bench.name}}\n{{#each rows}}metric={{metric}} status={{status}}\n{{/each}}";

        let rendered = render_markdown_template(&compare, template).expect("render template");
        assert!(rendered.contains("bench=bench"));
        assert!(rendered.contains("metric=wall_ms"));
        assert!(rendered.contains("status=warn"));
    }

    #[test]
    fn parse_reason_token_handles_valid_and_invalid() {
        let parsed = parse_reason_token("wall_ms_warn");
        assert!(parsed.is_some());
        let (metric, status) = parsed.unwrap();
        assert_eq!(metric, Metric::WallMs);
        assert_eq!(status, MetricStatus::Warn);

        // "pass" is not an accepted status suffix, and unknown metric keys fail.
        assert!(parse_reason_token("wall_ms_pass").is_none());
        assert!(parse_reason_token("unknown_warn").is_none());
    }

    // Pass/Skip metrics must not produce annotations; Warn maps to ::warning
    // and Fail to ::error.
    #[test]
    fn github_annotations_only_warn_and_fail() {
        let mut compare = make_compare_receipt(MetricStatus::Warn);
        compare.deltas.insert(
            Metric::MaxRssKb,
            Delta {
                baseline: 100.0,
                current: 150.0,
                ratio: 1.5,
                pct: 0.5,
                regression: 0.5,
                statistic: MetricStatistic::Median,
                significance: None,
                cv: None,
                noise_threshold: None,
                status: MetricStatus::Fail,
            },
        );

        let lines = github_annotations(&compare);
        assert_eq!(lines.len(), 2);
        assert!(lines.iter().any(|l| l.starts_with("::warning::")));
        assert!(lines.iter().any(|l| l.starts_with("::error::")));
    }
}