Skip to main content

perfgate_render/
lib.rs

1//! Rendering utilities for perfgate output.
2//!
3//! This crate provides functions for rendering performance comparison results
4//! as markdown tables and GitHub Actions annotations.
5//!
6//! # Example
7//!
8//! ```
9//! use perfgate_render::{render_markdown, github_annotations};
10//! use perfgate_types::{CompareReceipt, Delta, Metric, MetricStatus, MetricStatistic};
11//! use std::collections::BTreeMap;
12//!
13//! fn example() {
14//!     // Create a CompareReceipt (simplified example)
15//!     // let compare = CompareReceipt { ... };
16//!     // let markdown = render_markdown(&compare);
17//!     // let annotations = github_annotations(&compare);
18//! }
19//! ```
20
21use anyhow::Context;
22use perfgate_types::{CompareReceipt, Direction, Metric, MetricStatistic, MetricStatus};
23use serde_json::json;
24
25/// Render a [`CompareReceipt`] as a Markdown table for PR comments.
26///
27/// ```
28/// # use std::collections::BTreeMap;
29/// # use perfgate_types::*;
30/// let compare = CompareReceipt {
31///     schema: COMPARE_SCHEMA_V1.to_string(),
32///     tool: ToolInfo { name: "perfgate".into(), version: "0.1.0".into() },
33///     bench: BenchMeta {
34///         name: "my-bench".into(), cwd: None,
35///         command: vec!["echo".into()], repeat: 3, warmup: 0,
36///         work_units: None, timeout_ms: None,
37///     },
38///     baseline_ref: CompareRef { path: None, run_id: None },
39///     current_ref: CompareRef { path: None, run_id: None },
40///     budgets: BTreeMap::from([(Metric::WallMs, Budget {
41///         threshold: 0.20, warn_threshold: 0.18, direction: Direction::Lower,
42///     })]),
43///     deltas: BTreeMap::from([(Metric::WallMs, Delta {
44///         baseline: 100.0, current: 110.0, ratio: 1.1, pct: 0.1,
45///         regression: 0.1, statistic: MetricStatistic::Median,
46///         significance: None, status: MetricStatus::Pass,
47///     })]),
48///     verdict: Verdict {
49///         status: VerdictStatus::Pass,
50///         counts: VerdictCounts { pass: 1, warn: 0, fail: 0 },
51///         reasons: vec![],
52///     },
53/// };
54/// let md = perfgate_render::render_markdown(&compare);
55/// assert!(md.contains("✅ perfgate: pass"));
56/// assert!(md.contains("wall_ms"));
57/// ```
58pub fn render_markdown(compare: &CompareReceipt) -> String {
59    let mut out = String::new();
60
61    let header = match compare.verdict.status {
62        perfgate_types::VerdictStatus::Pass => "✅ perfgate: pass",
63        perfgate_types::VerdictStatus::Warn => "⚠️ perfgate: warn",
64        perfgate_types::VerdictStatus::Fail => "❌ perfgate: fail",
65    };
66
67    out.push_str(header);
68    out.push_str("\n\n");
69
70    out.push_str(&format!("**Bench:** `{}`\n\n", compare.bench.name));
71
72    out.push_str("| metric | baseline (median) | current (median) | delta | budget | status |\n");
73    out.push_str("|---|---:|---:|---:|---:|---|\n");
74
75    for (metric, delta) in &compare.deltas {
76        let budget = compare.budgets.get(metric);
77        let (budget_str, direction_str) = if let Some(b) = budget {
78            (
79                format!("{:.1}%", b.threshold * 100.0),
80                direction_str(b.direction),
81            )
82        } else {
83            ("".to_string(), "")
84        };
85
86        let status_icon = metric_status_icon(delta.status);
87
88        out.push_str(&format!(
89            "| `{metric}` | {b} {u} | {c} {u} | {pct} | {budget} ({dir}) | {status} |\n",
90            metric = format_metric_with_statistic(*metric, delta.statistic),
91            b = format_value(*metric, delta.baseline),
92            c = format_value(*metric, delta.current),
93            u = metric.display_unit(),
94            pct = format_pct(delta.pct),
95            budget = budget_str,
96            dir = direction_str,
97            status = status_icon,
98        ));
99    }
100
101    if !compare.verdict.reasons.is_empty() {
102        out.push_str("\n**Notes:**\n");
103        for r in &compare.verdict.reasons {
104            out.push_str(&render_reason_line(compare, r));
105        }
106    }
107
108    out
109}
110
111/// Render a [`CompareReceipt`] using a custom [Handlebars](https://docs.rs/handlebars) template.
112///
113/// # Examples
114///
115/// ```
116/// # use std::collections::BTreeMap;
117/// # use perfgate_types::*;
118/// let compare = CompareReceipt {
119///     schema: COMPARE_SCHEMA_V1.to_string(),
120///     tool: ToolInfo { name: "perfgate".into(), version: "0.1.0".into() },
121///     bench: BenchMeta {
122///         name: "my-bench".into(), cwd: None,
123///         command: vec!["echo".into()], repeat: 3, warmup: 0,
124///         work_units: None, timeout_ms: None,
125///     },
126///     baseline_ref: CompareRef { path: None, run_id: None },
127///     current_ref: CompareRef { path: None, run_id: None },
128///     budgets: BTreeMap::new(),
129///     deltas: BTreeMap::from([(Metric::WallMs, Delta {
130///         baseline: 100.0, current: 110.0, ratio: 1.1, pct: 0.1,
131///         regression: 0.1, statistic: MetricStatistic::Median,
132///         significance: None, status: MetricStatus::Pass,
133///     })]),
134///     verdict: Verdict {
135///         status: VerdictStatus::Pass,
136///         counts: VerdictCounts { pass: 1, warn: 0, fail: 0 },
137///         reasons: vec![],
138///     },
139/// };
140/// let rendered = perfgate_render::render_markdown_template(
141///     &compare,
142///     "Bench: {{bench.name}}, Verdict: {{verdict.status}}",
143/// ).unwrap();
144/// assert!(rendered.contains("Bench: my-bench"));
145/// assert!(rendered.contains("Verdict: pass"));
146/// ```
147pub fn render_markdown_template(
148    compare: &CompareReceipt,
149    template: &str,
150) -> anyhow::Result<String> {
151    let mut handlebars = handlebars::Handlebars::new();
152    handlebars.set_strict_mode(true);
153    handlebars
154        .register_template_string("markdown", template)
155        .context("parse markdown template")?;
156
157    let context = markdown_template_context(compare);
158    handlebars
159        .render("markdown", &context)
160        .context("render markdown template")
161}
162
163/// Produce GitHub Actions annotation strings from a [`CompareReceipt`].
164///
165/// Only failing/warning metrics generate annotations; passing metrics are skipped.
166///
167/// ```
168/// # use std::collections::BTreeMap;
169/// # use perfgate_types::*;
170/// let compare = CompareReceipt {
171///     schema: COMPARE_SCHEMA_V1.to_string(),
172///     tool: ToolInfo { name: "perfgate".into(), version: "0.1.0".into() },
173///     bench: BenchMeta {
174///         name: "my-bench".into(), cwd: None,
175///         command: vec!["echo".into()], repeat: 3, warmup: 0,
176///         work_units: None, timeout_ms: None,
177///     },
178///     baseline_ref: CompareRef { path: None, run_id: None },
179///     current_ref: CompareRef { path: None, run_id: None },
180///     budgets: BTreeMap::new(),
181///     deltas: BTreeMap::from([(Metric::WallMs, Delta {
182///         baseline: 100.0, current: 130.0, ratio: 1.3, pct: 0.3,
183///         regression: 0.3, statistic: MetricStatistic::Median,
184///         significance: None, status: MetricStatus::Fail,
185///     })]),
186///     verdict: Verdict {
187///         status: VerdictStatus::Fail,
188///         counts: VerdictCounts { pass: 0, warn: 0, fail: 1 },
189///         reasons: vec![],
190///     },
191/// };
192/// let annotations = perfgate_render::github_annotations(&compare);
193/// assert_eq!(annotations.len(), 1);
194/// assert!(annotations[0].starts_with("::error::"));
195/// ```
196pub fn github_annotations(compare: &CompareReceipt) -> Vec<String> {
197    let mut lines = Vec::new();
198
199    for (metric, delta) in &compare.deltas {
200        let prefix = match delta.status {
201            MetricStatus::Fail => "::error",
202            MetricStatus::Warn => "::warning",
203            MetricStatus::Pass => continue,
204        };
205
206        let msg = format!(
207            "perfgate {bench} {metric}: {pct} (baseline {b}{u}, current {c}{u})",
208            bench = compare.bench.name,
209            metric = format_metric_with_statistic(*metric, delta.statistic),
210            pct = format_pct(delta.pct),
211            b = format_value(*metric, delta.baseline),
212            c = format_value(*metric, delta.current),
213            u = metric.display_unit(),
214        );
215
216        lines.push(format!("{prefix}::{msg}"));
217    }
218
219    lines
220}
221
222/// Return the canonical string key for a [`Metric`].
223///
224/// # Examples
225///
226/// ```
227/// use perfgate_types::Metric;
228/// assert_eq!(perfgate_render::format_metric(Metric::WallMs), "wall_ms");
229/// assert_eq!(perfgate_render::format_metric(Metric::MaxRssKb), "max_rss_kb");
230/// ```
231pub fn format_metric(metric: Metric) -> &'static str {
232    metric.as_str()
233}
234
235/// Format a metric key, appending the statistic name when it is not the default (median).
236///
237/// # Examples
238///
239/// ```
240/// use perfgate_types::{Metric, MetricStatistic};
241/// assert_eq!(
242///     perfgate_render::format_metric_with_statistic(Metric::WallMs, MetricStatistic::Median),
243///     "wall_ms",
244/// );
245/// assert_eq!(
246///     perfgate_render::format_metric_with_statistic(Metric::WallMs, MetricStatistic::P95),
247///     "wall_ms (p95)",
248/// );
249/// ```
250pub fn format_metric_with_statistic(metric: Metric, statistic: MetricStatistic) -> String {
251    if statistic == MetricStatistic::Median {
252        format_metric(metric).to_string()
253    } else {
254        format!("{} ({})", format_metric(metric), statistic.as_str())
255    }
256}
257
258/// Build the JSON context object used by [`render_markdown_template`].
259///
260/// # Examples
261///
262/// ```
263/// # use std::collections::BTreeMap;
264/// # use perfgate_types::*;
265/// let compare = CompareReceipt {
266///     schema: COMPARE_SCHEMA_V1.to_string(),
267///     tool: ToolInfo { name: "perfgate".into(), version: "0.1.0".into() },
268///     bench: BenchMeta {
269///         name: "my-bench".into(), cwd: None,
270///         command: vec!["echo".into()], repeat: 1, warmup: 0,
271///         work_units: None, timeout_ms: None,
272///     },
273///     baseline_ref: CompareRef { path: None, run_id: None },
274///     current_ref: CompareRef { path: None, run_id: None },
275///     budgets: BTreeMap::new(),
276///     deltas: BTreeMap::new(),
277///     verdict: Verdict {
278///         status: VerdictStatus::Pass,
279///         counts: VerdictCounts { pass: 0, warn: 0, fail: 0 },
280///         reasons: vec![],
281///     },
282/// };
283/// let ctx = perfgate_render::markdown_template_context(&compare);
284/// assert_eq!(ctx["header"], "✅ perfgate: pass");
285/// assert!(ctx["rows"].as_array().unwrap().is_empty());
286/// ```
287pub fn markdown_template_context(compare: &CompareReceipt) -> serde_json::Value {
288    let header = match compare.verdict.status {
289        perfgate_types::VerdictStatus::Pass => "✅ perfgate: pass",
290        perfgate_types::VerdictStatus::Warn => "⚠️ perfgate: warn",
291        perfgate_types::VerdictStatus::Fail => "❌ perfgate: fail",
292    };
293
294    let rows: Vec<serde_json::Value> = compare
295        .deltas
296        .iter()
297        .map(|(metric, delta)| {
298            let budget = compare.budgets.get(metric);
299            let (budget_threshold_pct, budget_direction) = budget
300                .map(|b| (b.threshold * 100.0, direction_str(b.direction).to_string()))
301                .unwrap_or((0.0, String::new()));
302
303            json!({
304                "metric": format_metric(*metric),
305                "metric_with_statistic": format_metric_with_statistic(*metric, delta.statistic),
306                "statistic": delta.statistic.as_str(),
307                "baseline": format_value(*metric, delta.baseline),
308                "current": format_value(*metric, delta.current),
309                "unit": metric.display_unit(),
310                "delta_pct": format_pct(delta.pct),
311                "budget_threshold_pct": budget_threshold_pct,
312                "budget_direction": budget_direction,
313                "status": metric_status_str(delta.status),
314                "status_icon": metric_status_icon(delta.status),
315                "raw": {
316                    "baseline": delta.baseline,
317                    "current": delta.current,
318                    "pct": delta.pct,
319                    "regression": delta.regression,
320                    "statistic": delta.statistic.as_str(),
321                    "significance": delta.significance
322                }
323            })
324        })
325        .collect();
326
327    json!({
328        "header": header,
329        "bench": compare.bench,
330        "verdict": compare.verdict,
331        "rows": rows,
332        "reasons": compare.verdict.reasons,
333        "compare": compare
334    })
335}
336
337/// Parse a verdict reason token like `"wall_ms_warn"` into its metric and status.
338///
339/// Returns `None` for unrecognised metrics or non-warn/fail statuses.
340///
341/// # Examples
342///
343/// ```
344/// use perfgate_types::{Metric, MetricStatus};
345/// let (metric, status) = perfgate_render::parse_reason_token("wall_ms_warn").unwrap();
346/// assert_eq!(metric, Metric::WallMs);
347/// assert_eq!(status, MetricStatus::Warn);
348///
349/// assert!(perfgate_render::parse_reason_token("unknown_warn").is_none());
350/// ```
351pub fn parse_reason_token(token: &str) -> Option<(Metric, MetricStatus)> {
352    let (metric_part, status_part) = token.rsplit_once('_')?;
353
354    let status = match status_part {
355        "warn" => MetricStatus::Warn,
356        "fail" => MetricStatus::Fail,
357        _ => return None,
358    };
359
360    let metric = Metric::parse_key(metric_part)?;
361
362    Some((metric, status))
363}
364
365/// Render a single verdict reason token as a human-readable bullet line.
366///
367/// # Examples
368///
369/// ```
370/// # use std::collections::BTreeMap;
371/// # use perfgate_types::*;
372/// let compare = CompareReceipt {
373///     schema: COMPARE_SCHEMA_V1.to_string(),
374///     tool: ToolInfo { name: "perfgate".into(), version: "0.1.0".into() },
375///     bench: BenchMeta {
376///         name: "b".into(), cwd: None,
377///         command: vec!["echo".into()], repeat: 1, warmup: 0,
378///         work_units: None, timeout_ms: None,
379///     },
380///     baseline_ref: CompareRef { path: None, run_id: None },
381///     current_ref: CompareRef { path: None, run_id: None },
382///     budgets: BTreeMap::from([(Metric::WallMs, Budget {
383///         threshold: 0.20, warn_threshold: 0.10, direction: Direction::Lower,
384///     })]),
385///     deltas: BTreeMap::from([(Metric::WallMs, Delta {
386///         baseline: 100.0, current: 115.0, ratio: 1.15, pct: 0.15,
387///         regression: 0.15, statistic: MetricStatistic::Median,
388///         significance: None, status: MetricStatus::Warn,
389///     })]),
390///     verdict: Verdict {
391///         status: VerdictStatus::Warn,
392///         counts: VerdictCounts { pass: 0, warn: 1, fail: 0 },
393///         reasons: vec!["wall_ms_warn".into()],
394///     },
395/// };
396/// let line = perfgate_render::render_reason_line(&compare, "wall_ms_warn");
397/// assert!(line.starts_with("- wall_ms_warn:"));
398/// assert!(line.contains("+15.00%"));
399/// ```
400pub fn render_reason_line(compare: &CompareReceipt, token: &str) -> String {
401    if let Some((metric, status)) = parse_reason_token(token)
402        && let (Some(delta), Some(budget)) =
403            (compare.deltas.get(&metric), compare.budgets.get(&metric))
404    {
405        let pct = format_pct(delta.pct);
406        let warn_pct = budget.warn_threshold * 100.0;
407        let fail_pct = budget.threshold * 100.0;
408
409        return match status {
410            MetricStatus::Warn => {
411                format!("- {token}: {pct} (warn >= {warn_pct:.2}%, fail > {fail_pct:.2}%)\n")
412            }
413            MetricStatus::Fail => {
414                format!("- {token}: {pct} (fail > {fail_pct:.2}%)\n")
415            }
416            MetricStatus::Pass => format!("- {token}\n"),
417        };
418    }
419
420    format!("- {token}\n")
421}
422
423/// Format a metric value for display.
424///
425/// Integer metrics (wall_ms, max_rss_kb, …) are rounded; throughput uses 3 decimals.
426///
427/// ```
428/// use perfgate_types::Metric;
429/// assert_eq!(perfgate_render::format_value(Metric::WallMs, 123.4), "123");
430/// assert_eq!(perfgate_render::format_value(Metric::ThroughputPerS, 1.5), "1.500");
431/// assert_eq!(perfgate_render::format_value(Metric::MaxRssKb, 2048.0), "2048");
432/// ```
433pub fn format_value(metric: Metric, v: f64) -> String {
434    match metric {
435        Metric::BinaryBytes
436        | Metric::CpuMs
437        | Metric::CtxSwitches
438        | Metric::MaxRssKb
439        | Metric::PageFaults
440        | Metric::WallMs => format!("{:.0}", v),
441        Metric::ThroughputPerS => format!("{:.3}", v),
442    }
443}
444
/// Format a fractional change as a signed percentage string.
///
/// Positive values get an explicit leading `+`; zero and negative values rely
/// on the sign produced by float formatting.
///
/// ```
/// assert_eq!(perfgate_render::format_pct(0.1), "+10.00%");
/// assert_eq!(perfgate_render::format_pct(-0.05), "-5.00%");
/// assert_eq!(perfgate_render::format_pct(0.0), "0.00%");
/// ```
pub fn format_pct(pct: f64) -> String {
    let value = pct * 100.0;
    if value > 0.0 {
        format!("+{value:.2}%")
    } else {
        format!("{value:.2}%")
    }
}
456
457/// Return a human-readable label for a budget [`Direction`].
458///
459/// # Examples
460///
461/// ```
462/// use perfgate_types::Direction;
463/// assert_eq!(perfgate_render::direction_str(Direction::Lower), "lower");
464/// assert_eq!(perfgate_render::direction_str(Direction::Higher), "higher");
465/// ```
466pub fn direction_str(direction: Direction) -> &'static str {
467    match direction {
468        Direction::Lower => "lower",
469        Direction::Higher => "higher",
470    }
471}
472
473/// Return an emoji icon for a [`MetricStatus`].
474///
475/// # Examples
476///
477/// ```
478/// use perfgate_types::MetricStatus;
479/// assert_eq!(perfgate_render::metric_status_icon(MetricStatus::Pass), "✅");
480/// assert_eq!(perfgate_render::metric_status_icon(MetricStatus::Warn), "⚠️");
481/// assert_eq!(perfgate_render::metric_status_icon(MetricStatus::Fail), "❌");
482/// ```
483pub fn metric_status_icon(status: MetricStatus) -> &'static str {
484    match status {
485        MetricStatus::Pass => "✅",
486        MetricStatus::Warn => "⚠️",
487        MetricStatus::Fail => "❌",
488    }
489}
490
491/// Return a lowercase string label for a [`MetricStatus`].
492///
493/// # Examples
494///
495/// ```
496/// use perfgate_types::MetricStatus;
497/// assert_eq!(perfgate_render::metric_status_str(MetricStatus::Pass), "pass");
498/// assert_eq!(perfgate_render::metric_status_str(MetricStatus::Warn), "warn");
499/// assert_eq!(perfgate_render::metric_status_str(MetricStatus::Fail), "fail");
500/// ```
501pub fn metric_status_str(status: MetricStatus) -> &'static str {
502    match status {
503        MetricStatus::Pass => "pass",
504        MetricStatus::Warn => "warn",
505        MetricStatus::Fail => "fail",
506    }
507}
508
509#[cfg(test)]
510mod tests {
511    use super::*;
512    use perfgate_types::{
513        BenchMeta, Budget, CompareRef, Delta, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
514    };
515    use std::collections::BTreeMap;
516
517    fn make_compare_receipt(status: MetricStatus) -> CompareReceipt {
518        let mut budgets = BTreeMap::new();
519        budgets.insert(
520            Metric::WallMs,
521            Budget {
522                threshold: 0.2,
523                warn_threshold: 0.1,
524                direction: Direction::Lower,
525            },
526        );
527
528        let mut deltas = BTreeMap::new();
529        deltas.insert(
530            Metric::WallMs,
531            Delta {
532                baseline: 100.0,
533                current: 115.0,
534                ratio: 1.15,
535                pct: 0.15,
536                regression: 0.15,
537                statistic: MetricStatistic::Median,
538                significance: None,
539                status,
540            },
541        );
542
543        CompareReceipt {
544            schema: perfgate_types::COMPARE_SCHEMA_V1.to_string(),
545            tool: ToolInfo {
546                name: "perfgate".into(),
547                version: "0.1.0".into(),
548            },
549            bench: BenchMeta {
550                name: "bench".into(),
551                cwd: None,
552                command: vec!["true".into()],
553                repeat: 1,
554                warmup: 0,
555                work_units: None,
556                timeout_ms: None,
557            },
558            baseline_ref: CompareRef {
559                path: None,
560                run_id: None,
561            },
562            current_ref: CompareRef {
563                path: None,
564                run_id: None,
565            },
566            budgets,
567            deltas,
568            verdict: Verdict {
569                status: VerdictStatus::Warn,
570                counts: VerdictCounts {
571                    pass: 0,
572                    warn: 1,
573                    fail: 0,
574                },
575                reasons: vec!["wall_ms_warn".to_string()],
576            },
577        }
578    }
579
580    #[test]
581    fn markdown_renders_table() {
582        let mut budgets = BTreeMap::new();
583        budgets.insert(
584            Metric::WallMs,
585            Budget {
586                threshold: 0.2,
587                warn_threshold: 0.18,
588                direction: Direction::Lower,
589            },
590        );
591
592        let mut deltas = BTreeMap::new();
593        deltas.insert(
594            Metric::WallMs,
595            Delta {
596                baseline: 1000.0,
597                current: 1100.0,
598                ratio: 1.1,
599                pct: 0.1,
600                regression: 0.1,
601                statistic: MetricStatistic::Median,
602                significance: None,
603                status: MetricStatus::Pass,
604            },
605        );
606
607        let compare = CompareReceipt {
608            schema: perfgate_types::COMPARE_SCHEMA_V1.to_string(),
609            tool: ToolInfo {
610                name: "perfgate".into(),
611                version: "0.1.0".into(),
612            },
613            bench: BenchMeta {
614                name: "demo".into(),
615                cwd: None,
616                command: vec!["true".into()],
617                repeat: 1,
618                warmup: 0,
619                work_units: None,
620                timeout_ms: None,
621            },
622            baseline_ref: CompareRef {
623                path: None,
624                run_id: None,
625            },
626            current_ref: CompareRef {
627                path: None,
628                run_id: None,
629            },
630            budgets,
631            deltas,
632            verdict: Verdict {
633                status: VerdictStatus::Pass,
634                counts: VerdictCounts {
635                    pass: 1,
636                    warn: 0,
637                    fail: 0,
638                },
639                reasons: vec![],
640            },
641        };
642
643        let md = render_markdown(&compare);
644        assert!(md.contains("| metric | baseline"));
645        assert!(md.contains("wall_ms"));
646    }
647
648    #[test]
649    fn markdown_template_renders_context_rows() {
650        let compare = make_compare_receipt(MetricStatus::Warn);
651        let template = "{{header}}\nbench={{bench.name}}\n{{#each rows}}metric={{metric}} status={{status}}\n{{/each}}";
652
653        let rendered = render_markdown_template(&compare, template).expect("render template");
654        assert!(rendered.contains("bench=bench"));
655        assert!(rendered.contains("metric=wall_ms"));
656        assert!(rendered.contains("status=warn"));
657    }
658
659    #[test]
660    fn markdown_template_strict_mode_rejects_unknown_fields() {
661        let compare = make_compare_receipt(MetricStatus::Warn);
662        let err = render_markdown_template(&compare, "{{does_not_exist}}").unwrap_err();
663        assert!(
664            err.to_string().contains("render markdown template"),
665            "unexpected error: {}",
666            err
667        );
668    }
669
670    #[test]
671    fn parse_reason_token_handles_valid_and_invalid() {
672        let parsed = parse_reason_token("wall_ms_warn");
673        assert!(parsed.is_some());
674        let (metric, status) = parsed.unwrap();
675        assert_eq!(metric, Metric::WallMs);
676        assert_eq!(status, MetricStatus::Warn);
677
678        assert!(parse_reason_token("wall_ms_pass").is_none());
679        assert!(parse_reason_token("unknown_warn").is_none());
680    }
681
682    #[test]
683    fn render_reason_line_formats_thresholds() {
684        let compare = make_compare_receipt(MetricStatus::Warn);
685        let line = render_reason_line(&compare, "wall_ms_warn");
686        assert!(line.contains("warn >="));
687        assert!(line.contains("fail >"));
688        assert!(line.contains("+15.00%"));
689    }
690
691    #[test]
692    fn render_reason_line_falls_back_when_missing_budget() {
693        let mut compare = make_compare_receipt(MetricStatus::Warn);
694        compare.budgets.clear();
695        let line = render_reason_line(&compare, "wall_ms_warn");
696        assert_eq!(line, "- wall_ms_warn\n");
697    }
698
699    #[test]
700    fn format_value_and_pct_render_expected_strings() {
701        assert_eq!(format_value(Metric::ThroughputPerS, 1.23456), "1.235");
702        assert_eq!(format_value(Metric::WallMs, 123.0), "123");
703        assert_eq!(format_pct(0.1), "+10.00%");
704        assert_eq!(format_pct(-0.1), "-10.00%");
705        assert_eq!(format_pct(0.0), "0.00%");
706    }
707
708    #[test]
709    fn github_annotations_only_warn_and_fail() {
710        let mut compare = make_compare_receipt(MetricStatus::Warn);
711        compare.deltas.insert(
712            Metric::MaxRssKb,
713            Delta {
714                baseline: 100.0,
715                current: 150.0,
716                ratio: 1.5,
717                pct: 0.5,
718                regression: 0.5,
719                statistic: MetricStatistic::Median,
720                significance: None,
721                status: MetricStatus::Fail,
722            },
723        );
724        compare.deltas.insert(
725            Metric::ThroughputPerS,
726            Delta {
727                baseline: 100.0,
728                current: 90.0,
729                ratio: 0.9,
730                pct: -0.1,
731                regression: 0.0,
732                statistic: MetricStatistic::Median,
733                significance: None,
734                status: MetricStatus::Pass,
735            },
736        );
737
738        let lines = github_annotations(&compare);
739        assert_eq!(lines.len(), 2);
740        assert!(lines.iter().any(|l| l.starts_with("::warning::")));
741        assert!(lines.iter().any(|l| l.starts_with("::error::")));
742        assert!(lines.iter().all(|l| !l.contains("throughput_per_s")));
743    }
744
745    #[test]
746    fn format_metric_with_statistic_displays_correctly() {
747        assert_eq!(
748            format_metric_with_statistic(Metric::WallMs, MetricStatistic::Median),
749            "wall_ms"
750        );
751        assert_eq!(
752            format_metric_with_statistic(Metric::WallMs, MetricStatistic::P95),
753            "wall_ms (p95)"
754        );
755    }
756
757    #[test]
758    fn direction_str_returns_correct_strings() {
759        assert_eq!(direction_str(Direction::Lower), "lower");
760        assert_eq!(direction_str(Direction::Higher), "higher");
761    }
762
763    #[test]
764    fn metric_status_str_returns_correct_strings() {
765        assert_eq!(metric_status_str(MetricStatus::Pass), "pass");
766        assert_eq!(metric_status_str(MetricStatus::Warn), "warn");
767        assert_eq!(metric_status_str(MetricStatus::Fail), "fail");
768    }
769
770    #[test]
771    fn metric_status_icon_returns_correct_emojis() {
772        assert_eq!(metric_status_icon(MetricStatus::Pass), "✅");
773        assert_eq!(metric_status_icon(MetricStatus::Warn), "⚠️");
774        assert_eq!(metric_status_icon(MetricStatus::Fail), "❌");
775    }
776
777    #[test]
778    fn snapshot_markdown_rendering() {
779        let compare = make_compare_receipt(MetricStatus::Warn);
780        let md = render_markdown(&compare);
781        insta::assert_snapshot!(md, @r###"
782        ⚠️ perfgate: warn
783
784        **Bench:** `bench`
785
786        | metric | baseline (median) | current (median) | delta | budget | status |
787        |---|---:|---:|---:|---:|---|
788        | `wall_ms` | 100 ms | 115 ms | +15.00% | 20.0% (lower) | ⚠️ |
789
790        **Notes:**
791        - wall_ms_warn: +15.00% (warn >= 10.00%, fail > 20.00%)
792        "###);
793    }
794
795    #[test]
796    fn template_custom_basic_variables() {
797        let compare = make_compare_receipt(MetricStatus::Pass);
798        let template = "Verdict: {{verdict.status}}\nBench: {{bench.name}}\nHeader: {{header}}";
799        let rendered = render_markdown_template(&compare, template).expect("basic variables");
800        assert!(rendered.contains("Bench: bench"));
801        assert!(rendered.contains("Header:"));
802    }
803
804    #[test]
805    fn template_missing_variable_returns_error() {
806        let compare = make_compare_receipt(MetricStatus::Pass);
807        let result = render_markdown_template(&compare, "{{nonexistent_var}}");
808        assert!(
809            result.is_err(),
810            "strict mode should reject missing variables"
811        );
812    }
813
814    #[test]
815    fn template_empty_deltas_renders_no_rows() {
816        let mut compare = make_compare_receipt(MetricStatus::Pass);
817        compare.deltas.clear();
818        compare.budgets.clear();
819        let template = "rows:{{#each rows}}[{{metric}}]{{/each}}end";
820        let rendered = render_markdown_template(&compare, template).expect("empty data");
821        assert_eq!(rendered, "rows:end");
822    }
823
824    #[test]
825    fn template_conditional_verdict_pass() {
826        let mut compare = make_compare_receipt(MetricStatus::Pass);
827        compare.verdict.status = VerdictStatus::Pass;
828        // Handlebars doesn't have built-in `eq` helper, so use string comparison approach
829        let template = "{{verdict.status}}";
830        let rendered = render_markdown_template(&compare, template).expect("verdict pass");
831        assert_eq!(rendered, "pass");
832    }
833
834    #[test]
835    fn template_conditional_verdict_warn() {
836        let mut compare = make_compare_receipt(MetricStatus::Warn);
837        compare.verdict.status = VerdictStatus::Warn;
838        let template = "status={{verdict.status}}";
839        let rendered = render_markdown_template(&compare, template).expect("verdict warn");
840        assert_eq!(rendered, "status=warn");
841    }
842
843    #[test]
844    fn template_conditional_verdict_fail() {
845        let mut compare = make_compare_receipt(MetricStatus::Fail);
846        compare.verdict.status = VerdictStatus::Fail;
847        let template = "{{#if verdict.reasons}}REASONS:{{#each verdict.reasons}}{{this}},{{/each}}{{else}}NO_REASONS{{/if}}";
848        let rendered = render_markdown_template(&compare, template).expect("verdict fail");
849        assert!(rendered.contains("REASONS:"));
850        assert!(rendered.contains("wall_ms_warn"));
851    }
852
853    #[test]
854    fn template_conditional_on_rows_status() {
855        let compare = make_compare_receipt(MetricStatus::Warn);
856        // Handlebars without custom helpers - use simpler approach
857        let template = "{{#each rows}}{{status_icon}} {{metric}} is {{status}}\n{{/each}}";
858        let rendered = render_markdown_template(&compare, template).expect("row status");
859        assert!(rendered.contains("wall_ms is warn"));
860        assert!(rendered.contains("⚠️"));
861    }
862
    #[test]
    fn snapshot_github_annotations() {
        // Fixture already carries a failing wall_ms delta; add a warning
        // max_rss_kb delta so both annotation levels appear in one snapshot.
        let mut compare = make_compare_receipt(MetricStatus::Fail);
        compare.deltas.insert(
            Metric::MaxRssKb,
            Delta {
                baseline: 100.0,
                current: 150.0,
                ratio: 1.5,
                pct: 0.5,
                regression: 0.5,
                statistic: MetricStatistic::Median,
                significance: None,
                status: MetricStatus::Warn,
            },
        );
        let annotations = github_annotations(&compare);
        // Order is deterministic: `deltas` is a BTreeMap keyed by Metric, so
        // max_rss_kb (::warning::) precedes wall_ms (::error::).
        insta::assert_debug_snapshot!(annotations, @r###"
        [
            "::warning::perfgate bench max_rss_kb: +50.00% (baseline 100KB, current 150KB)",
            "::error::perfgate bench wall_ms: +15.00% (baseline 100ms, current 115ms)",
        ]
        "###);
    }
887
888    // ── Template rendering error paths ──────────────────────────────────
889
890    #[test]
891    fn template_invalid_syntax_returns_error() {
892        let compare = make_compare_receipt(MetricStatus::Pass);
893        let result = render_markdown_template(&compare, "{{#if}}unclosed");
894        assert!(result.is_err(), "malformed template should fail to parse");
895        assert!(
896            result
897                .unwrap_err()
898                .to_string()
899                .contains("parse markdown template"),
900        );
901    }
902
903    #[test]
904    fn template_nested_missing_variable_returns_error() {
905        let compare = make_compare_receipt(MetricStatus::Pass);
906        let result = render_markdown_template(&compare, "{{bench.nonexistent_field}}");
907        assert!(
908            result.is_err(),
909            "strict mode should reject missing nested variable"
910        );
911    }
912
    #[test]
    fn snapshot_markdown_empty_deltas() {
        // A receipt with no metrics still renders the verdict line, the bench
        // name, and the table header — just with zero data rows after it.
        let mut compare = make_compare_receipt(MetricStatus::Pass);
        compare.deltas.clear();
        compare.budgets.clear();
        compare.verdict.reasons.clear();
        compare.verdict.status = perfgate_types::VerdictStatus::Pass;
        compare.verdict.counts = VerdictCounts {
            pass: 0,
            warn: 0,
            fail: 0,
        };
        let md = render_markdown(&compare);
        insta::assert_snapshot!(md, @r###"
        ✅ perfgate: pass

        **Bench:** `bench`

        | metric | baseline (median) | current (median) | delta | budget | status |
        |---|---:|---:|---:|---:|---|
        "###);
    }
935
936    #[test]
937    fn github_annotations_empty_deltas() {
938        let mut compare = make_compare_receipt(MetricStatus::Pass);
939        compare.deltas.clear();
940        let annotations = github_annotations(&compare);
941        assert!(annotations.is_empty());
942    }
943
    #[test]
    fn snapshot_markdown_all_passing_multi_metric() {
        // Three passing metrics spanning memory (lower-is-better), throughput
        // (higher-is-better), and wall time. All rows should render with the
        // ✅ icon and no Notes section should appear.
        let mut compare = make_compare_receipt(MetricStatus::Pass);
        compare.verdict.status = perfgate_types::VerdictStatus::Pass;
        compare.verdict.reasons.clear();
        compare.verdict.counts = VerdictCounts {
            pass: 3,
            warn: 0,
            fail: 0,
        };
        compare.budgets.insert(
            Metric::MaxRssKb,
            Budget {
                threshold: 0.3,
                warn_threshold: 0.2,
                direction: Direction::Lower,
            },
        );
        compare.budgets.insert(
            Metric::ThroughputPerS,
            Budget {
                threshold: 0.15,
                warn_threshold: 0.1,
                direction: Direction::Higher,
            },
        );
        // Memory improved slightly (-2%) — within budget.
        compare.deltas.insert(
            Metric::MaxRssKb,
            Delta {
                baseline: 500.0,
                current: 490.0,
                ratio: 0.98,
                pct: -0.02,
                regression: 0.0,
                statistic: MetricStatistic::Median,
                significance: None,
                status: MetricStatus::Pass,
            },
        );
        // Throughput improved (+4%) — improvement in the "higher" direction.
        compare.deltas.insert(
            Metric::ThroughputPerS,
            Delta {
                baseline: 50.0,
                current: 52.0,
                ratio: 1.04,
                pct: 0.04,
                regression: 0.0,
                statistic: MetricStatistic::Median,
                significance: None,
                status: MetricStatus::Pass,
            },
        );
        let md = render_markdown(&compare);
        // All passing → no Notes section
        assert!(!md.contains("**Notes:**"));
        insta::assert_snapshot!(md, @r###"
        ✅ perfgate: pass

        **Bench:** `bench`

        | metric | baseline (median) | current (median) | delta | budget | status |
        |---|---:|---:|---:|---:|---|
        | `max_rss_kb` | 500 KB | 490 KB | -2.00% | 30.0% (lower) | ✅ |
        | `throughput_per_s` | 50.000 /s | 52.000 /s | +4.00% | 15.0% (higher) | ✅ |
        | `wall_ms` | 100 ms | 115 ms | +15.00% | 20.0% (lower) | ✅ |
        "###);
    }
1011
1012    #[test]
1013    fn github_annotations_all_passing_yields_none() {
1014        let mut compare = make_compare_receipt(MetricStatus::Pass);
1015        compare.deltas.insert(
1016            Metric::MaxRssKb,
1017            Delta {
1018                baseline: 100.0,
1019                current: 95.0,
1020                ratio: 0.95,
1021                pct: -0.05,
1022                regression: 0.0,
1023                statistic: MetricStatistic::Median,
1024                significance: None,
1025                status: MetricStatus::Pass,
1026            },
1027        );
1028        let annotations = github_annotations(&compare);
1029        assert!(
1030            annotations.is_empty(),
1031            "all-pass should produce no annotations"
1032        );
1033    }
1034
1035    // ── render_reason_line edge cases ───────────────────────────────────
1036
1037    #[test]
1038    fn render_reason_line_empty_string() {
1039        let compare = make_compare_receipt(MetricStatus::Warn);
1040        let line = render_reason_line(&compare, "");
1041        assert_eq!(line, "- \n");
1042    }
1043
1044    #[test]
1045    fn render_reason_line_no_underscore() {
1046        let compare = make_compare_receipt(MetricStatus::Warn);
1047        let line = render_reason_line(&compare, "nounderscore");
1048        assert_eq!(line, "- nounderscore\n");
1049    }
1050
1051    #[test]
1052    fn render_reason_line_single_underscore() {
1053        let compare = make_compare_receipt(MetricStatus::Warn);
1054        let line = render_reason_line(&compare, "_warn");
1055        // metric_part="" which won't parse → fallback
1056        assert_eq!(line, "- _warn\n");
1057    }
1058
1059    #[test]
1060    fn render_reason_line_very_long_token() {
1061        let compare = make_compare_receipt(MetricStatus::Warn);
1062        let long_token = format!("{}_warn", "a".repeat(500));
1063        let line = render_reason_line(&compare, &long_token);
1064        // Unknown metric → fallback
1065        assert!(line.starts_with("- "));
1066        assert!(line.contains(&"a".repeat(500)));
1067        assert!(line.ends_with('\n'));
1068    }
1069
1070    #[test]
1071    fn render_reason_line_fail_status() {
1072        let compare = make_compare_receipt(MetricStatus::Fail);
1073        let line = render_reason_line(&compare, "wall_ms_fail");
1074        assert!(line.contains("fail >"));
1075        assert!(line.contains("+15.00%"));
1076    }
1077
1078    // ── parse_reason_token edge cases ───────────────────────────────────
1079
1080    #[test]
1081    fn parse_reason_token_empty_string() {
1082        assert!(parse_reason_token("").is_none());
1083    }
1084
1085    #[test]
1086    fn parse_reason_token_only_underscores() {
1087        assert!(parse_reason_token("___").is_none());
1088    }
1089
1090    #[test]
1091    fn parse_reason_token_valid_fail() {
1092        let result = parse_reason_token("max_rss_kb_fail");
1093        assert!(result.is_some());
1094        let (metric, status) = result.unwrap();
1095        assert_eq!(metric, Metric::MaxRssKb);
1096        assert_eq!(status, MetricStatus::Fail);
1097    }
1098
1099    #[test]
1100    fn parse_reason_token_trailing_underscore() {
1101        // "wall_ms_" → status_part="" → None
1102        assert!(parse_reason_token("wall_ms_").is_none());
1103    }
1104
1105    // ── Template rendering with special metric values ───────────────────
1106
1107    #[test]
1108    fn template_renders_extreme_pct_values() {
1109        let mut compare = make_compare_receipt(MetricStatus::Fail);
1110        compare.deltas.get_mut(&Metric::WallMs).unwrap().pct = 99.99;
1111        let template = "{{#each rows}}{{delta_pct}}{{/each}}";
1112        let rendered = render_markdown_template(&compare, template).expect("extreme pct");
1113        assert!(rendered.contains("+9999.00%"));
1114    }
1115
1116    #[test]
1117    fn template_renders_negative_pct() {
1118        let mut compare = make_compare_receipt(MetricStatus::Pass);
1119        compare.deltas.get_mut(&Metric::WallMs).unwrap().pct = -0.5;
1120        let template = "{{#each rows}}{{delta_pct}}{{/each}}";
1121        let rendered = render_markdown_template(&compare, template).expect("negative pct");
1122        assert!(rendered.contains("-50.00%"));
1123    }
1124
1125    #[test]
1126    fn template_renders_zero_baseline_values() {
1127        let mut compare = make_compare_receipt(MetricStatus::Pass);
1128        let d = compare.deltas.get_mut(&Metric::WallMs).unwrap();
1129        d.baseline = 0.0;
1130        d.current = 0.0;
1131        d.pct = 0.0;
1132        let template = "{{#each rows}}b={{baseline}} c={{current}} d={{delta_pct}}{{/each}}";
1133        let rendered = render_markdown_template(&compare, template).expect("zero values");
1134        assert!(rendered.contains("b=0"));
1135        assert!(rendered.contains("c=0"));
1136        assert!(rendered.contains("d=0.00%"));
1137    }
1138
1139    #[test]
1140    fn template_renders_raw_fields() {
1141        let compare = make_compare_receipt(MetricStatus::Warn);
1142        let template = "{{#each rows}}raw_pct={{raw.pct}} sig={{raw.significance}}{{/each}}";
1143        let rendered = render_markdown_template(&compare, template).expect("raw fields");
1144        assert!(rendered.contains("raw_pct=0.15"));
1145        assert!(rendered.contains("sig="));
1146    }
1147
1148    #[test]
1149    fn template_empty_string_renders_empty() {
1150        let compare = make_compare_receipt(MetricStatus::Pass);
1151        let rendered = render_markdown_template(&compare, "").expect("empty template");
1152        assert_eq!(rendered, "");
1153    }
1154
1155    #[test]
1156    fn template_literal_text_only() {
1157        let compare = make_compare_receipt(MetricStatus::Pass);
1158        let rendered =
1159            render_markdown_template(&compare, "just literal text").expect("literal template");
1160        assert_eq!(rendered, "just literal text");
1161    }
1162
1163    #[test]
1164    fn snapshot_markdown_template_context_empty_deltas() {
1165        let mut compare = make_compare_receipt(MetricStatus::Pass);
1166        compare.deltas.clear();
1167        compare.budgets.clear();
1168        compare.verdict.reasons.clear();
1169        let ctx = markdown_template_context(&compare);
1170        let rows = ctx["rows"].as_array().unwrap();
1171        assert!(rows.is_empty());
1172    }
1173}
1174
#[cfg(test)]
mod property_tests {
    //! Property-based tests: rendering must be total, deterministic, and
    //! structurally consistent for arbitrary well-formed receipts.
    use super::*;
    use perfgate_types::{
        BenchMeta, Budget, CompareRef, Delta, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
    };
    use proptest::prelude::*;
    use std::collections::BTreeMap;

    /// Short identifier-like strings (never empty).
    fn non_empty_string() -> impl Strategy<Value = String> {
        // A string literal IS a regex strategy producing matching `String`s;
        // the previous identity `prop_map(|s| s)` was a no-op.
        "[a-zA-Z0-9_-]{1,20}"
    }

    /// Arbitrary tool name/version pairs.
    fn tool_info_strategy() -> impl Strategy<Value = ToolInfo> {
        (non_empty_string(), non_empty_string())
            .prop_map(|(name, version)| ToolInfo { name, version })
    }

    /// Arbitrary bench metadata with sensible numeric ranges.
    fn bench_meta_strategy() -> impl Strategy<Value = BenchMeta> {
        (
            non_empty_string(),
            proptest::option::of(non_empty_string()),
            proptest::collection::vec(non_empty_string(), 1..5),
            1u32..100,
            0u32..10,
            proptest::option::of(1u64..10000),
            proptest::option::of(100u64..60000),
        )
            .prop_map(
                |(name, cwd, command, repeat, warmup, work_units, timeout_ms)| BenchMeta {
                    name,
                    cwd,
                    command,
                    repeat,
                    warmup,
                    work_units,
                    timeout_ms,
                },
            )
    }

    /// Arbitrary baseline/current references (either field may be absent).
    fn compare_ref_strategy() -> impl Strategy<Value = CompareRef> {
        (
            proptest::option::of(non_empty_string()),
            proptest::option::of(non_empty_string()),
        )
            .prop_map(|(path, run_id)| CompareRef { path, run_id })
    }

    fn direction_strategy() -> impl Strategy<Value = Direction> {
        prop_oneof![Just(Direction::Lower), Just(Direction::Higher),]
    }

    /// Budgets whose warn threshold is always ≤ the fail threshold
    /// (warn = threshold × factor, factor < 1.0).
    fn budget_strategy() -> impl Strategy<Value = Budget> {
        (0.01f64..1.0, 0.01f64..1.0, direction_strategy()).prop_map(
            |(threshold, warn_factor, direction)| {
                let warn_threshold = threshold * warn_factor;
                Budget {
                    threshold,
                    warn_threshold,
                    direction,
                }
            },
        )
    }

    fn metric_status_strategy() -> impl Strategy<Value = MetricStatus> {
        prop_oneof![
            Just(MetricStatus::Pass),
            Just(MetricStatus::Warn),
            Just(MetricStatus::Fail),
        ]
    }

    /// Deltas with internally-consistent ratio/pct/regression derived from
    /// the generated baseline/current pair. The status is independent of the
    /// numbers — rendering must not care.
    fn delta_strategy() -> impl Strategy<Value = Delta> {
        (0.1f64..10000.0, 0.1f64..10000.0, metric_status_strategy()).prop_map(
            |(baseline, current, status)| {
                let ratio = current / baseline;
                let pct = (current - baseline) / baseline;
                let regression = if pct > 0.0 { pct } else { 0.0 };
                Delta {
                    baseline,
                    current,
                    ratio,
                    pct,
                    regression,
                    statistic: MetricStatistic::Median,
                    significance: None,
                    status,
                }
            },
        )
    }

    fn verdict_status_strategy() -> impl Strategy<Value = VerdictStatus> {
        prop_oneof![
            Just(VerdictStatus::Pass),
            Just(VerdictStatus::Warn),
            Just(VerdictStatus::Fail),
        ]
    }

    fn verdict_counts_strategy() -> impl Strategy<Value = VerdictCounts> {
        (0u32..10, 0u32..10, 0u32..10).prop_map(|(pass, warn, fail)| VerdictCounts {
            pass,
            warn,
            fail,
        })
    }

    /// Verdicts with 0–4 free-form reason strings (alphanumeric + spaces so
    /// `contains` checks against rendered markdown are unambiguous).
    fn verdict_strategy() -> impl Strategy<Value = Verdict> {
        (
            verdict_status_strategy(),
            verdict_counts_strategy(),
            proptest::collection::vec("[a-zA-Z0-9 ]{1,50}", 0..5),
        )
            .prop_map(|(status, counts, reasons)| Verdict {
                status,
                counts,
                reasons,
            })
    }

    /// One of every known metric variant.
    fn metric_strategy() -> impl Strategy<Value = Metric> {
        prop_oneof![
            Just(Metric::BinaryBytes),
            Just(Metric::CpuMs),
            Just(Metric::CtxSwitches),
            Just(Metric::WallMs),
            Just(Metric::MaxRssKb),
            Just(Metric::PageFaults),
            Just(Metric::ThroughputPerS),
        ]
    }

    fn budgets_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Budget>> {
        proptest::collection::btree_map(metric_strategy(), budget_strategy(), 0..8)
    }

    fn deltas_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Delta>> {
        proptest::collection::btree_map(metric_strategy(), delta_strategy(), 0..8)
    }

    /// Full receipts combining all of the above; budgets and deltas are
    /// generated independently (renderers must tolerate mismatched keys).
    fn compare_receipt_strategy() -> impl Strategy<Value = CompareReceipt> {
        (
            tool_info_strategy(),
            bench_meta_strategy(),
            compare_ref_strategy(),
            compare_ref_strategy(),
            budgets_map_strategy(),
            deltas_map_strategy(),
            verdict_strategy(),
        )
            .prop_map(
                |(tool, bench, baseline_ref, current_ref, budgets, deltas, verdict)| {
                    CompareReceipt {
                        schema: perfgate_types::COMPARE_SCHEMA_V1.to_string(),
                        tool,
                        bench,
                        baseline_ref,
                        current_ref,
                        budgets,
                        deltas,
                        verdict,
                    }
                },
            )
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]

        /// The markdown output contains every structural element implied by
        /// the receipt: verdict emoji + word, bench name, table header, one
        /// mention per metric, and all verdict reasons under a Notes section.
        #[test]
        fn markdown_rendering_completeness(receipt in compare_receipt_strategy()) {
            let md = render_markdown(&receipt);

            let expected_emoji = match receipt.verdict.status {
                VerdictStatus::Pass => "✅",
                VerdictStatus::Warn => "⚠️",
                VerdictStatus::Fail => "❌",
            };
            prop_assert!(
                md.contains(expected_emoji),
                "Markdown should contain verdict emoji '{}' for status {:?}. Got:\n{}",
                expected_emoji,
                receipt.verdict.status,
                md
            );

            let expected_status_word = match receipt.verdict.status {
                VerdictStatus::Pass => "pass",
                VerdictStatus::Warn => "warn",
                VerdictStatus::Fail => "fail",
            };
            prop_assert!(
                md.contains(expected_status_word),
                "Markdown should contain status word '{}'. Got:\n{}",
                expected_status_word,
                md
            );

            prop_assert!(
                md.contains(&receipt.bench.name),
                "Markdown should contain benchmark name '{}'. Got:\n{}",
                receipt.bench.name,
                md
            );

            prop_assert!(
                md.contains("| metric |"),
                "Markdown should contain table header. Got:\n{}",
                md
            );

            for metric in receipt.deltas.keys() {
                let metric_name = metric.as_str();
                prop_assert!(
                    md.contains(metric_name),
                    "Markdown should contain metric '{}'. Got:\n{}",
                    metric_name,
                    md
                );
            }

            for reason in &receipt.verdict.reasons {
                prop_assert!(
                    md.contains(reason),
                    "Markdown should contain verdict reason '{}'. Got:\n{}",
                    reason,
                    md
                );
            }

            if !receipt.verdict.reasons.is_empty() {
                prop_assert!(
                    md.contains("**Notes:**"),
                    "Markdown should contain Notes section when there are reasons. Got:\n{}",
                    md
                );
            }
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]

        /// Annotation counts and levels mirror the per-metric statuses:
        /// Fail → ::error::, Warn → ::warning::, Pass → nothing. Each
        /// annotation names its bench, metric, and delta percentage.
        #[test]
        fn github_annotation_generation(receipt in compare_receipt_strategy()) {
            let annotations = github_annotations(&receipt);

            let expected_fail_count = receipt.deltas.values()
                .filter(|d| d.status == MetricStatus::Fail)
                .count();
            let expected_warn_count = receipt.deltas.values()
                .filter(|d| d.status == MetricStatus::Warn)
                .count();
            let expected_pass_count = receipt.deltas.values()
                .filter(|d| d.status == MetricStatus::Pass)
                .count();

            let actual_error_count = annotations.iter()
                .filter(|a| a.starts_with("::error::"))
                .count();
            let actual_warning_count = annotations.iter()
                .filter(|a| a.starts_with("::warning::"))
                .count();

            prop_assert_eq!(
                actual_error_count,
                expected_fail_count,
                "Expected {} ::error:: annotations for {} Fail metrics, got {}. Annotations: {:?}",
                expected_fail_count,
                expected_fail_count,
                actual_error_count,
                annotations
            );

            prop_assert_eq!(
                actual_warning_count,
                expected_warn_count,
                "Expected {} ::warning:: annotations for {} Warn metrics, got {}. Annotations: {:?}",
                expected_warn_count,
                expected_warn_count,
                actual_warning_count,
                annotations
            );

            let total_annotations = annotations.len();
            let expected_total = expected_fail_count + expected_warn_count;
            prop_assert_eq!(
                total_annotations,
                expected_total,
                "Expected {} total annotations (fail: {}, warn: {}, pass: {} should produce none), got {}. Annotations: {:?}",
                expected_total,
                expected_fail_count,
                expected_warn_count,
                expected_pass_count,
                total_annotations,
                annotations
            );

            for (metric, delta) in &receipt.deltas {
                if delta.status == MetricStatus::Pass {
                    continue;
                }

                let metric_name = metric.as_str();
                let matching_annotation = annotations.iter().find(|a| a.contains(metric_name));

                prop_assert!(
                    matching_annotation.is_some(),
                    "Expected annotation for metric '{}' with status {:?}. Annotations: {:?}",
                    metric_name,
                    delta.status,
                    annotations
                );

                let annotation = matching_annotation.unwrap();

                prop_assert!(
                    annotation.contains(&receipt.bench.name),
                    "Annotation should contain bench name '{}'. Got: {}",
                    receipt.bench.name,
                    annotation
                );

                prop_assert!(
                    annotation.contains(metric_name),
                    "Annotation should contain metric name '{}'. Got: {}",
                    metric_name,
                    annotation
                );

                let pct_str = format_pct(delta.pct);
                prop_assert!(
                    annotation.contains(&pct_str),
                    "Annotation should contain delta percentage '{}'. Got: {}",
                    pct_str,
                    annotation
                );

                match delta.status {
                    MetricStatus::Fail => {
                        prop_assert!(
                            annotation.starts_with("::error::"),
                            "Fail metric should produce ::error:: annotation. Got: {}",
                            annotation
                        );
                    }
                    MetricStatus::Warn => {
                        prop_assert!(
                            annotation.starts_with("::warning::"),
                            "Warn metric should produce ::warning:: annotation. Got: {}",
                            annotation
                        );
                    }
                    // Pass metrics were skipped above.
                    MetricStatus::Pass => unreachable!(),
                }
            }
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]

        /// Same CompareReceipt always produces identical markdown output.
        #[test]
        fn markdown_rendering_determinism(receipt in compare_receipt_strategy()) {
            let md1 = render_markdown(&receipt);
            let md2 = render_markdown(&receipt);
            prop_assert_eq!(
                md1, md2,
                "render_markdown must be deterministic"
            );
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]

        /// Every annotation string starts with a valid GitHub Actions level prefix.
        #[test]
        fn annotation_valid_levels(receipt in compare_receipt_strategy()) {
            let annotations = github_annotations(&receipt);
            for annotation in &annotations {
                prop_assert!(
                    annotation.starts_with("::error::")
                        || annotation.starts_with("::warning::")
                        || annotation.starts_with("::notice::"),
                    "Annotation should start with a valid level (::error::, ::warning::, ::notice::). Got: {}",
                    annotation
                );
            }
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(50))]

        /// Rendering an HTML-like template produces output with valid structure tags.
        #[test]
        fn template_html_structure(receipt in compare_receipt_strategy()) {
            let template = concat!(
                "<html><body>",
                "<h1>{{header}}</h1>",
                "<p>Bench: {{bench.name}}</p>",
                "<table>",
                "{{#each rows}}<tr><td>{{metric}}</td><td>{{status}}</td></tr>{{/each}}",
                "</table>",
                "</body></html>",
            );
            let rendered = render_markdown_template(&receipt, template)
                .expect("HTML template should render");

            prop_assert!(rendered.contains("<html>"), "Missing <html> tag. Got:\n{}", rendered);
            prop_assert!(rendered.contains("</html>"), "Missing </html> tag. Got:\n{}", rendered);
            prop_assert!(rendered.contains("<body>"), "Missing <body> tag. Got:\n{}", rendered);
            prop_assert!(rendered.contains("</body>"), "Missing </body> tag. Got:\n{}", rendered);
            prop_assert!(rendered.contains("<table>"), "Missing <table> tag. Got:\n{}", rendered);
            prop_assert!(rendered.contains("</table>"), "Missing </table> tag. Got:\n{}", rendered);
            prop_assert!(
                rendered.contains(&receipt.bench.name),
                "HTML should contain bench name '{}'. Got:\n{}",
                receipt.bench.name,
                rendered
            );

            for metric in receipt.deltas.keys() {
                let metric_name = metric.as_str();
                prop_assert!(
                    rendered.contains(metric_name),
                    "HTML should contain metric '{}'. Got:\n{}",
                    metric_name,
                    rendered
                );
            }
        }
    }
}