// perfgate_summary/lib.rs
//! Summarization logic for perfgate comparison receipts.
//!
//! Aggregates multiple comparison receipts into a compact summary table showing
//! benchmark name, verdict status, wall-clock time, and percentage change.
//!
//! Part of the [perfgate](https://github.com/EffortlessMetrics/perfgate) workspace.
//!
//! # Example
//!
//! ```no_run
//! use perfgate_summary::{SummaryRequest, SummaryUseCase};
//!
//! let uc = SummaryUseCase;
//! let outcome = uc.execute(SummaryRequest {
//!     files: vec!["artifacts/perfgate/*.compare.json".to_string()],
//! }).unwrap();
//! for row in &outcome.rows {
//!     println!("{}: {} ({})", row.benchmark, row.status, row.change_pct);
//! }
//! ```

use anyhow::Context;
use glob::glob;
use perfgate_types::{CompareReceipt, Metric};
use std::fmt::Write as _;
use std::fs;

/// Request for summarizing multiple comparison receipts.
#[derive(Debug, Clone)]
pub struct SummaryRequest {
    /// List of glob patterns or file paths.
    ///
    /// Each entry is expanded with `glob`, so plain paths and wildcard
    /// patterns (e.g. `artifacts/perfgate/*.compare.json`) both work.
    pub files: Vec<String>,
}
33
/// A single row in the summary table.
#[derive(Debug, Clone)]
pub struct SummaryRow {
    /// Benchmark name, taken from the receipt's `bench.name`.
    pub benchmark: String,
    /// Lower-cased verdict status (e.g. "pass", "fail").
    pub status: String,
    /// Current wall-clock time formatted to two decimals, or "N/A" when the
    /// receipt has no wall-clock delta.
    pub wall_ms: String,
    /// Percentage change of the wall-clock metric (e.g. "5.0%"), or "N/A".
    pub change_pct: String,
}
42
/// Outcome of the summary operation.
#[derive(Debug, Clone)]
pub struct SummaryOutcome {
    /// One row per successfully parsed comparison receipt.
    pub rows: Vec<SummaryRow>,
    /// True when at least one receipt carried a "fail" verdict.
    pub failed: bool,
}
49
/// Use case for summarizing comparison receipts.
///
/// Stateless; construct directly (`SummaryUseCase`) and call
/// [`SummaryUseCase::execute`].
pub struct SummaryUseCase;
52
53impl SummaryUseCase {
54    /// Executes the summary use case.
55    pub fn execute(&self, req: SummaryRequest) -> anyhow::Result<SummaryOutcome> {
56        let mut paths = Vec::new();
57        for pattern in req.files {
58            for entry in
59                glob(&pattern).with_context(|| format!("invalid glob pattern: {}", pattern))?
60            {
61                paths.push(entry?);
62            }
63        }
64
65        if paths.is_empty() {
66            anyhow::bail!("no comparison receipts found");
67        }
68
69        let mut failed = false;
70        let mut rows = Vec::new();
71        for path in paths {
72            let content =
73                fs::read_to_string(&path).with_context(|| format!("read {}", path.display()))?;
74            let compare: CompareReceipt = serde_json::from_str(&content)
75                .with_context(|| format!("parse JSON from {}", path.display()))?;
76
77            let benchmark = compare.bench.name.clone();
78            let status = format!("{:?}", compare.verdict.status).to_lowercase();
79            if status == "fail" {
80                failed = true;
81            }
82            let wall = compare.deltas.get(&Metric::WallMs);
83            let (wall_ms, change_pct) = if let Some(d) = wall {
84                (
85                    format!("{:.2}", d.current),
86                    format!("{:.1}%", d.pct * 100.0),
87                )
88            } else {
89                ("N/A".to_string(), "N/A".to_string())
90            };
91
92            rows.push(SummaryRow {
93                benchmark,
94                status,
95                wall_ms,
96                change_pct,
97            });
98        }
99
100        Ok(SummaryOutcome { rows, failed })
101    }
102
103    /// Renders the summary outcome as a Markdown table.
104    pub fn render_markdown(&self, outcome: &SummaryOutcome) -> String {
105        let mut md = String::new();
106        md.push_str("\n| Benchmark | Status | Wall (ms) | Change |\n");
107        md.push_str("|-----------|--------|-----------|--------|\n");
108
109        for row in &outcome.rows {
110            md.push_str(&format!(
111                "| {} | {} | {} | {} |\n",
112                row.benchmark, row.status, row.wall_ms, row.change_pct
113            ));
114        }
115        md
116    }
117}
118
#[cfg(test)]
mod tests {
    use super::*;
    use perfgate_types::{
        BenchMeta, CompareReceipt, CompareRef, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
    };
    use std::collections::BTreeMap;
    use tempfile::tempdir;

    /// End-to-end check: one passing receipt on disk yields a single summary
    /// row carrying the benchmark name and a lower-cased status.
    #[test]
    fn test_summary_execution() {
        let dir = tempdir().unwrap();
        let path = dir.path().join("run1.json");

        // Minimal fixture: `deltas` is empty, so the wall/change columns fall
        // back to "N/A"; the Pass verdict means `failed` stays false.
        let receipt = CompareReceipt {
            schema: "perfgate.compare.v1".to_string(),
            tool: ToolInfo {
                name: "test".into(),
                version: "0".into(),
            },
            bench: BenchMeta {
                name: "bench1".into(),
                cwd: None,
                command: vec![],
                repeat: 0,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: None,
                run_id: None,
            },
            current_ref: CompareRef {
                path: None,
                run_id: None,
            },
            budgets: BTreeMap::new(),
            deltas: BTreeMap::new(),
            verdict: Verdict {
                status: VerdictStatus::Pass,
                counts: VerdictCounts {
                    pass: 0,
                    warn: 1,
                    fail: 0,
                    skip: 0,
                },
                reasons: vec![],
            },
        };

        fs::write(&path, serde_json::to_string(&receipt).unwrap()).unwrap();

        let usecase = SummaryUseCase;
        // Exercise the plain-path case (no wildcards) of glob expansion.
        let outcome = usecase
            .execute(SummaryRequest {
                files: vec![path.to_str().unwrap().to_string()],
            })
            .unwrap();

        assert_eq!(outcome.rows.len(), 1);
        assert_eq!(outcome.rows[0].benchmark, "bench1");
        // Status comes from `{:?}` on VerdictStatus, lower-cased.
        assert_eq!(outcome.rows[0].status, "pass");
    }
}
183}