// perfgate_summary/lib.rs
1//! Summarization logic for perfgate comparison receipts.
2
3use anyhow::Context;
4use glob::glob;
5use perfgate_types::{CompareReceipt, Metric};
6use std::fs;
7
/// Request for summarizing multiple comparison receipts.
#[derive(Debug, Clone)]
pub struct SummaryRequest {
    /// List of glob patterns or file paths.
    ///
    /// Each entry is expanded with `glob`, so both literal paths and
    /// patterns such as `results/*.json` are accepted.
    pub files: Vec<String>,
}
14
/// A single row in the summary table.
///
/// All fields are pre-rendered strings so a row can be dropped straight
/// into the Markdown table without further formatting.
#[derive(Debug, Clone)]
pub struct SummaryRow {
    // Benchmark name taken from the receipt's `bench.name`.
    pub benchmark: String,
    // Lowercased `Debug` rendering of the verdict status (e.g. "pass").
    pub status: String,
    // Current wall-clock value formatted to 2 decimals, or "N/A" when the
    // receipt has no `WallMs` delta.
    pub wall_ms: String,
    // Relative change formatted as a percentage (e.g. "3.5%"), or "N/A".
    pub change_pct: String,
}
23
/// Outcome of the summary operation.
#[derive(Debug, Clone)]
pub struct SummaryOutcome {
    // One row per parsed comparison receipt, in glob-expansion order.
    pub rows: Vec<SummaryRow>,
    // True when at least one receipt's verdict status rendered as "fail".
    pub failed: bool,
}
30
/// Use case for summarizing comparison receipts.
///
/// Stateless; construct it directly (`SummaryUseCase`) and call
/// [`SummaryUseCase::execute`] / [`SummaryUseCase::render_markdown`].
pub struct SummaryUseCase;
33
34impl SummaryUseCase {
35    /// Executes the summary use case.
36    pub fn execute(&self, req: SummaryRequest) -> anyhow::Result<SummaryOutcome> {
37        let mut paths = Vec::new();
38        for pattern in req.files {
39            for entry in
40                glob(&pattern).with_context(|| format!("invalid glob pattern: {}", pattern))?
41            {
42                paths.push(entry?);
43            }
44        }
45
46        if paths.is_empty() {
47            anyhow::bail!("no comparison receipts found");
48        }
49
50        let mut failed = false;
51        let mut rows = Vec::new();
52        for path in paths {
53            let content =
54                fs::read_to_string(&path).with_context(|| format!("read {}", path.display()))?;
55            let compare: CompareReceipt = serde_json::from_str(&content)
56                .with_context(|| format!("parse JSON from {}", path.display()))?;
57
58            let benchmark = compare.bench.name.clone();
59            let status = format!("{:?}", compare.verdict.status).to_lowercase();
60            if status == "fail" {
61                failed = true;
62            }
63            let wall = compare.deltas.get(&Metric::WallMs);
64            let (wall_ms, change_pct) = if let Some(d) = wall {
65                (
66                    format!("{:.2}", d.current),
67                    format!("{:.1}%", d.pct * 100.0),
68                )
69            } else {
70                ("N/A".to_string(), "N/A".to_string())
71            };
72
73            rows.push(SummaryRow {
74                benchmark,
75                status,
76                wall_ms,
77                change_pct,
78            });
79        }
80
81        Ok(SummaryOutcome { rows, failed })
82    }
83
84    /// Renders the summary outcome as a Markdown table.
85    pub fn render_markdown(&self, outcome: &SummaryOutcome) -> String {
86        let mut md = String::new();
87        md.push_str("\n| Benchmark | Status | Wall (ms) | Change |\n");
88        md.push_str("|-----------|--------|-----------|--------|\n");
89
90        for row in &outcome.rows {
91            md.push_str(&format!(
92                "| {} | {} | {} | {} |\n",
93                row.benchmark, row.status, row.wall_ms, row.change_pct
94            ));
95        }
96        md
97    }
98}
99
#[cfg(test)]
mod tests {
    use super::*;
    use perfgate_types::{
        BenchMeta, CompareReceipt, CompareRef, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
    };
    use std::collections::BTreeMap;
    use tempfile::tempdir;

    /// End-to-end check: one passing receipt on disk yields exactly one
    /// "pass" row for the expected benchmark.
    #[test]
    fn test_summary_execution() {
        let tmp = tempdir().unwrap();
        let receipt_path = tmp.path().join("run1.json");

        // Minimal passing receipt: empty budgets/deltas, Pass verdict.
        let receipt = CompareReceipt {
            schema: "perfgate.compare.v1".to_string(),
            tool: ToolInfo {
                name: "test".into(),
                version: "0".into(),
            },
            bench: BenchMeta {
                name: "bench1".into(),
                cwd: None,
                command: vec![],
                repeat: 0,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: None,
                run_id: None,
            },
            current_ref: CompareRef {
                path: None,
                run_id: None,
            },
            budgets: BTreeMap::new(),
            deltas: BTreeMap::new(),
            verdict: Verdict {
                status: VerdictStatus::Pass,
                counts: VerdictCounts {
                    pass: 0,
                    warn: 1,
                    fail: 0,
                    skip: 0,
                },
                reasons: vec![],
            },
        };

        fs::write(&receipt_path, serde_json::to_string(&receipt).unwrap()).unwrap();

        let request = SummaryRequest {
            files: vec![receipt_path.to_str().unwrap().to_string()],
        };
        let outcome = SummaryUseCase.execute(request).unwrap();

        assert_eq!(outcome.rows.len(), 1);
        assert_eq!(outcome.rows[0].benchmark, "bench1");
        assert_eq!(outcome.rows[0].status, "pass");
    }
}
164}