// perfgate_app/diff.rs
1//! DiffUseCase — git-aware zero-argument comparison.
2//!
3//! This module implements the `diff` workflow which:
4//! 1. Auto-discovers `perfgate.toml` by walking up from cwd
5//! 2. Determines which benchmarks to run (all, or filtered by name)
6//! 3. Finds the baseline for each benchmark
7//! 4. Runs each benchmark
8//! 5. Compares against baseline
9//! 6. Returns structured diff outcomes for terminal rendering
10
11use crate::{Clock, CompareRequest, CompareUseCase, RunBenchRequest, RunBenchUseCase};
12use anyhow::Context;
13use perfgate_adapters::{HostProbe, ProcessRunner};
14use perfgate_types::{
15    BenchConfigFile, CompareReceipt, CompareRef, ConfigFile, HostMismatchPolicy, Metric,
16    MetricStatistic, RunReceipt, ToolInfo, VerdictStatus,
17};
18use std::collections::BTreeMap;
19use std::path::{Path, PathBuf};
20
/// Request for the diff use case.
///
/// Built by the caller (typically the CLI layer) and handed to
/// `DiffUseCase::execute`.
#[derive(Debug, Clone)]
pub struct DiffRequest {
    /// Path to config file. If `None`, auto-discover by walking up from cwd.
    pub config_path: Option<PathBuf>,

    /// Filter to a single benchmark by name.
    /// `execute` fails with a list of available benches if the name is
    /// not present in the config.
    pub bench_filter: Option<String>,

    /// Git ref to compare against (reserved for future use).
    /// NOTE(review): not read anywhere in this module yet.
    pub against: Option<String>,

    /// If true, reduce repeat count for faster feedback.
    /// (`run_single_bench` clamps the effective repeat count to at most 2.)
    pub quick: bool,

    /// If true, produce JSON output instead of terminal rendering.
    /// NOTE(review): the rendering choice is made by the caller; this flag
    /// is not read inside this module — confirm against the CLI layer.
    pub json: bool,

    /// Tool info for receipts.
    pub tool: ToolInfo,
}
42
/// Outcome for a single benchmark diff.
#[derive(Debug, Clone)]
pub struct BenchDiffOutcome {
    /// Name of the benchmark.
    pub bench_name: String,

    /// The run receipt produced.
    pub run_receipt: RunReceipt,

    /// The compare receipt (None if no baseline was found).
    pub compare_receipt: Option<CompareReceipt>,

    /// Path to the baseline that was used (if any).
    /// NOTE(review): `run_single_bench` sets this to the resolved candidate
    /// path even when that file does not exist — check `no_baseline` rather
    /// than relying on `Some` here.
    pub baseline_path: Option<PathBuf>,

    /// True if no baseline was found.
    pub no_baseline: bool,
}
61
/// Overall outcome of the diff command.
#[derive(Debug, Clone)]
pub struct DiffOutcome {
    /// Path to the config file that was used.
    pub config_path: PathBuf,

    /// Per-benchmark outcomes, in the order the benchmarks were run.
    pub bench_outcomes: Vec<BenchDiffOutcome>,

    /// Overall exit code (0=pass, 2=fail).
    /// Only `Fail` verdicts set it to 2; `Warn`/`Skip` leave it at 0.
    pub exit_code: i32,
}
74
75impl DiffOutcome {
76    /// Returns the worst verdict status across all benchmarks.
77    pub fn worst_verdict(&self) -> VerdictStatus {
78        let mut worst = VerdictStatus::Pass;
79        for outcome in &self.bench_outcomes {
80            if let Some(compare) = &outcome.compare_receipt {
81                match compare.verdict.status {
82                    VerdictStatus::Fail => return VerdictStatus::Fail,
83                    VerdictStatus::Warn => worst = VerdictStatus::Warn,
84                    VerdictStatus::Skip if worst == VerdictStatus::Pass => {
85                        worst = VerdictStatus::Skip;
86                    }
87                    _ => {}
88                }
89            }
90        }
91        worst
92    }
93}
94
/// Search for `perfgate.toml` (preferred) or `perfgate.json`, starting at
/// `start` and walking toward the filesystem root.
///
/// Within each directory the TOML name is tried before the JSON name, so a
/// `perfgate.toml` always wins over a sibling `perfgate.json`. Returns
/// `None` once the root is reached without a hit.
pub fn discover_config(start: &Path) -> Option<PathBuf> {
    let mut dir = start.to_path_buf();
    loop {
        for file_name in ["perfgate.toml", "perfgate.json"] {
            let candidate = dir.join(file_name);
            if candidate.is_file() {
                return Some(candidate);
            }
        }
        // `pop` returns false at the root; stop searching there.
        if !dir.pop() {
            return None;
        }
    }
}
112
/// Use case for running a diff workflow.
///
/// Generic over the process runner, host probe, and clock so tests can
/// substitute fakes. Per Rust API convention, trait bounds are stated on
/// the `impl` block (where they are actually needed) rather than on the
/// struct definition — this is strictly more permissive for callers and
/// keeps the bounds in one place.
pub struct DiffUseCase<R, H, C> {
    /// Executes benchmark commands as child processes.
    runner: R,
    /// Collects host metadata for run receipts.
    host_probe: H,
    /// Time source used by benchmark runs.
    clock: C,
}
119
impl<R: ProcessRunner + Clone, H: HostProbe + Clone, C: Clock + Clone> DiffUseCase<R, H, C> {
    /// Construct the use case from its collaborators (process runner,
    /// host probe, clock). They are cloned once per benchmark run, hence
    /// the `Clone` bounds.
    pub fn new(runner: R, host_probe: H, clock: C) -> Self {
        Self {
            runner,
            host_probe,
            clock,
        }
    }

    /// Execute the diff workflow.
    ///
    /// Steps: discover and load the config, validate it, resolve the set of
    /// benchmarks (honoring `req.bench_filter`), then run and compare each
    /// one via `run_single_bench`. Config/IO problems are reported as `Err`;
    /// benchmark regressions are reported through `DiffOutcome::exit_code`
    /// (0 = pass, 2 = at least one Fail verdict).
    pub fn execute(&self, req: DiffRequest) -> anyhow::Result<DiffOutcome> {
        // 1. Discover config
        // Explicit path wins; otherwise walk upward from the cwd.
        let config_path = match &req.config_path {
            Some(p) => p.clone(),
            None => {
                let cwd = std::env::current_dir().context("failed to get current directory")?;
                discover_config(&cwd).ok_or_else(|| {
                    anyhow::anyhow!(
                        "no perfgate.toml found (searched upward from {})",
                        cwd.display()
                    )
                })?
            }
        };

        // 2. Load config
        let config = perfgate_config::load_config_file(&config_path)
            .with_context(|| format!("failed to load config from {}", config_path.display()))?;

        config
            .validate()
            .map_err(|e| anyhow::anyhow!("config validation failed: {}", e))?;

        // 3. Determine which benchmarks to run
        let bench_names: Vec<String> = if let Some(filter) = &req.bench_filter {
            // Verify the bench exists
            if !config.benches.iter().any(|b| &b.name == filter) {
                let available: Vec<&str> = config.benches.iter().map(|b| b.name.as_str()).collect();
                anyhow::bail!(
                    "bench '{}' not found in config; available: {}",
                    filter,
                    available.join(", ")
                );
            }
            vec![filter.clone()]
        } else {
            if config.benches.is_empty() {
                anyhow::bail!("no benchmarks defined in {}", config_path.display());
            }
            config.benches.iter().map(|b| b.name.clone()).collect()
        };

        // 4. Run each benchmark and compare
        let mut bench_outcomes = Vec::new();
        let mut max_exit_code: i32 = 0;

        for bench_name in &bench_names {
            // unwrap is safe: bench_names was built from (or verified
            // against) config.benches just above.
            let bench_config = config
                .benches
                .iter()
                .find(|b| &b.name == bench_name)
                .unwrap();

            let outcome = self.run_single_bench(bench_config, &config, &req)?;

            // Update exit code
            // Only a Fail verdict affects the exit code; Warn and Skip
            // still exit 0.
            if let Some(compare) = &outcome.compare_receipt {
                match compare.verdict.status {
                    VerdictStatus::Fail => {
                        if max_exit_code < 2 {
                            max_exit_code = 2;
                        }
                    }
                    VerdictStatus::Warn | VerdictStatus::Pass | VerdictStatus::Skip => {}
                }
            }

            bench_outcomes.push(outcome);
        }

        Ok(DiffOutcome {
            config_path,
            bench_outcomes,
            exit_code: max_exit_code,
        })
    }

    /// Run one benchmark and, when a baseline receipt exists on disk,
    /// compare the fresh run against it.
    ///
    /// Resolution order for repeat/warmup: per-bench value, then config
    /// defaults, then hard-coded fallbacks (repeat = 5, warmup = 0). In
    /// quick mode the repeat count is clamped into the range 1..=2.
    fn run_single_bench(
        &self,
        bench: &BenchConfigFile,
        config: &ConfigFile,
        req: &DiffRequest,
    ) -> anyhow::Result<BenchDiffOutcome> {
        let defaults = &config.defaults;

        // Build run request
        let mut repeat = bench.repeat.or(defaults.repeat).unwrap_or(5);
        let warmup = bench.warmup.or(defaults.warmup).unwrap_or(0);

        // In quick mode, reduce repeat count
        if req.quick {
            repeat = repeat.clamp(1, 2);
        }

        // Timeouts are configured as humantime strings (e.g. "30s");
        // a malformed string fails the whole bench with context.
        let timeout = bench
            .timeout
            .as_deref()
            .map(|s| {
                humantime::parse_duration(s)
                    .with_context(|| format!("invalid timeout '{}' for bench '{}'", s, bench.name))
            })
            .transpose()?;

        let cwd = bench.cwd.as_ref().map(PathBuf::from);

        let run_request = RunBenchRequest {
            name: bench.name.clone(),
            cwd,
            command: bench.command.clone(),
            repeat,
            warmup,
            work_units: bench.work,
            timeout,
            env: Vec::new(),
            output_cap_bytes: 8192,
            allow_nonzero: false,
            include_hostname_hash: false,
        };

        // Run the benchmark
        let run_usecase = RunBenchUseCase::new(
            self.runner.clone(),
            self.host_probe.clone(),
            self.clock.clone(),
            req.tool.clone(),
        );
        let run_outcome = run_usecase.execute(run_request)?;
        let run_receipt = run_outcome.receipt;

        // Resolve baseline
        let baseline_path =
            perfgate_app_baseline_resolve::resolve_baseline_path(&None, &bench.name, config);

        // Missing baseline is not an error — it just disables comparison.
        let baseline_receipt = if baseline_path.is_file() {
            let content = std::fs::read_to_string(&baseline_path)
                .with_context(|| format!("read baseline {}", baseline_path.display()))?;
            Some(
                serde_json::from_str::<RunReceipt>(&content)
                    .with_context(|| format!("parse baseline {}", baseline_path.display()))?,
            )
        } else {
            None
        };

        // Compare if baseline exists
        let compare_receipt = if let Some(baseline) = &baseline_receipt {
            let (budgets, metric_statistics) =
                build_diff_budgets(bench, config, baseline, &run_receipt)?;

            let compare_req = CompareRequest {
                baseline: baseline.clone(),
                current: run_receipt.clone(),
                budgets,
                metric_statistics,
                significance: None,
                baseline_ref: CompareRef {
                    path: Some(baseline_path.display().to_string()),
                    run_id: Some(baseline.run.id.clone()),
                },
                current_ref: CompareRef {
                    path: None,
                    run_id: Some(run_receipt.run.id.clone()),
                },
                tool: req.tool.clone(),
                // Host differences warn rather than fail the diff.
                host_mismatch_policy: HostMismatchPolicy::Warn,
            };

            Some(CompareUseCase::execute(compare_req)?.receipt)
        } else {
            None
        };

        // NOTE(review): baseline_path is always Some here — even when no
        // baseline file existed; callers must consult `no_baseline`.
        Ok(BenchDiffOutcome {
            bench_name: bench.name.clone(),
            run_receipt,
            compare_receipt,
            baseline_path: Some(baseline_path),
            no_baseline: baseline_receipt.is_none(),
        })
    }
}
311
312/// Build budgets for the diff comparison (simplified from CheckUseCase).
313fn build_diff_budgets(
314    bench: &BenchConfigFile,
315    config: &ConfigFile,
316    baseline: &RunReceipt,
317    current: &RunReceipt,
318) -> anyhow::Result<(
319    BTreeMap<Metric, perfgate_types::Budget>,
320    BTreeMap<Metric, MetricStatistic>,
321)> {
322    let defaults = &config.defaults;
323    let global_threshold = defaults.threshold.unwrap_or(0.20);
324    let global_warn_factor = defaults.warn_factor.unwrap_or(0.90);
325
326    let mut candidates = Vec::new();
327    candidates.push(Metric::WallMs);
328    if baseline.stats.cpu_ms.is_some() && current.stats.cpu_ms.is_some() {
329        candidates.push(Metric::CpuMs);
330    }
331    if baseline.stats.page_faults.is_some() && current.stats.page_faults.is_some() {
332        candidates.push(Metric::PageFaults);
333    }
334    if baseline.stats.ctx_switches.is_some() && current.stats.ctx_switches.is_some() {
335        candidates.push(Metric::CtxSwitches);
336    }
337    if baseline.stats.max_rss_kb.is_some() && current.stats.max_rss_kb.is_some() {
338        candidates.push(Metric::MaxRssKb);
339    }
340    if baseline.stats.binary_bytes.is_some() && current.stats.binary_bytes.is_some() {
341        candidates.push(Metric::BinaryBytes);
342    }
343    if baseline.stats.throughput_per_s.is_some() && current.stats.throughput_per_s.is_some() {
344        candidates.push(Metric::ThroughputPerS);
345    }
346
347    let mut budgets = BTreeMap::new();
348    let mut metric_statistics = BTreeMap::new();
349
350    for metric in candidates {
351        let override_opt = bench.budgets.as_ref().and_then(|b| b.get(&metric).cloned());
352
353        let threshold = override_opt
354            .as_ref()
355            .and_then(|o| o.threshold)
356            .unwrap_or(global_threshold);
357
358        let warn_factor = override_opt
359            .as_ref()
360            .and_then(|o| o.warn_factor)
361            .unwrap_or(global_warn_factor);
362
363        let warn_threshold = threshold * warn_factor;
364
365        let noise_threshold = override_opt
366            .as_ref()
367            .and_then(|o| o.noise_threshold)
368            .or(defaults.noise_threshold);
369
370        let noise_policy = override_opt
371            .as_ref()
372            .and_then(|o| o.noise_policy)
373            .or(defaults.noise_policy)
374            .unwrap_or(perfgate_types::NoisePolicy::Warn);
375
376        let direction = override_opt
377            .as_ref()
378            .and_then(|o| o.direction)
379            .unwrap_or_else(|| metric.default_direction());
380
381        let statistic = override_opt
382            .as_ref()
383            .and_then(|o| o.statistic)
384            .unwrap_or(MetricStatistic::Median);
385
386        budgets.insert(
387            metric,
388            perfgate_types::Budget {
389                threshold,
390                warn_threshold,
391                noise_threshold,
392                noise_policy,
393                direction,
394            },
395        );
396
397        metric_statistics.insert(metric, statistic);
398    }
399
400    Ok((budgets, metric_statistics))
401}
402
// Module alias to avoid name collision with the crate itself
mod perfgate_app_baseline_resolve {
    // Re-export so `run_single_bench` can call the resolver through a
    // path that cannot be confused with the external crate name.
    pub use crate::baseline_resolve::resolve_baseline_path;
}
407
408/// Render a terminal-friendly colored diff output from a DiffOutcome.
409pub fn render_terminal_diff(outcome: &DiffOutcome) -> String {
410    use crate::{format_metric_with_statistic, format_pct, format_value};
411
412    let mut out = String::new();
413
414    for bench_outcome in &outcome.bench_outcomes {
415        out.push_str(&format!("bench: {}\n", bench_outcome.bench_name));
416
417        if bench_outcome.no_baseline {
418            out.push_str("  (no baseline found, skipping comparison)\n\n");
419            continue;
420        }
421
422        if let Some(compare) = &bench_outcome.compare_receipt {
423            let verdict_label = match compare.verdict.status {
424                VerdictStatus::Pass => "PASS",
425                VerdictStatus::Warn => "WARN",
426                VerdictStatus::Fail => "FAIL",
427                VerdictStatus::Skip => "SKIP",
428            };
429            out.push_str(&format!("  verdict: {}\n", verdict_label));
430
431            for (metric, delta) in &compare.deltas {
432                let name = format_metric_with_statistic(*metric, delta.statistic);
433                let baseline_str = format_value(*metric, delta.baseline);
434                let current_str = format_value(*metric, delta.current);
435                let pct_str = format_pct(delta.pct);
436                let unit = metric.display_unit();
437
438                let status_indicator = match delta.status {
439                    perfgate_types::MetricStatus::Pass => " ",
440                    perfgate_types::MetricStatus::Warn => "~",
441                    perfgate_types::MetricStatus::Fail => "!",
442                    perfgate_types::MetricStatus::Skip => "-",
443                };
444
445                out.push_str(&format!(
446                    "  {status_indicator} {name}: {baseline_str} {unit} -> {current_str} {unit} ({pct_str})\n"
447                ));
448            }
449        }
450
451        out.push('\n');
452    }
453
454    out
455}
456
457/// Render the diff outcome as JSON.
458pub fn render_json_diff(outcome: &DiffOutcome) -> anyhow::Result<String> {
459    let mut entries = Vec::new();
460
461    for bench_outcome in &outcome.bench_outcomes {
462        let entry = serde_json::json!({
463            "bench": bench_outcome.bench_name,
464            "no_baseline": bench_outcome.no_baseline,
465            "compare": bench_outcome.compare_receipt,
466        });
467        entries.push(entry);
468    }
469
470    let output = serde_json::json!({
471        "config": outcome.config_path.display().to_string(),
472        "exit_code": outcome.exit_code,
473        "benchmarks": entries,
474    });
475
476    serde_json::to_string_pretty(&output).context("serialize diff output")
477}
478
#[cfg(test)]
mod tests {
    use super::*;
    use std::path::PathBuf;

    // --- discover_config -------------------------------------------------

    #[test]
    fn discover_config_finds_toml_in_current_dir() {
        let tmp = tempfile::tempdir().unwrap();
        let config_path = tmp.path().join("perfgate.toml");
        std::fs::write(&config_path, "[defaults]\n").unwrap();

        let found = discover_config(tmp.path());
        assert_eq!(found, Some(config_path));
    }

    #[test]
    fn discover_config_finds_toml_in_parent() {
        let tmp = tempfile::tempdir().unwrap();
        let config_path = tmp.path().join("perfgate.toml");
        std::fs::write(&config_path, "[defaults]\n").unwrap();

        // Search starts in a child directory and must walk up to find it.
        let child = tmp.path().join("subdir");
        std::fs::create_dir_all(&child).unwrap();

        let found = discover_config(&child);
        assert_eq!(found, Some(config_path));
    }

    #[test]
    fn discover_config_prefers_toml_over_json() {
        let tmp = tempfile::tempdir().unwrap();
        let toml_path = tmp.path().join("perfgate.toml");
        let json_path = tmp.path().join("perfgate.json");
        std::fs::write(&toml_path, "[defaults]\n").unwrap();
        std::fs::write(&json_path, "{}").unwrap();

        // Both files exist in the same dir; TOML wins.
        let found = discover_config(tmp.path());
        assert_eq!(found, Some(toml_path));
    }

    #[test]
    fn discover_config_falls_back_to_json() {
        let tmp = tempfile::tempdir().unwrap();
        let json_path = tmp.path().join("perfgate.json");
        std::fs::write(&json_path, "{}").unwrap();

        let found = discover_config(tmp.path());
        assert_eq!(found, Some(json_path));
    }

    #[test]
    fn discover_config_returns_none_when_not_found() {
        let tmp = tempfile::tempdir().unwrap();
        let found = discover_config(tmp.path());
        assert!(found.is_none());
    }

    // --- rendering --------------------------------------------------------

    #[test]
    fn render_terminal_diff_no_baseline() {
        let outcome = DiffOutcome {
            config_path: PathBuf::from("perfgate.toml"),
            bench_outcomes: vec![BenchDiffOutcome {
                bench_name: "my-bench".to_string(),
                run_receipt: make_dummy_run_receipt(),
                compare_receipt: None,
                baseline_path: None,
                no_baseline: true,
            }],
            exit_code: 0,
        };

        let rendered = render_terminal_diff(&outcome);
        assert!(rendered.contains("my-bench"));
        assert!(rendered.contains("no baseline found"));
    }

    #[test]
    fn render_terminal_diff_with_comparison() {
        use perfgate_types::*;

        // One wall-time delta: 100ms -> 110ms, a +10% move within budget.
        let mut deltas = BTreeMap::new();
        deltas.insert(
            Metric::WallMs,
            Delta {
                baseline: 100.0,
                current: 110.0,
                ratio: 1.10,
                pct: 0.10,
                regression: 0.10,
                statistic: MetricStatistic::Median,
                significance: None,
                cv: None,
                noise_threshold: None,
                status: MetricStatus::Pass,
            },
        );

        let mut budgets = BTreeMap::new();
        budgets.insert(Metric::WallMs, Budget::new(0.2, 0.18, Direction::Lower));

        let compare = CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".into(),
                version: "0.1.0".into(),
            },
            bench: BenchMeta {
                name: "my-bench".into(),
                cwd: None,
                command: vec!["echo".into()],
                repeat: 2,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: None,
                run_id: None,
            },
            current_ref: CompareRef {
                path: None,
                run_id: None,
            },
            budgets,
            deltas,
            verdict: Verdict {
                status: VerdictStatus::Pass,
                counts: VerdictCounts {
                    pass: 1,
                    warn: 0,
                    fail: 0,
                    skip: 0,
                },
                reasons: vec![],
            },
        };

        let outcome = DiffOutcome {
            config_path: PathBuf::from("perfgate.toml"),
            bench_outcomes: vec![BenchDiffOutcome {
                bench_name: "my-bench".to_string(),
                run_receipt: make_dummy_run_receipt(),
                compare_receipt: Some(compare),
                baseline_path: Some(PathBuf::from("baselines/my-bench.json")),
                no_baseline: false,
            }],
            exit_code: 0,
        };

        let rendered = render_terminal_diff(&outcome);
        assert!(rendered.contains("my-bench"));
        assert!(rendered.contains("PASS"));
        assert!(rendered.contains("wall_ms"));
        assert!(rendered.contains("+10.00%"));
    }

    #[test]
    fn render_json_diff_produces_valid_json() {
        let outcome = DiffOutcome {
            config_path: PathBuf::from("perfgate.toml"),
            bench_outcomes: vec![BenchDiffOutcome {
                bench_name: "test".to_string(),
                run_receipt: make_dummy_run_receipt(),
                compare_receipt: None,
                baseline_path: None,
                no_baseline: true,
            }],
            exit_code: 0,
        };

        // Round-trip through serde to confirm well-formed JSON.
        let json = render_json_diff(&outcome).unwrap();
        let parsed: serde_json::Value = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed["exit_code"], 0);
        assert_eq!(parsed["benchmarks"][0]["bench"], "test");
        assert_eq!(parsed["benchmarks"][0]["no_baseline"], true);
    }

    // --- worst_verdict ----------------------------------------------------

    #[test]
    fn worst_verdict_returns_fail_when_any_fail() {
        use perfgate_types::*;

        let pass_compare = make_compare_with_verdict(VerdictStatus::Pass);
        let fail_compare = make_compare_with_verdict(VerdictStatus::Fail);

        let outcome = DiffOutcome {
            config_path: PathBuf::from("perfgate.toml"),
            bench_outcomes: vec![
                BenchDiffOutcome {
                    bench_name: "a".to_string(),
                    run_receipt: make_dummy_run_receipt(),
                    compare_receipt: Some(pass_compare),
                    baseline_path: None,
                    no_baseline: false,
                },
                BenchDiffOutcome {
                    bench_name: "b".to_string(),
                    run_receipt: make_dummy_run_receipt(),
                    compare_receipt: Some(fail_compare),
                    baseline_path: None,
                    no_baseline: false,
                },
            ],
            exit_code: 2,
        };

        assert_eq!(outcome.worst_verdict(), VerdictStatus::Fail);
    }

    #[test]
    fn worst_verdict_returns_pass_when_no_comparisons() {
        let outcome = DiffOutcome {
            config_path: PathBuf::from("perfgate.toml"),
            bench_outcomes: vec![BenchDiffOutcome {
                bench_name: "a".to_string(),
                run_receipt: make_dummy_run_receipt(),
                compare_receipt: None,
                baseline_path: None,
                no_baseline: true,
            }],
            exit_code: 0,
        };

        assert_eq!(outcome.worst_verdict(), VerdictStatus::Pass);
    }

    // --- fixtures ----------------------------------------------------------

    /// Minimal valid RunReceipt: two-repeat echo bench with only the
    /// mandatory wall-time summary filled in.
    fn make_dummy_run_receipt() -> RunReceipt {
        use perfgate_types::*;

        RunReceipt {
            schema: RUN_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".into(),
                version: "0.1.0".into(),
            },
            bench: BenchMeta {
                name: "test".into(),
                cwd: None,
                command: vec!["echo".into()],
                repeat: 2,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            run: RunMeta {
                id: "test-id".into(),
                started_at: "2024-01-01T00:00:00Z".into(),
                ended_at: "2024-01-01T00:00:01Z".into(),
                host: HostInfo {
                    os: "linux".into(),
                    arch: "x86_64".into(),
                    cpu_count: None,
                    memory_bytes: None,
                    hostname_hash: None,
                },
            },
            samples: vec![],
            stats: Stats {
                wall_ms: U64Summary::new(100, 90, 110),
                cpu_ms: None,
                page_faults: None,
                ctx_switches: None,
                max_rss_kb: None,
                binary_bytes: None,
                throughput_per_s: None,
                io_read_bytes: None,
                io_write_bytes: None,
                energy_uj: None,
                network_packets: None,
            },
        }
    }

    /// CompareReceipt with the given overall verdict and no deltas/budgets;
    /// used to exercise verdict aggregation only.
    fn make_compare_with_verdict(status: VerdictStatus) -> CompareReceipt {
        use perfgate_types::*;

        CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".into(),
                version: "0.1.0".into(),
            },
            bench: BenchMeta {
                name: "test".into(),
                cwd: None,
                command: vec!["echo".into()],
                repeat: 2,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: None,
                run_id: None,
            },
            current_ref: CompareRef {
                path: None,
                run_id: None,
            },
            budgets: BTreeMap::new(),
            deltas: BTreeMap::new(),
            verdict: Verdict {
                status,
                counts: VerdictCounts {
                    pass: 0,
                    warn: 0,
                    fail: 0,
                    skip: 0,
                },
                reasons: vec![],
            },
        }
    }
}