// perfgate_app — report.rs
//! Report use case for generating cockpit-compatible report envelopes.
//!
//! This module provides functionality for wrapping a CompareReceipt into
//! a `perfgate.report.v1` envelope suitable for cockpit integration and
//! CI dashboard display.
use perfgate_domain::derive_report;
use perfgate_types::{
    CompareReceipt, Direction, FINDING_CODE_METRIC_FAIL, FindingData, PerfgateReport,
    REPORT_SCHEMA_V1, ReportFinding, ReportSummary, Severity,
};
/// Request for generating a report from a compare receipt.
#[derive(Debug, Clone)]
pub struct ReportRequest {
    /// The compare receipt to wrap into a report. Ownership is taken so the
    /// receipt can be embedded into the resulting report without cloning.
    pub compare: CompareReceipt,
}
19
/// Result of a report generation operation.
#[derive(Debug, Clone)]
pub struct ReportResult {
    /// The generated `perfgate.report.v1` report envelope.
    pub report: PerfgateReport,
}
26
/// Use case for generating perfgate reports.
///
/// Stateless marker type; all behavior lives in [`ReportUseCase::execute`].
pub struct ReportUseCase;
29
30impl ReportUseCase {
31    /// Execute the report generation.
32    ///
33    /// Creates a PerfgateReport from a CompareReceipt by:
34    /// - Setting report_type to "perfgate.report.v1"
35    /// - Copying verdict from compare receipt
36    /// - Including the full compare receipt
37    /// - Deriving findings from domain logic (warn/fail metrics)
38    /// - Computing summary counts
39    ///
40    /// # Invariants
41    ///
42    /// - Report verdict matches compare verdict
43    /// - Finding count equals warn + fail count in deltas
44    /// - Output is deterministic (same input -> same output)
45    pub fn execute(req: ReportRequest) -> ReportResult {
46        let domain_report = derive_report(&req.compare);
47
48        // Convert domain findings to types findings
49        let findings: Vec<ReportFinding> = domain_report
50            .findings
51            .into_iter()
52            .map(|f| {
53                let severity = if f.code == FINDING_CODE_METRIC_FAIL {
54                    Severity::Fail
55                } else {
56                    Severity::Warn
57                };
58
59                let direction = req
60                    .compare
61                    .budgets
62                    .iter()
63                    .find(|(metric, _)| metric_to_string(**metric) == f.data.metric_name)
64                    .map(|(_, budget)| budget.direction)
65                    .unwrap_or(Direction::Lower);
66
67                let message = format!(
68                    "{} for {}: {:.2}% regression (threshold: {:.2}%)",
69                    if severity == Severity::Fail {
70                        "Performance regression exceeded threshold"
71                    } else {
72                        "Performance regression near threshold"
73                    },
74                    f.data.metric_name,
75                    f.data.regression_pct * 100.0,
76                    f.data.threshold * 100.0
77                );
78
79                ReportFinding {
80                    check_id: f.check_id,
81                    code: f.code,
82                    severity,
83                    message,
84                    data: Some(FindingData {
85                        metric_name: f.data.metric_name,
86                        baseline: f.data.baseline,
87                        current: f.data.current,
88                        regression_pct: f.data.regression_pct,
89                        threshold: f.data.threshold,
90                        direction,
91                    }),
92                }
93            })
94            .collect();
95
96        let summary = ReportSummary {
97            pass_count: req.compare.verdict.counts.pass,
98            warn_count: req.compare.verdict.counts.warn,
99            fail_count: req.compare.verdict.counts.fail,
100            skip_count: req.compare.verdict.counts.skip,
101            total_count: req.compare.verdict.counts.pass
102                + req.compare.verdict.counts.warn
103                + req.compare.verdict.counts.fail
104                + req.compare.verdict.counts.skip,
105        };
106
107        let report = PerfgateReport {
108            report_type: REPORT_SCHEMA_V1.to_string(),
109            verdict: req.compare.verdict.clone(),
110            compare: Some(req.compare),
111            findings,
112            summary,
113            profile_path: None,
114        };
115
116        ReportResult { report }
117    }
118}
119
120/// Converts a Metric enum to its string representation.
121fn metric_to_string(metric: perfgate_types::Metric) -> String {
122    metric.as_str().to_string()
123}
124
#[cfg(test)]
mod tests {
    use super::*;
    use perfgate_types::{
        BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, Metric,
        MetricStatistic, MetricStatus, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
    };
    use std::collections::BTreeMap;

    /// Builds a `Delta` with the given measurements; optional fields are left
    /// unset and the statistic is always the median. Ratio/pct are passed
    /// explicitly (not recomputed) so fixture values match snapshots exactly.
    fn delta(
        baseline: f64,
        current: f64,
        ratio: f64,
        pct: f64,
        regression: f64,
        status: MetricStatus,
    ) -> Delta {
        Delta {
            baseline,
            current,
            ratio,
            pct,
            regression,
            cv: None,
            noise_threshold: None,
            statistic: MetricStatistic::Median,
            significance: None,
            status,
        }
    }

    /// Builds a `Verdict` with the given status and pass/warn/fail counts
    /// (skip is always 0 in these fixtures).
    fn verdict(status: VerdictStatus, pass: u32, warn: u32, fail: u32, reasons: Vec<String>) -> Verdict {
        Verdict {
            status,
            counts: VerdictCounts {
                pass,
                warn,
                fail,
                skip: 0,
            },
            reasons,
        }
    }

    /// Builds a single-metric (wall_ms) compare receipt around the given delta
    /// and verdict. All other metadata is fixed and shared across fixtures.
    fn single_metric_receipt(d: Delta, verdict: Verdict) -> CompareReceipt {
        let mut budgets = BTreeMap::new();
        budgets.insert(Metric::WallMs, Budget::new(0.2, 0.18, Direction::Lower));

        let mut deltas = BTreeMap::new();
        deltas.insert(Metric::WallMs, d);

        CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".to_string(),
                version: "0.1.0".to_string(),
            },
            bench: BenchMeta {
                name: "test-bench".to_string(),
                cwd: None,
                command: vec!["echo".to_string(), "hello".to_string()],
                repeat: 5,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: Some("baseline.json".to_string()),
                run_id: Some("baseline-001".to_string()),
            },
            current_ref: CompareRef {
                path: Some("current.json".to_string()),
                run_id: Some("current-001".to_string()),
            },
            budgets,
            deltas,
            verdict,
        }
    }

    /// Builds the warn (wall_ms) + fail (max_rss_kb) budget/delta maps shared
    /// by the multi-metric tests.
    fn multi_metric_maps() -> (BTreeMap<Metric, Budget>, BTreeMap<Metric, Delta>) {
        let mut budgets = BTreeMap::new();
        budgets.insert(Metric::WallMs, Budget::new(0.2, 0.18, Direction::Lower));
        budgets.insert(Metric::MaxRssKb, Budget::new(0.15, 0.135, Direction::Lower));

        let mut deltas = BTreeMap::new();
        deltas.insert(
            Metric::WallMs,
            delta(1000.0, 1190.0, 1.19, 0.19, 0.19, MetricStatus::Warn),
        );
        deltas.insert(
            Metric::MaxRssKb,
            delta(1024.0, 1280.0, 1.25, 0.25, 0.25, MetricStatus::Fail),
        );

        (budgets, deltas)
    }

    fn create_pass_compare_receipt() -> CompareReceipt {
        single_metric_receipt(
            delta(1000.0, 900.0, 0.9, -0.1, 0.0, MetricStatus::Pass),
            verdict(VerdictStatus::Pass, 1, 0, 0, vec![]),
        )
    }

    fn create_warn_compare_receipt() -> CompareReceipt {
        single_metric_receipt(
            delta(1000.0, 1190.0, 1.19, 0.19, 0.19, MetricStatus::Warn),
            verdict(VerdictStatus::Warn, 0, 1, 0, vec!["wall_ms_warn".to_string()]),
        )
    }

    fn create_fail_compare_receipt() -> CompareReceipt {
        single_metric_receipt(
            delta(1000.0, 1500.0, 1.5, 0.5, 0.5, MetricStatus::Fail),
            verdict(VerdictStatus::Fail, 0, 0, 1, vec!["wall_ms_fail".to_string()]),
        )
    }

    #[test]
    fn test_report_from_pass_compare() {
        let compare = create_pass_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });

        assert_eq!(result.report.report_type, REPORT_SCHEMA_V1);
        assert_eq!(result.report.verdict.status, VerdictStatus::Pass);
        assert!(result.report.findings.is_empty());
        assert_eq!(result.report.summary.pass_count, 1);
        assert_eq!(result.report.summary.warn_count, 0);
        assert_eq!(result.report.summary.fail_count, 0);
        assert_eq!(result.report.summary.total_count, 1);
    }

    #[test]
    fn test_report_from_warn_compare() {
        let compare = create_warn_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });

        assert_eq!(result.report.report_type, REPORT_SCHEMA_V1);
        assert_eq!(result.report.verdict.status, VerdictStatus::Warn);
        assert_eq!(result.report.findings.len(), 1);
        assert_eq!(result.report.findings[0].code, "metric_warn");
        assert_eq!(result.report.findings[0].severity, Severity::Warn);
        assert_eq!(result.report.summary.warn_count, 1);
    }

    #[test]
    fn test_report_from_fail_compare() {
        let compare = create_fail_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });

        assert_eq!(result.report.report_type, REPORT_SCHEMA_V1);
        assert_eq!(result.report.verdict.status, VerdictStatus::Fail);
        assert_eq!(result.report.findings.len(), 1);
        assert_eq!(result.report.findings[0].code, "metric_fail");
        assert_eq!(result.report.findings[0].severity, Severity::Fail);
        assert_eq!(result.report.summary.fail_count, 1);
    }

    #[test]
    fn test_report_verdict_matches_compare_verdict() {
        let pass_compare = create_pass_compare_receipt();
        let pass_result = ReportUseCase::execute(ReportRequest {
            compare: pass_compare.clone(),
        });
        assert_eq!(
            pass_result.report.verdict.status,
            pass_compare.verdict.status
        );

        let warn_compare = create_warn_compare_receipt();
        let warn_result = ReportUseCase::execute(ReportRequest {
            compare: warn_compare.clone(),
        });
        assert_eq!(
            warn_result.report.verdict.status,
            warn_compare.verdict.status
        );

        let fail_compare = create_fail_compare_receipt();
        let fail_result = ReportUseCase::execute(ReportRequest {
            compare: fail_compare.clone(),
        });
        assert_eq!(
            fail_result.report.verdict.status,
            fail_compare.verdict.status
        );
    }

    #[test]
    fn snapshot_report_from_pass() {
        let compare = create_pass_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!("report_pass", serde_json::to_value(&result.report).unwrap());
    }

    #[test]
    fn snapshot_report_from_warn() {
        let compare = create_warn_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!("report_warn", serde_json::to_value(&result.report).unwrap());
    }

    #[test]
    fn snapshot_report_from_fail() {
        let compare = create_fail_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!("report_fail", serde_json::to_value(&result.report).unwrap());
    }

    #[test]
    fn snapshot_report_multi_metric_findings() {
        let (budgets, deltas) = multi_metric_maps();

        let compare = CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".to_string(),
                version: "0.1.0".to_string(),
            },
            bench: BenchMeta {
                name: "multi-metric".to_string(),
                cwd: None,
                command: vec!["bench".to_string()],
                repeat: 10,
                warmup: 2,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: Some("baseline.json".to_string()),
                run_id: Some("base-001".to_string()),
            },
            current_ref: CompareRef {
                path: Some("current.json".to_string()),
                run_id: Some("cur-001".to_string()),
            },
            budgets,
            deltas,
            verdict: verdict(
                VerdictStatus::Fail,
                0,
                1,
                1,
                vec!["wall_ms_warn".to_string(), "max_rss_kb_fail".to_string()],
            ),
        };

        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!(
            "report_multi_metric",
            serde_json::to_value(&result.report).unwrap()
        );
    }

    #[test]
    fn test_report_is_deterministic() {
        let compare = create_fail_compare_receipt();

        let result1 = ReportUseCase::execute(ReportRequest {
            compare: compare.clone(),
        });
        let result2 = ReportUseCase::execute(ReportRequest {
            compare: compare.clone(),
        });

        let json1 = serde_json::to_string(&result1.report).unwrap();
        let json2 = serde_json::to_string(&result2.report).unwrap();

        assert_eq!(json1, json2, "Report output should be deterministic");
    }

    #[test]
    fn test_finding_count_equals_warn_plus_fail() {
        let (budgets, deltas) = multi_metric_maps();

        let compare = CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".to_string(),
                version: "0.1.0".to_string(),
            },
            bench: BenchMeta {
                name: "test-bench".to_string(),
                cwd: None,
                command: vec!["test".to_string()],
                repeat: 5,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: None,
                run_id: None,
            },
            current_ref: CompareRef {
                path: None,
                run_id: None,
            },
            budgets,
            deltas,
            verdict: verdict(VerdictStatus::Fail, 0, 1, 1, vec![]),
        };

        let result = ReportUseCase::execute(ReportRequest { compare });

        // Finding count should equal warn + fail
        assert_eq!(result.report.findings.len(), 2);
        assert_eq!(
            result.report.findings.len(),
            (result.report.summary.warn_count + result.report.summary.fail_count) as usize
        );
    }
}
586
#[cfg(test)]
mod property_tests {
    use super::*;
    use perfgate_types::{
        BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, Metric,
        MetricStatistic, MetricStatus, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
    };
    use proptest::prelude::*;
    use std::collections::BTreeMap;

    // --- Strategies for generating CompareReceipt ---

    /// Short alphanumeric/underscore/dash strings, never empty.
    fn non_empty_string() -> impl Strategy<Value = String> {
        // A `&'static str` is itself a regex strategy producing `String`,
        // so no identity `prop_map` is needed.
        "[a-zA-Z0-9_-]{1,20}"
    }

    fn tool_info_strategy() -> impl Strategy<Value = ToolInfo> {
        (non_empty_string(), non_empty_string())
            .prop_map(|(name, version)| ToolInfo { name, version })
    }

    fn bench_meta_strategy() -> impl Strategy<Value = BenchMeta> {
        (
            non_empty_string(),
            proptest::option::of(non_empty_string()),
            proptest::collection::vec(non_empty_string(), 1..5),
            1u32..100,
            0u32..10,
            proptest::option::of(1u64..10000),
            proptest::option::of(100u64..60000),
        )
            .prop_map(
                |(name, cwd, command, repeat, warmup, work_units, timeout_ms)| BenchMeta {
                    name,
                    cwd,
                    command,
                    repeat,
                    warmup,
                    work_units,
                    timeout_ms,
                },
            )
    }

    fn compare_ref_strategy() -> impl Strategy<Value = CompareRef> {
        (
            proptest::option::of(non_empty_string()),
            proptest::option::of(non_empty_string()),
        )
            .prop_map(|(path, run_id)| CompareRef { path, run_id })
    }

    fn direction_strategy() -> impl Strategy<Value = Direction> {
        prop_oneof![Just(Direction::Lower), Just(Direction::Higher),]
    }

    /// Budget with warn_threshold <= threshold (warn_factor is in (0, 1)).
    fn budget_strategy() -> impl Strategy<Value = Budget> {
        (0.01f64..1.0, 0.01f64..1.0, direction_strategy()).prop_map(
            |(threshold, warn_factor, direction)| {
                let warn_threshold = threshold * warn_factor;
                Budget {
                    noise_threshold: None,
                    noise_policy: perfgate_types::NoisePolicy::Ignore,
                    threshold,
                    warn_threshold,
                    direction,
                }
            },
        )
    }

    fn metric_status_strategy() -> impl Strategy<Value = MetricStatus> {
        prop_oneof![
            Just(MetricStatus::Pass),
            Just(MetricStatus::Warn),
            Just(MetricStatus::Fail),
            Just(MetricStatus::Skip),
        ]
    }

    /// Delta with internally consistent ratio/pct/regression derived from
    /// the generated baseline/current pair.
    fn delta_strategy() -> impl Strategy<Value = Delta> {
        (0.1f64..10000.0, 0.1f64..10000.0, metric_status_strategy()).prop_map(
            |(baseline, current, status)| {
                let ratio = current / baseline;
                let pct = (current - baseline) / baseline;
                let regression = if pct > 0.0 { pct } else { 0.0 };
                Delta {
                    baseline,
                    current,
                    ratio,
                    pct,
                    regression,
                    cv: None,
                    noise_threshold: None,
                    statistic: MetricStatistic::Median,
                    significance: None,
                    status,
                }
            },
        )
    }

    fn verdict_status_strategy() -> impl Strategy<Value = VerdictStatus> {
        prop_oneof![
            Just(VerdictStatus::Pass),
            Just(VerdictStatus::Warn),
            Just(VerdictStatus::Fail),
            Just(VerdictStatus::Skip),
        ]
    }

    fn verdict_counts_strategy() -> impl Strategy<Value = VerdictCounts> {
        (0u32..10, 0u32..10, 0u32..10, 0u32..10).prop_map(|(pass, warn, fail, skip)| {
            VerdictCounts {
                pass,
                warn,
                fail,
                skip,
            }
        })
    }

    fn verdict_strategy() -> impl Strategy<Value = Verdict> {
        (
            verdict_status_strategy(),
            verdict_counts_strategy(),
            proptest::collection::vec("[a-zA-Z0-9 ]{1,50}", 0..5),
        )
            .prop_map(|(status, counts, reasons)| Verdict {
                status,
                counts,
                reasons,
            })
    }

    fn metric_strategy() -> impl Strategy<Value = Metric> {
        prop_oneof![
            Just(Metric::WallMs),
            Just(Metric::MaxRssKb),
            Just(Metric::ThroughputPerS),
        ]
    }

    fn budgets_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Budget>> {
        proptest::collection::btree_map(metric_strategy(), budget_strategy(), 0..4)
    }

    fn deltas_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Delta>> {
        proptest::collection::btree_map(metric_strategy(), delta_strategy(), 0..4)
    }

    fn compare_receipt_strategy() -> impl Strategy<Value = CompareReceipt> {
        (
            tool_info_strategy(),
            bench_meta_strategy(),
            compare_ref_strategy(),
            compare_ref_strategy(),
            budgets_map_strategy(),
            deltas_map_strategy(),
            verdict_strategy(),
        )
            .prop_map(
                |(tool, bench, baseline_ref, current_ref, budgets, deltas, verdict)| {
                    CompareReceipt {
                        schema: COMPARE_SCHEMA_V1.to_string(),
                        tool,
                        bench,
                        baseline_ref,
                        current_ref,
                        budgets,
                        deltas,
                        verdict,
                    }
                },
            )
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]

        /// Property: Report verdict always matches compare verdict
        #[test]
        fn report_verdict_matches_compare_verdict(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            prop_assert_eq!(
                result.report.verdict.status,
                compare.verdict.status,
                "Report verdict should match compare verdict"
            );
        }

        /// Property: Finding count equals warn + fail delta count
        #[test]
        fn finding_count_equals_warn_plus_fail(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            let warn_fail_count = compare.deltas.values()
                .filter(|d| d.status == MetricStatus::Warn || d.status == MetricStatus::Fail)
                .count();

            prop_assert_eq!(
                result.report.findings.len(),
                warn_fail_count,
                "Finding count should equal warn + fail delta count"
            );
        }

        /// Property: Report is deterministic (same input -> same output)
        #[test]
        fn report_is_deterministic(compare in compare_receipt_strategy()) {
            let result1 = ReportUseCase::execute(ReportRequest { compare: compare.clone() });
            let result2 = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            let json1 = serde_json::to_string(&result1.report).unwrap();
            let json2 = serde_json::to_string(&result2.report).unwrap();

            prop_assert_eq!(json1, json2, "Report output should be deterministic");
        }

        /// Property: Report type is always perfgate.report.v1
        #[test]
        fn report_type_is_always_v1(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare });

            prop_assert_eq!(
                result.report.report_type,
                REPORT_SCHEMA_V1,
                "Report type should always be perfgate.report.v1"
            );
        }

        /// Property: Summary counts match verdict counts
        #[test]
        fn summary_counts_match_verdict_counts(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            prop_assert_eq!(
                result.report.summary.pass_count,
                compare.verdict.counts.pass,
                "Summary pass count should match verdict counts"
            );
            prop_assert_eq!(
                result.report.summary.warn_count,
                compare.verdict.counts.warn,
                "Summary warn count should match verdict counts"
            );
            prop_assert_eq!(
                result.report.summary.fail_count,
                compare.verdict.counts.fail,
                "Summary fail count should match verdict counts"
            );
            prop_assert_eq!(
                result.report.summary.skip_count,
                compare.verdict.counts.skip,
                "Summary skip count should match verdict counts"
            );
        }

        /// Property: Findings have correct severity
        #[test]
        fn findings_have_correct_severity(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            for finding in &result.report.findings {
                match finding.code.as_str() {
                    "metric_fail" => {
                        prop_assert_eq!(
                            finding.severity,
                            Severity::Fail,
                            "metric_fail findings should have Fail severity"
                        );
                    }
                    "metric_warn" => {
                        prop_assert_eq!(
                            finding.severity,
                            Severity::Warn,
                            "metric_warn findings should have Warn severity"
                        );
                    }
                    _ => {
                        prop_assert!(false, "Unexpected finding code: {}", finding.code);
                    }
                }
            }
        }
    }
}