// perfgate_app/report.rs
//! Report use case for generating cockpit-compatible report envelopes.
//!
//! This module provides functionality for wrapping a CompareReceipt into
//! a `perfgate.report.v1` envelope suitable for cockpit integration and
//! CI dashboard display.

use perfgate_domain::derive_report;
use perfgate_types::{
    CompareReceipt, Direction, FINDING_CODE_METRIC_FAIL, FindingData, PerfgateReport,
    REPORT_SCHEMA_V1, ReportFinding, ReportSummary, Severity,
};
12
/// Request for generating a report from a compare receipt.
#[derive(Debug, Clone)]
pub struct ReportRequest {
    /// The compare receipt to wrap into a report. Ownership is taken so the
    /// receipt can be embedded into the generated report without cloning.
    pub compare: CompareReceipt,
}
19
/// Result of a report generation operation.
#[derive(Debug, Clone)]
pub struct ReportResult {
    /// The generated `perfgate.report.v1` report envelope.
    pub report: PerfgateReport,
}
26
/// Use case for generating perfgate reports.
///
/// Stateless: all behavior lives in the associated `execute` function.
pub struct ReportUseCase;
29
30impl ReportUseCase {
31    /// Execute the report generation.
32    ///
33    /// Creates a PerfgateReport from a CompareReceipt by:
34    /// - Setting report_type to "perfgate.report.v1"
35    /// - Copying verdict from compare receipt
36    /// - Including the full compare receipt
37    /// - Deriving findings from domain logic (warn/fail metrics)
38    /// - Computing summary counts
39    ///
40    /// # Invariants
41    ///
42    /// - Report verdict matches compare verdict
43    /// - Finding count equals warn + fail count in deltas
44    /// - Output is deterministic (same input -> same output)
45    pub fn execute(req: ReportRequest) -> ReportResult {
46        let domain_report = derive_report(&req.compare);
47
48        // Convert domain findings to types findings
49        let findings: Vec<ReportFinding> = domain_report
50            .findings
51            .into_iter()
52            .map(|f| {
53                let severity = if f.code == FINDING_CODE_METRIC_FAIL {
54                    Severity::Fail
55                } else {
56                    Severity::Warn
57                };
58
59                let direction = req
60                    .compare
61                    .budgets
62                    .iter()
63                    .find(|(metric, _)| metric_to_string(**metric) == f.data.metric_name)
64                    .map(|(_, budget)| budget.direction)
65                    .unwrap_or(Direction::Lower);
66
67                let message = format!(
68                    "{} for {}: {:.2}% regression (threshold: {:.2}%)",
69                    if severity == Severity::Fail {
70                        "Performance regression exceeded threshold"
71                    } else {
72                        "Performance regression near threshold"
73                    },
74                    f.data.metric_name,
75                    f.data.regression_pct * 100.0,
76                    f.data.threshold * 100.0
77                );
78
79                ReportFinding {
80                    check_id: f.check_id,
81                    code: f.code,
82                    severity,
83                    message,
84                    data: Some(FindingData {
85                        metric_name: f.data.metric_name,
86                        baseline: f.data.baseline,
87                        current: f.data.current,
88                        regression_pct: f.data.regression_pct,
89                        threshold: f.data.threshold,
90                        direction,
91                    }),
92                }
93            })
94            .collect();
95
96        let summary = ReportSummary {
97            pass_count: req.compare.verdict.counts.pass,
98            warn_count: req.compare.verdict.counts.warn,
99            fail_count: req.compare.verdict.counts.fail,
100            total_count: req.compare.verdict.counts.pass
101                + req.compare.verdict.counts.warn
102                + req.compare.verdict.counts.fail,
103        };
104
105        let report = PerfgateReport {
106            report_type: REPORT_SCHEMA_V1.to_string(),
107            verdict: req.compare.verdict.clone(),
108            compare: Some(req.compare),
109            findings,
110            summary,
111        };
112
113        ReportResult { report }
114    }
115}
116
117/// Converts a Metric enum to its string representation.
118fn metric_to_string(metric: perfgate_types::Metric) -> String {
119    metric.as_str().to_string()
120}
121
#[cfg(test)]
mod tests {
    use super::*;
    use perfgate_types::{
        BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, Metric,
        MetricStatistic, MetricStatus, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
    };
    use std::collections::BTreeMap;

    /// Standard wall-time budget shared by every single-metric fixture.
    fn wall_budget() -> BTreeMap<Metric, Budget> {
        let mut budgets = BTreeMap::new();
        budgets.insert(
            Metric::WallMs,
            Budget {
                threshold: 0.2,
                warn_threshold: 0.18,
                direction: Direction::Lower,
            },
        );
        budgets
    }

    /// Builds a single-metric receipt around the given wall-time delta and
    /// verdict; every other field is identical across the pass/warn/fail
    /// fixtures so snapshots stay stable.
    fn receipt_with(delta: Delta, verdict: Verdict) -> CompareReceipt {
        let mut deltas = BTreeMap::new();
        deltas.insert(Metric::WallMs, delta);

        CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".to_string(),
                version: "0.1.0".to_string(),
            },
            bench: BenchMeta {
                name: "test-bench".to_string(),
                cwd: None,
                command: vec!["echo".to_string(), "hello".to_string()],
                repeat: 5,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: Some("baseline.json".to_string()),
                run_id: Some("baseline-001".to_string()),
            },
            current_ref: CompareRef {
                path: Some("current.json".to_string()),
                run_id: Some("current-001".to_string()),
            },
            budgets: wall_budget(),
            deltas,
            verdict,
        }
    }

    fn create_pass_compare_receipt() -> CompareReceipt {
        receipt_with(
            Delta {
                baseline: 1000.0,
                current: 900.0,
                ratio: 0.9,
                pct: -0.1,
                regression: 0.0,
                statistic: MetricStatistic::Median,
                significance: None,
                status: MetricStatus::Pass,
            },
            Verdict {
                status: VerdictStatus::Pass,
                counts: VerdictCounts {
                    pass: 1,
                    warn: 0,
                    fail: 0,
                },
                reasons: vec![],
            },
        )
    }

    fn create_warn_compare_receipt() -> CompareReceipt {
        receipt_with(
            Delta {
                baseline: 1000.0,
                current: 1190.0,
                ratio: 1.19,
                pct: 0.19,
                regression: 0.19,
                statistic: MetricStatistic::Median,
                significance: None,
                status: MetricStatus::Warn,
            },
            Verdict {
                status: VerdictStatus::Warn,
                counts: VerdictCounts {
                    pass: 0,
                    warn: 1,
                    fail: 0,
                },
                reasons: vec!["wall_ms_warn".to_string()],
            },
        )
    }

    fn create_fail_compare_receipt() -> CompareReceipt {
        receipt_with(
            Delta {
                baseline: 1000.0,
                current: 1500.0,
                ratio: 1.5,
                pct: 0.5,
                regression: 0.5,
                statistic: MetricStatistic::Median,
                significance: None,
                status: MetricStatus::Fail,
            },
            Verdict {
                status: VerdictStatus::Fail,
                counts: VerdictCounts {
                    pass: 0,
                    warn: 0,
                    fail: 1,
                },
                reasons: vec!["wall_ms_fail".to_string()],
            },
        )
    }

    /// Two-metric budgets (wall time + max RSS) shared by the multi-metric tests.
    fn multi_metric_budgets() -> BTreeMap<Metric, Budget> {
        let mut budgets = wall_budget();
        budgets.insert(
            Metric::MaxRssKb,
            Budget {
                threshold: 0.15,
                warn_threshold: 0.135,
                direction: Direction::Lower,
            },
        );
        budgets
    }

    /// Two-metric deltas (wall-time warn + RSS fail) shared by the multi-metric tests.
    fn multi_metric_deltas() -> BTreeMap<Metric, Delta> {
        let mut deltas = BTreeMap::new();
        deltas.insert(
            Metric::WallMs,
            Delta {
                baseline: 1000.0,
                current: 1190.0,
                ratio: 1.19,
                pct: 0.19,
                regression: 0.19,
                statistic: MetricStatistic::Median,
                significance: None,
                status: MetricStatus::Warn,
            },
        );
        deltas.insert(
            Metric::MaxRssKb,
            Delta {
                baseline: 1024.0,
                current: 1280.0,
                ratio: 1.25,
                pct: 0.25,
                regression: 0.25,
                statistic: MetricStatistic::Median,
                significance: None,
                status: MetricStatus::Fail,
            },
        );
        deltas
    }

    #[test]
    fn test_report_from_pass_compare() {
        let compare = create_pass_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });

        assert_eq!(result.report.report_type, REPORT_SCHEMA_V1);
        assert_eq!(result.report.verdict.status, VerdictStatus::Pass);
        assert!(result.report.findings.is_empty());
        assert_eq!(result.report.summary.pass_count, 1);
        assert_eq!(result.report.summary.warn_count, 0);
        assert_eq!(result.report.summary.fail_count, 0);
        assert_eq!(result.report.summary.total_count, 1);
    }

    #[test]
    fn test_report_from_warn_compare() {
        let compare = create_warn_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });

        assert_eq!(result.report.report_type, REPORT_SCHEMA_V1);
        assert_eq!(result.report.verdict.status, VerdictStatus::Warn);
        assert_eq!(result.report.findings.len(), 1);
        assert_eq!(result.report.findings[0].code, "metric_warn");
        assert_eq!(result.report.findings[0].severity, Severity::Warn);
        assert_eq!(result.report.summary.warn_count, 1);
    }

    #[test]
    fn test_report_from_fail_compare() {
        let compare = create_fail_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });

        assert_eq!(result.report.report_type, REPORT_SCHEMA_V1);
        assert_eq!(result.report.verdict.status, VerdictStatus::Fail);
        assert_eq!(result.report.findings.len(), 1);
        assert_eq!(result.report.findings[0].code, "metric_fail");
        assert_eq!(result.report.findings[0].severity, Severity::Fail);
        assert_eq!(result.report.summary.fail_count, 1);
    }

    #[test]
    fn test_report_verdict_matches_compare_verdict() {
        let pass_compare = create_pass_compare_receipt();
        let pass_result = ReportUseCase::execute(ReportRequest {
            compare: pass_compare.clone(),
        });
        assert_eq!(
            pass_result.report.verdict.status,
            pass_compare.verdict.status
        );

        let warn_compare = create_warn_compare_receipt();
        let warn_result = ReportUseCase::execute(ReportRequest {
            compare: warn_compare.clone(),
        });
        assert_eq!(
            warn_result.report.verdict.status,
            warn_compare.verdict.status
        );

        let fail_compare = create_fail_compare_receipt();
        let fail_result = ReportUseCase::execute(ReportRequest {
            compare: fail_compare.clone(),
        });
        assert_eq!(
            fail_result.report.verdict.status,
            fail_compare.verdict.status
        );
    }

    #[test]
    fn snapshot_report_from_pass() {
        let compare = create_pass_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!("report_pass", serde_json::to_value(&result.report).unwrap());
    }

    #[test]
    fn snapshot_report_from_warn() {
        let compare = create_warn_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!("report_warn", serde_json::to_value(&result.report).unwrap());
    }

    #[test]
    fn snapshot_report_from_fail() {
        let compare = create_fail_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!("report_fail", serde_json::to_value(&result.report).unwrap());
    }

    #[test]
    fn snapshot_report_multi_metric_findings() {
        let compare = CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".to_string(),
                version: "0.1.0".to_string(),
            },
            bench: BenchMeta {
                name: "multi-metric".to_string(),
                cwd: None,
                command: vec!["bench".to_string()],
                repeat: 10,
                warmup: 2,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: Some("baseline.json".to_string()),
                run_id: Some("base-001".to_string()),
            },
            current_ref: CompareRef {
                path: Some("current.json".to_string()),
                run_id: Some("cur-001".to_string()),
            },
            budgets: multi_metric_budgets(),
            deltas: multi_metric_deltas(),
            verdict: Verdict {
                status: VerdictStatus::Fail,
                counts: VerdictCounts {
                    pass: 0,
                    warn: 1,
                    fail: 1,
                },
                reasons: vec!["wall_ms_warn".to_string(), "max_rss_kb_fail".to_string()],
            },
        };

        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!(
            "report_multi_metric",
            serde_json::to_value(&result.report).unwrap()
        );
    }

    #[test]
    fn test_report_is_deterministic() {
        let compare = create_fail_compare_receipt();

        let result1 = ReportUseCase::execute(ReportRequest {
            compare: compare.clone(),
        });
        let result2 = ReportUseCase::execute(ReportRequest {
            compare: compare.clone(),
        });

        let json1 = serde_json::to_string(&result1.report).unwrap();
        let json2 = serde_json::to_string(&result2.report).unwrap();

        assert_eq!(json1, json2, "Report output should be deterministic");
    }

    #[test]
    fn test_finding_count_equals_warn_plus_fail() {
        let compare = CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".to_string(),
                version: "0.1.0".to_string(),
            },
            bench: BenchMeta {
                name: "test-bench".to_string(),
                cwd: None,
                command: vec!["test".to_string()],
                repeat: 5,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: None,
                run_id: None,
            },
            current_ref: CompareRef {
                path: None,
                run_id: None,
            },
            budgets: multi_metric_budgets(),
            deltas: multi_metric_deltas(),
            verdict: Verdict {
                status: VerdictStatus::Fail,
                counts: VerdictCounts {
                    pass: 0,
                    warn: 1,
                    fail: 1,
                },
                reasons: vec![],
            },
        };

        let result = ReportUseCase::execute(ReportRequest { compare });

        // Finding count should equal warn + fail
        assert_eq!(result.report.findings.len(), 2);
        assert_eq!(
            result.report.findings.len(),
            (result.report.summary.warn_count + result.report.summary.fail_count) as usize
        );
    }
}
613
#[cfg(test)]
mod property_tests {
    use super::*;
    use perfgate_types::{
        BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, Metric,
        MetricStatistic, MetricStatus, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
    };
    use proptest::prelude::*;
    use std::collections::BTreeMap;

    // --- Strategies for generating CompareReceipt ---

    /// Strategy for short, non-empty identifier-like strings.
    fn non_empty_string() -> impl Strategy<Value = String> {
        // A `&'static str` regex literal is itself a `Strategy<Value = String>`,
        // so the previous identity `prop_map(|s| s)` was redundant.
        "[a-zA-Z0-9_-]{1,20}"
    }

    fn tool_info_strategy() -> impl Strategy<Value = ToolInfo> {
        (non_empty_string(), non_empty_string())
            .prop_map(|(name, version)| ToolInfo { name, version })
    }

    fn bench_meta_strategy() -> impl Strategy<Value = BenchMeta> {
        (
            non_empty_string(),
            proptest::option::of(non_empty_string()),
            proptest::collection::vec(non_empty_string(), 1..5),
            1u32..100,
            0u32..10,
            proptest::option::of(1u64..10000),
            proptest::option::of(100u64..60000),
        )
            .prop_map(
                |(name, cwd, command, repeat, warmup, work_units, timeout_ms)| BenchMeta {
                    name,
                    cwd,
                    command,
                    repeat,
                    warmup,
                    work_units,
                    timeout_ms,
                },
            )
    }

    fn compare_ref_strategy() -> impl Strategy<Value = CompareRef> {
        (
            proptest::option::of(non_empty_string()),
            proptest::option::of(non_empty_string()),
        )
            .prop_map(|(path, run_id)| CompareRef { path, run_id })
    }

    fn direction_strategy() -> impl Strategy<Value = Direction> {
        prop_oneof![Just(Direction::Lower), Just(Direction::Higher),]
    }

    /// Budgets with `warn_threshold <= threshold` (warn factor is in (0, 1)).
    fn budget_strategy() -> impl Strategy<Value = Budget> {
        (0.01f64..1.0, 0.01f64..1.0, direction_strategy()).prop_map(
            |(threshold, warn_factor, direction)| {
                let warn_threshold = threshold * warn_factor;
                Budget {
                    threshold,
                    warn_threshold,
                    direction,
                }
            },
        )
    }

    fn metric_status_strategy() -> impl Strategy<Value = MetricStatus> {
        prop_oneof![
            Just(MetricStatus::Pass),
            Just(MetricStatus::Warn),
            Just(MetricStatus::Fail),
        ]
    }

    /// Deltas with internally-consistent ratio/pct/regression derived from
    /// the generated baseline/current pair.
    fn delta_strategy() -> impl Strategy<Value = Delta> {
        (0.1f64..10000.0, 0.1f64..10000.0, metric_status_strategy()).prop_map(
            |(baseline, current, status)| {
                let ratio = current / baseline;
                let pct = (current - baseline) / baseline;
                let regression = if pct > 0.0 { pct } else { 0.0 };
                Delta {
                    baseline,
                    current,
                    ratio,
                    pct,
                    regression,
                    statistic: MetricStatistic::Median,
                    significance: None,
                    status,
                }
            },
        )
    }

    fn verdict_status_strategy() -> impl Strategy<Value = VerdictStatus> {
        prop_oneof![
            Just(VerdictStatus::Pass),
            Just(VerdictStatus::Warn),
            Just(VerdictStatus::Fail),
        ]
    }

    fn verdict_counts_strategy() -> impl Strategy<Value = VerdictCounts> {
        (0u32..10, 0u32..10, 0u32..10).prop_map(|(pass, warn, fail)| VerdictCounts {
            pass,
            warn,
            fail,
        })
    }

    fn verdict_strategy() -> impl Strategy<Value = Verdict> {
        (
            verdict_status_strategy(),
            verdict_counts_strategy(),
            proptest::collection::vec("[a-zA-Z0-9 ]{1,50}", 0..5),
        )
            .prop_map(|(status, counts, reasons)| Verdict {
                status,
                counts,
                reasons,
            })
    }

    fn metric_strategy() -> impl Strategy<Value = Metric> {
        prop_oneof![
            Just(Metric::WallMs),
            Just(Metric::MaxRssKb),
            Just(Metric::ThroughputPerS),
        ]
    }

    fn budgets_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Budget>> {
        proptest::collection::btree_map(metric_strategy(), budget_strategy(), 0..4)
    }

    fn deltas_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Delta>> {
        proptest::collection::btree_map(metric_strategy(), delta_strategy(), 0..4)
    }

    fn compare_receipt_strategy() -> impl Strategy<Value = CompareReceipt> {
        (
            tool_info_strategy(),
            bench_meta_strategy(),
            compare_ref_strategy(),
            compare_ref_strategy(),
            budgets_map_strategy(),
            deltas_map_strategy(),
            verdict_strategy(),
        )
            .prop_map(
                |(tool, bench, baseline_ref, current_ref, budgets, deltas, verdict)| {
                    CompareReceipt {
                        schema: COMPARE_SCHEMA_V1.to_string(),
                        tool,
                        bench,
                        baseline_ref,
                        current_ref,
                        budgets,
                        deltas,
                        verdict,
                    }
                },
            )
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]

        /// Property: Report verdict always matches compare verdict
        #[test]
        fn report_verdict_matches_compare_verdict(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            prop_assert_eq!(
                result.report.verdict.status,
                compare.verdict.status,
                "Report verdict should match compare verdict"
            );
        }

        /// Property: Finding count equals warn + fail delta count
        #[test]
        fn finding_count_equals_warn_plus_fail(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            let warn_fail_count = compare.deltas.values()
                .filter(|d| d.status == MetricStatus::Warn || d.status == MetricStatus::Fail)
                .count();

            prop_assert_eq!(
                result.report.findings.len(),
                warn_fail_count,
                "Finding count should equal warn + fail delta count"
            );
        }

        /// Property: Report is deterministic (same input -> same output)
        #[test]
        fn report_is_deterministic(compare in compare_receipt_strategy()) {
            let result1 = ReportUseCase::execute(ReportRequest { compare: compare.clone() });
            let result2 = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            let json1 = serde_json::to_string(&result1.report).unwrap();
            let json2 = serde_json::to_string(&result2.report).unwrap();

            prop_assert_eq!(json1, json2, "Report output should be deterministic");
        }

        /// Property: Report type is always perfgate.report.v1
        #[test]
        fn report_type_is_always_v1(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare });

            prop_assert_eq!(
                result.report.report_type,
                REPORT_SCHEMA_V1,
                "Report type should always be perfgate.report.v1"
            );
        }

        /// Property: Summary counts match verdict counts
        #[test]
        fn summary_counts_match_verdict_counts(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            prop_assert_eq!(
                result.report.summary.pass_count,
                compare.verdict.counts.pass,
                "Summary pass count should match verdict counts"
            );
            prop_assert_eq!(
                result.report.summary.warn_count,
                compare.verdict.counts.warn,
                "Summary warn count should match verdict counts"
            );
            prop_assert_eq!(
                result.report.summary.fail_count,
                compare.verdict.counts.fail,
                "Summary fail count should match verdict counts"
            );
        }

        /// Property: Findings have correct severity
        #[test]
        fn findings_have_correct_severity(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            for finding in &result.report.findings {
                match finding.code.as_str() {
                    "metric_fail" => {
                        prop_assert_eq!(
                            finding.severity,
                            Severity::Fail,
                            "metric_fail findings should have Fail severity"
                        );
                    }
                    "metric_warn" => {
                        prop_assert_eq!(
                            finding.severity,
                            Severity::Warn,
                            "metric_warn findings should have Warn severity"
                        );
                    }
                    _ => {
                        prop_assert!(false, "Unexpected finding code: {}", finding.code);
                    }
                }
            }
        }
    }
}