1use perfgate_domain::derive_report;
8use perfgate_types::{
9 CompareReceipt, Direction, FINDING_CODE_METRIC_FAIL, FindingData, PerfgateReport,
10 REPORT_SCHEMA_V1, ReportFinding, ReportSummary, Severity,
11};
12
/// Input to [`ReportUseCase::execute`]: the compare receipt a report is
/// derived from.
#[derive(Debug, Clone)]
pub struct ReportRequest {
    // Comparison receipt produced by the compare stage; consumed (moved
    // into the resulting report) by `execute`.
    pub compare: CompareReceipt,
}
19
/// Output of [`ReportUseCase::execute`]: the fully assembled report.
#[derive(Debug, Clone)]
pub struct ReportResult {
    // Versioned report (schema `REPORT_SCHEMA_V1`) containing verdict,
    // findings, summary, and the embedded compare receipt.
    pub report: PerfgateReport,
}
26
/// Stateless use case that turns a [`CompareReceipt`] into a [`PerfgateReport`].
pub struct ReportUseCase;
29
30impl ReportUseCase {
31 pub fn execute(req: ReportRequest) -> ReportResult {
46 let domain_report = derive_report(&req.compare);
47
48 let findings: Vec<ReportFinding> = domain_report
50 .findings
51 .into_iter()
52 .map(|f| {
53 let severity = if f.code == FINDING_CODE_METRIC_FAIL {
54 Severity::Fail
55 } else {
56 Severity::Warn
57 };
58
59 let direction = req
60 .compare
61 .budgets
62 .iter()
63 .find(|(metric, _)| metric_to_string(**metric) == f.data.metric_name)
64 .map(|(_, budget)| budget.direction)
65 .unwrap_or(Direction::Lower);
66
67 let message = format!(
68 "{} for {}: {:.2}% regression (threshold: {:.2}%)",
69 if severity == Severity::Fail {
70 "Performance regression exceeded threshold"
71 } else {
72 "Performance regression near threshold"
73 },
74 f.data.metric_name,
75 f.data.regression_pct * 100.0,
76 f.data.threshold * 100.0
77 );
78
79 ReportFinding {
80 check_id: f.check_id,
81 code: f.code,
82 severity,
83 message,
84 data: Some(FindingData {
85 metric_name: f.data.metric_name,
86 baseline: f.data.baseline,
87 current: f.data.current,
88 regression_pct: f.data.regression_pct,
89 threshold: f.data.threshold,
90 direction,
91 }),
92 }
93 })
94 .collect();
95
96 let summary = ReportSummary {
97 pass_count: req.compare.verdict.counts.pass,
98 warn_count: req.compare.verdict.counts.warn,
99 fail_count: req.compare.verdict.counts.fail,
100 skip_count: req.compare.verdict.counts.skip,
101 total_count: req.compare.verdict.counts.pass
102 + req.compare.verdict.counts.warn
103 + req.compare.verdict.counts.fail
104 + req.compare.verdict.counts.skip,
105 };
106
107 let report = PerfgateReport {
108 report_type: REPORT_SCHEMA_V1.to_string(),
109 verdict: req.compare.verdict.clone(),
110 compare: Some(req.compare),
111 findings,
112 summary,
113 };
114
115 ReportResult { report }
116 }
117}
118
119fn metric_to_string(metric: perfgate_types::Metric) -> String {
121 metric.as_str().to_string()
122}
123
#[cfg(test)]
mod tests {
    use super::*;
    use perfgate_types::{
        BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, Metric,
        MetricStatistic, MetricStatus, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
    };
    use std::collections::BTreeMap;

    /// Builds a `Delta`, filling in the noise/statistic boilerplate shared
    /// by every fixture (no CV, no noise threshold, median statistic).
    fn delta(
        baseline: f64,
        current: f64,
        ratio: f64,
        pct: f64,
        regression: f64,
        status: MetricStatus,
    ) -> Delta {
        Delta {
            baseline,
            current,
            ratio,
            pct,
            regression,
            cv: None,
            noise_threshold: None,
            statistic: MetricStatistic::Median,
            significance: None,
            status,
        }
    }

    /// Builds a `Verdict`; the skip count is always zero in these fixtures.
    fn verdict(status: VerdictStatus, pass: u32, warn: u32, fail: u32, reasons: Vec<String>) -> Verdict {
        Verdict {
            status,
            counts: VerdictCounts {
                pass,
                warn,
                fail,
                skip: 0,
            },
            reasons,
        }
    }

    /// Single-metric (wall_ms) receipt with the bench/ref boilerplate shared
    /// by the pass/warn/fail fixtures. Field values are kept identical to the
    /// previous hand-rolled constructors so snapshots stay stable.
    fn wall_ms_receipt(d: Delta, v: Verdict) -> CompareReceipt {
        let mut budgets = BTreeMap::new();
        budgets.insert(Metric::WallMs, Budget::new(0.2, 0.18, Direction::Lower));

        let mut deltas = BTreeMap::new();
        deltas.insert(Metric::WallMs, d);

        CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".to_string(),
                version: "0.1.0".to_string(),
            },
            bench: BenchMeta {
                name: "test-bench".to_string(),
                cwd: None,
                command: vec!["echo".to_string(), "hello".to_string()],
                repeat: 5,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: Some("baseline.json".to_string()),
                run_id: Some("baseline-001".to_string()),
            },
            current_ref: CompareRef {
                path: Some("current.json".to_string()),
                run_id: Some("current-001".to_string()),
            },
            budgets,
            deltas,
            verdict: v,
        }
    }

    fn create_pass_compare_receipt() -> CompareReceipt {
        wall_ms_receipt(
            delta(1000.0, 900.0, 0.9, -0.1, 0.0, MetricStatus::Pass),
            verdict(VerdictStatus::Pass, 1, 0, 0, vec![]),
        )
    }

    fn create_warn_compare_receipt() -> CompareReceipt {
        wall_ms_receipt(
            delta(1000.0, 1190.0, 1.19, 0.19, 0.19, MetricStatus::Warn),
            verdict(VerdictStatus::Warn, 0, 1, 0, vec!["wall_ms_warn".to_string()]),
        )
    }

    fn create_fail_compare_receipt() -> CompareReceipt {
        wall_ms_receipt(
            delta(1000.0, 1500.0, 1.5, 0.5, 0.5, MetricStatus::Fail),
            verdict(VerdictStatus::Fail, 0, 0, 1, vec!["wall_ms_fail".to_string()]),
        )
    }

    #[test]
    fn test_report_from_pass_compare() {
        let compare = create_pass_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });

        assert_eq!(result.report.report_type, REPORT_SCHEMA_V1);
        assert_eq!(result.report.verdict.status, VerdictStatus::Pass);
        assert!(result.report.findings.is_empty());
        assert_eq!(result.report.summary.pass_count, 1);
        assert_eq!(result.report.summary.warn_count, 0);
        assert_eq!(result.report.summary.fail_count, 0);
        assert_eq!(result.report.summary.total_count, 1);
    }

    #[test]
    fn test_report_from_warn_compare() {
        let compare = create_warn_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });

        assert_eq!(result.report.report_type, REPORT_SCHEMA_V1);
        assert_eq!(result.report.verdict.status, VerdictStatus::Warn);
        assert_eq!(result.report.findings.len(), 1);
        assert_eq!(result.report.findings[0].code, "metric_warn");
        assert_eq!(result.report.findings[0].severity, Severity::Warn);
        assert_eq!(result.report.summary.warn_count, 1);
    }

    #[test]
    fn test_report_from_fail_compare() {
        let compare = create_fail_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });

        assert_eq!(result.report.report_type, REPORT_SCHEMA_V1);
        assert_eq!(result.report.verdict.status, VerdictStatus::Fail);
        assert_eq!(result.report.findings.len(), 1);
        assert_eq!(result.report.findings[0].code, "metric_fail");
        assert_eq!(result.report.findings[0].severity, Severity::Fail);
        assert_eq!(result.report.summary.fail_count, 1);
    }

    #[test]
    fn test_report_verdict_matches_compare_verdict() {
        let pass_compare = create_pass_compare_receipt();
        let pass_result = ReportUseCase::execute(ReportRequest {
            compare: pass_compare.clone(),
        });
        assert_eq!(
            pass_result.report.verdict.status,
            pass_compare.verdict.status
        );

        let warn_compare = create_warn_compare_receipt();
        let warn_result = ReportUseCase::execute(ReportRequest {
            compare: warn_compare.clone(),
        });
        assert_eq!(
            warn_result.report.verdict.status,
            warn_compare.verdict.status
        );

        let fail_compare = create_fail_compare_receipt();
        let fail_result = ReportUseCase::execute(ReportRequest {
            compare: fail_compare.clone(),
        });
        assert_eq!(
            fail_result.report.verdict.status,
            fail_compare.verdict.status
        );
    }

    #[test]
    fn snapshot_report_from_pass() {
        let compare = create_pass_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!("report_pass", serde_json::to_value(&result.report).unwrap());
    }

    #[test]
    fn snapshot_report_from_warn() {
        let compare = create_warn_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!("report_warn", serde_json::to_value(&result.report).unwrap());
    }

    #[test]
    fn snapshot_report_from_fail() {
        let compare = create_fail_compare_receipt();
        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!("report_fail", serde_json::to_value(&result.report).unwrap());
    }

    #[test]
    fn snapshot_report_multi_metric_findings() {
        let mut budgets = BTreeMap::new();
        budgets.insert(Metric::WallMs, Budget::new(0.2, 0.18, Direction::Lower));
        budgets.insert(Metric::MaxRssKb, Budget::new(0.15, 0.135, Direction::Lower));

        let mut deltas = BTreeMap::new();
        deltas.insert(
            Metric::WallMs,
            delta(1000.0, 1190.0, 1.19, 0.19, 0.19, MetricStatus::Warn),
        );
        deltas.insert(
            Metric::MaxRssKb,
            delta(1024.0, 1280.0, 1.25, 0.25, 0.25, MetricStatus::Fail),
        );

        let compare = CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".to_string(),
                version: "0.1.0".to_string(),
            },
            bench: BenchMeta {
                name: "multi-metric".to_string(),
                cwd: None,
                command: vec!["bench".to_string()],
                repeat: 10,
                warmup: 2,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: Some("baseline.json".to_string()),
                run_id: Some("base-001".to_string()),
            },
            current_ref: CompareRef {
                path: Some("current.json".to_string()),
                run_id: Some("cur-001".to_string()),
            },
            budgets,
            deltas,
            verdict: verdict(
                VerdictStatus::Fail,
                0,
                1,
                1,
                vec!["wall_ms_warn".to_string(), "max_rss_kb_fail".to_string()],
            ),
        };

        let result = ReportUseCase::execute(ReportRequest { compare });
        insta::assert_json_snapshot!(
            "report_multi_metric",
            serde_json::to_value(&result.report).unwrap()
        );
    }

    #[test]
    fn test_report_is_deterministic() {
        let compare = create_fail_compare_receipt();

        let result1 = ReportUseCase::execute(ReportRequest {
            compare: compare.clone(),
        });
        let result2 = ReportUseCase::execute(ReportRequest {
            compare: compare.clone(),
        });

        let json1 = serde_json::to_string(&result1.report).unwrap();
        let json2 = serde_json::to_string(&result2.report).unwrap();

        assert_eq!(json1, json2, "Report output should be deterministic");
    }

    #[test]
    fn test_finding_count_equals_warn_plus_fail() {
        let mut budgets = BTreeMap::new();
        budgets.insert(Metric::WallMs, Budget::new(0.2, 0.18, Direction::Lower));
        budgets.insert(Metric::MaxRssKb, Budget::new(0.15, 0.135, Direction::Lower));

        let mut deltas = BTreeMap::new();
        deltas.insert(
            Metric::WallMs,
            delta(1000.0, 1190.0, 1.19, 0.19, 0.19, MetricStatus::Warn),
        );
        deltas.insert(
            Metric::MaxRssKb,
            delta(1024.0, 1280.0, 1.25, 0.25, 0.25, MetricStatus::Fail),
        );

        let compare = CompareReceipt {
            schema: COMPARE_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".to_string(),
                version: "0.1.0".to_string(),
            },
            bench: BenchMeta {
                name: "test-bench".to_string(),
                cwd: None,
                command: vec!["test".to_string()],
                repeat: 5,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            baseline_ref: CompareRef {
                path: None,
                run_id: None,
            },
            current_ref: CompareRef {
                path: None,
                run_id: None,
            },
            budgets,
            deltas,
            verdict: verdict(VerdictStatus::Fail, 0, 1, 1, vec![]),
        };

        let result = ReportUseCase::execute(ReportRequest { compare });

        assert_eq!(result.report.findings.len(), 2);
        assert_eq!(
            result.report.findings.len(),
            (result.report.summary.warn_count + result.report.summary.fail_count) as usize
        );
    }
}
585
#[cfg(test)]
mod property_tests {
    use super::*;
    use perfgate_types::{
        BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, Metric,
        MetricStatistic, MetricStatus, ToolInfo, Verdict, VerdictCounts, VerdictStatus,
    };
    use proptest::prelude::*;
    use std::collections::BTreeMap;

    /// Non-empty identifier-like strings. A `&'static str` regex literal is
    /// itself a `Strategy<Value = String>`, so no identity `prop_map` is needed.
    fn non_empty_string() -> impl Strategy<Value = String> {
        "[a-zA-Z0-9_-]{1,20}"
    }

    fn tool_info_strategy() -> impl Strategy<Value = ToolInfo> {
        (non_empty_string(), non_empty_string())
            .prop_map(|(name, version)| ToolInfo { name, version })
    }

    fn bench_meta_strategy() -> impl Strategy<Value = BenchMeta> {
        (
            non_empty_string(),
            proptest::option::of(non_empty_string()),
            proptest::collection::vec(non_empty_string(), 1..5),
            1u32..100,
            0u32..10,
            proptest::option::of(1u64..10000),
            proptest::option::of(100u64..60000),
        )
            .prop_map(
                |(name, cwd, command, repeat, warmup, work_units, timeout_ms)| BenchMeta {
                    name,
                    cwd,
                    command,
                    repeat,
                    warmup,
                    work_units,
                    timeout_ms,
                },
            )
    }

    fn compare_ref_strategy() -> impl Strategy<Value = CompareRef> {
        (
            proptest::option::of(non_empty_string()),
            proptest::option::of(non_empty_string()),
        )
            .prop_map(|(path, run_id)| CompareRef { path, run_id })
    }

    fn direction_strategy() -> impl Strategy<Value = Direction> {
        prop_oneof![Just(Direction::Lower), Just(Direction::Higher),]
    }

    /// Budgets where `warn_threshold <= threshold` by construction
    /// (warn factor is in (0, 1) of the fail threshold... factor < 1.0).
    fn budget_strategy() -> impl Strategy<Value = Budget> {
        (0.01f64..1.0, 0.01f64..1.0, direction_strategy()).prop_map(
            |(threshold, warn_factor, direction)| {
                let warn_threshold = threshold * warn_factor;
                Budget {
                    noise_threshold: None,
                    noise_policy: perfgate_types::NoisePolicy::Ignore,
                    threshold,
                    warn_threshold,
                    direction,
                }
            },
        )
    }

    fn metric_status_strategy() -> impl Strategy<Value = MetricStatus> {
        prop_oneof![
            Just(MetricStatus::Pass),
            Just(MetricStatus::Warn),
            Just(MetricStatus::Fail),
            Just(MetricStatus::Skip),
        ]
    }

    /// Deltas with internally consistent ratio/pct/regression derived from
    /// the generated baseline/current pair.
    fn delta_strategy() -> impl Strategy<Value = Delta> {
        (0.1f64..10000.0, 0.1f64..10000.0, metric_status_strategy()).prop_map(
            |(baseline, current, status)| {
                let ratio = current / baseline;
                let pct = (current - baseline) / baseline;
                let regression = if pct > 0.0 { pct } else { 0.0 };
                Delta {
                    baseline,
                    current,
                    ratio,
                    pct,
                    regression,
                    cv: None,
                    noise_threshold: None,
                    statistic: MetricStatistic::Median,
                    significance: None,
                    status,
                }
            },
        )
    }

    fn verdict_status_strategy() -> impl Strategy<Value = VerdictStatus> {
        prop_oneof![
            Just(VerdictStatus::Pass),
            Just(VerdictStatus::Warn),
            Just(VerdictStatus::Fail),
            Just(VerdictStatus::Skip),
        ]
    }

    fn verdict_counts_strategy() -> impl Strategy<Value = VerdictCounts> {
        (0u32..10, 0u32..10, 0u32..10, 0u32..10).prop_map(|(pass, warn, fail, skip)| {
            VerdictCounts {
                pass,
                warn,
                fail,
                skip,
            }
        })
    }

    fn verdict_strategy() -> impl Strategy<Value = Verdict> {
        (
            verdict_status_strategy(),
            verdict_counts_strategy(),
            proptest::collection::vec("[a-zA-Z0-9 ]{1,50}", 0..5),
        )
            .prop_map(|(status, counts, reasons)| Verdict {
                status,
                counts,
                reasons,
            })
    }

    fn metric_strategy() -> impl Strategy<Value = Metric> {
        prop_oneof![
            Just(Metric::WallMs),
            Just(Metric::MaxRssKb),
            Just(Metric::ThroughputPerS),
        ]
    }

    fn budgets_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Budget>> {
        proptest::collection::btree_map(metric_strategy(), budget_strategy(), 0..4)
    }

    fn deltas_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Delta>> {
        proptest::collection::btree_map(metric_strategy(), delta_strategy(), 0..4)
    }

    fn compare_receipt_strategy() -> impl Strategy<Value = CompareReceipt> {
        (
            tool_info_strategy(),
            bench_meta_strategy(),
            compare_ref_strategy(),
            compare_ref_strategy(),
            budgets_map_strategy(),
            deltas_map_strategy(),
            verdict_strategy(),
        )
            .prop_map(
                |(tool, bench, baseline_ref, current_ref, budgets, deltas, verdict)| {
                    CompareReceipt {
                        schema: COMPARE_SCHEMA_V1.to_string(),
                        tool,
                        bench,
                        baseline_ref,
                        current_ref,
                        budgets,
                        deltas,
                        verdict,
                    }
                },
            )
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]

        #[test]
        fn report_verdict_matches_compare_verdict(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            prop_assert_eq!(
                result.report.verdict.status,
                compare.verdict.status,
                "Report verdict should match compare verdict"
            );
        }

        #[test]
        fn finding_count_equals_warn_plus_fail(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            let warn_fail_count = compare.deltas.values()
                .filter(|d| d.status == MetricStatus::Warn || d.status == MetricStatus::Fail)
                .count();

            prop_assert_eq!(
                result.report.findings.len(),
                warn_fail_count,
                "Finding count should equal warn + fail delta count"
            );
        }

        #[test]
        fn report_is_deterministic(compare in compare_receipt_strategy()) {
            let result1 = ReportUseCase::execute(ReportRequest { compare: compare.clone() });
            let result2 = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            let json1 = serde_json::to_string(&result1.report).unwrap();
            let json2 = serde_json::to_string(&result2.report).unwrap();

            prop_assert_eq!(json1, json2, "Report output should be deterministic");
        }

        #[test]
        fn report_type_is_always_v1(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare });

            prop_assert_eq!(
                result.report.report_type,
                REPORT_SCHEMA_V1,
                "Report type should always be perfgate.report.v1"
            );
        }

        #[test]
        fn summary_counts_match_verdict_counts(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            prop_assert_eq!(
                result.report.summary.pass_count,
                compare.verdict.counts.pass,
                "Summary pass count should match verdict counts"
            );
            prop_assert_eq!(
                result.report.summary.warn_count,
                compare.verdict.counts.warn,
                "Summary warn count should match verdict counts"
            );
            prop_assert_eq!(
                result.report.summary.fail_count,
                compare.verdict.counts.fail,
                "Summary fail count should match verdict counts"
            );
            prop_assert_eq!(
                result.report.summary.skip_count,
                compare.verdict.counts.skip,
                "Summary skip count should match verdict counts"
            );
        }

        #[test]
        fn findings_have_correct_severity(compare in compare_receipt_strategy()) {
            let result = ReportUseCase::execute(ReportRequest { compare: compare.clone() });

            for finding in &result.report.findings {
                match finding.code.as_str() {
                    "metric_fail" => {
                        prop_assert_eq!(
                            finding.severity,
                            Severity::Fail,
                            "metric_fail findings should have Fail severity"
                        );
                    }
                    "metric_warn" => {
                        prop_assert_eq!(
                            finding.severity,
                            Severity::Warn,
                            "metric_warn findings should have Warn severity"
                        );
                    }
                    _ => {
                        prop_assert!(false, "Unexpected finding code: {}", finding.code);
                    }
                }
            }
        }
    }
}