// exspec_core/output.rs

1use std::collections::HashSet;
2
3use crate::metrics::ProjectMetrics;
4use crate::rules::{Diagnostic, Severity};
5
/// Report renderer selected by the caller.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OutputFormat {
    /// Human-readable text (see `format_terminal`).
    Terminal,
    /// Pretty-printed JSON document (see `format_json`).
    Json,
    /// SARIF 2.1.0 document for code-scanning integrations (see `format_sarif`).
    Sarif,
}
12
13/// Count unique violated functions by (file, line) pairs.
14/// Only per-function diagnostics (line=Some) are counted.
15fn count_violated_functions(diagnostics: &[Diagnostic]) -> usize {
16    diagnostics
17        .iter()
18        .filter_map(|d| d.line.map(|l| (d.file.as_str(), l)))
19        .collect::<HashSet<_>>()
20        .len()
21}
22
23pub fn format_terminal(
24    diagnostics: &[Diagnostic],
25    file_count: usize,
26    function_count: usize,
27    metrics: &ProjectMetrics,
28) -> String {
29    let mut lines = Vec::new();
30
31    lines.push(format!(
32        "exspec v{} -- {} test files, {} test functions",
33        env!("CARGO_PKG_VERSION"),
34        file_count,
35        function_count,
36    ));
37
38    if file_count == 0 {
39        lines.push("No test files found. Check --lang filter or run from a directory containing test files.".to_string());
40    }
41
42    for d in diagnostics {
43        let line_str = d.line.map(|l| format!(":{l}")).unwrap_or_default();
44        lines.push(format!(
45            "{} {}{} {} {}",
46            d.severity, d.file, line_str, d.rule, d.message,
47        ));
48    }
49
50    // Metrics section
51    lines.push("Metrics:".to_string());
52    lines.push(format!(
53        "  Mock density:      {:.1}/test (avg), {} distinct classes/test (max)",
54        metrics.mock_density_avg, metrics.mock_class_max,
55    ));
56
57    let total_functions_for_param = if function_count > 0 {
58        let count = (metrics.parameterized_ratio * function_count as f64).round() as usize;
59        format!("{count}/{function_count}")
60    } else {
61        "0/0".to_string()
62    };
63    lines.push(format!(
64        "  Parameterized:     {:.0}% ({})",
65        metrics.parameterized_ratio * 100.0,
66        total_functions_for_param,
67    ));
68
69    let pbt_files = (metrics.pbt_ratio * file_count as f64).round() as usize;
70    lines.push(format!(
71        "  PBT usage:         {:.0}% ({}/{} files)",
72        metrics.pbt_ratio * 100.0,
73        pbt_files,
74        file_count,
75    ));
76
77    lines.push(format!(
78        "  Assertion density: {:.1}/test (avg)",
79        metrics.assertion_density_avg,
80    ));
81
82    let contract_files = (metrics.contract_coverage * file_count as f64).round() as usize;
83    lines.push(format!(
84        "  Contract coverage: {:.0}% ({}/{} files)",
85        metrics.contract_coverage * 100.0,
86        contract_files,
87        file_count,
88    ));
89
90    // Score section
91    let block_count = diagnostics
92        .iter()
93        .filter(|d| d.severity == Severity::Block)
94        .count();
95    let warn_count = diagnostics
96        .iter()
97        .filter(|d| d.severity == Severity::Warn)
98        .count();
99    let info_count = diagnostics
100        .iter()
101        .filter(|d| d.severity == Severity::Info)
102        .count();
103    let violated = count_violated_functions(diagnostics);
104    let pass_count = function_count.saturating_sub(violated);
105    lines.push(format!(
106        "Score: BLOCK {block_count} | WARN {warn_count} | INFO {info_count} | PASS {pass_count}",
107    ));
108
109    lines.join("\n")
110}
111
112pub fn format_json(
113    diagnostics: &[Diagnostic],
114    file_count: usize,
115    function_count: usize,
116    metrics: &ProjectMetrics,
117    unfiltered_summary: Option<&SummaryStats>,
118) -> String {
119    let (block_count, warn_count, info_count, pass_count) = if let Some(stats) = unfiltered_summary
120    {
121        (
122            stats.block_count,
123            stats.warn_count,
124            stats.info_count,
125            stats.pass_count,
126        )
127    } else {
128        let block_count = diagnostics
129            .iter()
130            .filter(|d| d.severity == Severity::Block)
131            .count();
132        let warn_count = diagnostics
133            .iter()
134            .filter(|d| d.severity == Severity::Warn)
135            .count();
136        let info_count = diagnostics
137            .iter()
138            .filter(|d| d.severity == Severity::Info)
139            .count();
140        let violated = count_violated_functions(diagnostics);
141        let pass_count = function_count.saturating_sub(violated);
142        (block_count, warn_count, info_count, pass_count)
143    };
144
145    let mut output = serde_json::json!({
146        "version": env!("CARGO_PKG_VERSION"),
147        "summary": {
148            "files": file_count,
149            "functions": function_count,
150            "block": block_count,
151            "warn": warn_count,
152            "info": info_count,
153            "pass": pass_count,
154        },
155        "diagnostics": diagnostics,
156        "metrics": serde_json::to_value(metrics).unwrap_or_default(),
157    });
158
159    if file_count == 0 {
160        output["guidance"] = serde_json::json!("No test files found. Check --lang filter or run from a directory containing test files.");
161    }
162    serde_json::to_string_pretty(&output).unwrap_or_else(|_| "{}".to_string())
163}
164
/// Static metadata describing one lint rule, used to populate the SARIF
/// `tool.driver.rules` array in `format_sarif`.
struct RuleMeta {
    /// Stable rule identifier, e.g. "T001".
    id: &'static str,
    /// Short kebab-case rule name, e.g. "assertion-free".
    name: &'static str,
    /// One-line human-readable description of the rule.
    short_description: &'static str,
}
170
/// All rules advertised in SARIF output. T0xx are project/metric-level rules,
/// T1xx are per-test smell rules.
/// NOTE(review): T104 is absent from this registry — confirm that is
/// intentional (retired/reserved id) rather than an omission.
const RULE_REGISTRY: &[RuleMeta] = &[
    RuleMeta {
        id: "T001",
        name: "assertion-free",
        short_description: "Test function has no assertions",
    },
    RuleMeta {
        id: "T002",
        name: "mock-overuse",
        short_description: "Test function uses too many mocks",
    },
    RuleMeta {
        id: "T003",
        name: "giant-test",
        short_description: "Test function exceeds line count threshold",
    },
    RuleMeta {
        id: "T004",
        name: "no-parameterized",
        short_description: "Low ratio of parameterized tests",
    },
    RuleMeta {
        id: "T005",
        name: "pbt-missing",
        short_description: "No property-based testing library imported",
    },
    RuleMeta {
        id: "T006",
        name: "low-assertion-density",
        short_description: "Low assertion count per test function",
    },
    RuleMeta {
        id: "T007",
        name: "test-source-ratio",
        short_description: "Test file to source file ratio",
    },
    RuleMeta {
        id: "T008",
        name: "no-contract",
        short_description: "No contract testing library used in tests",
    },
    RuleMeta {
        id: "T101",
        name: "how-not-what",
        short_description: "Test verifies implementation rather than behavior",
    },
    RuleMeta {
        id: "T102",
        name: "fixture-sprawl",
        short_description: "Test depends on too many fixtures",
    },
    RuleMeta {
        id: "T103",
        name: "missing-error-test",
        short_description: "No error/exception test found in file",
    },
    RuleMeta {
        id: "T105",
        name: "deterministic-no-metamorphic",
        short_description: "All assertions use exact equality, no relational checks",
    },
    RuleMeta {
        id: "T106",
        name: "duplicate-literal-assertion",
        short_description: "Same literal appears multiple times in assertions",
    },
    RuleMeta {
        id: "T107",
        name: "assertion-roulette",
        short_description: "Multiple assertions without failure messages",
    },
    RuleMeta {
        id: "T108",
        name: "wait-and-see",
        short_description: "Test uses sleep/delay causing flaky tests",
    },
    RuleMeta {
        id: "T109",
        name: "undescriptive-test-name",
        short_description: "Test name does not describe behavior",
    },
    RuleMeta {
        id: "T110",
        name: "skip-only-test",
        short_description: "Test skips or marks incomplete without assertions",
    },
];
258
259pub fn format_sarif(diagnostics: &[Diagnostic]) -> String {
260    use serde_sarif::sarif;
261
262    let rules: Vec<sarif::ReportingDescriptor> = RULE_REGISTRY
263        .iter()
264        .map(|r| {
265            sarif::ReportingDescriptor::builder()
266                .id(r.id)
267                .name(r.name)
268                .short_description(&String::from(r.short_description))
269                .build()
270        })
271        .collect();
272
273    let results: Vec<sarif::Result> = diagnostics
274        .iter()
275        .map(|d| {
276            let level = match d.severity {
277                Severity::Block => sarif::ResultLevel::Error,
278                Severity::Warn => sarif::ResultLevel::Warning,
279                Severity::Info => sarif::ResultLevel::Note,
280            };
281            let start_line = d.line.unwrap_or(1) as i64;
282            let location = sarif::Location::builder()
283                .physical_location(
284                    sarif::PhysicalLocation::builder()
285                        .artifact_location(sarif::ArtifactLocation::builder().uri(&d.file).build())
286                        .region(sarif::Region::builder().start_line(start_line).build())
287                        .build(),
288                )
289                .build();
290
291            sarif::Result::builder()
292                .rule_id(&d.rule.0)
293                .message(sarif::Message::builder().text(&d.message).build())
294                .level(level)
295                .locations(vec![location])
296                .build()
297        })
298        .collect();
299
300    let tool_component = sarif::ToolComponent::builder()
301        .name("exspec")
302        .version(env!("CARGO_PKG_VERSION"))
303        .rules(rules)
304        .build();
305
306    let invocation = sarif::Invocation::builder()
307        .execution_successful(true)
308        .build();
309
310    let run = sarif::Run::builder()
311        .tool(tool_component)
312        .results(results)
313        .invocations(vec![invocation])
314        .build();
315
316    let sarif_doc = sarif::Sarif::builder()
317        .version(sarif::Version::V2_1_0.to_string())
318        .schema(sarif::SCHEMA_URL)
319        .runs(vec![run])
320        .build();
321
322    serde_json::to_string_pretty(&sarif_doc).unwrap_or_else(|_| "{}".to_string())
323}
324
325/// Filter diagnostics to only include those at or above the given minimum severity.
326pub fn filter_by_severity(diagnostics: &[Diagnostic], min: Severity) -> Vec<Diagnostic> {
327    diagnostics
328        .iter()
329        .filter(|d| d.severity >= min)
330        .cloned()
331        .collect()
332}
333
/// Summary statistics from unfiltered diagnostics, for JSON/SARIF output.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SummaryStats {
    /// Number of diagnostics at Block severity.
    pub block_count: usize,
    /// Number of diagnostics at Warn severity.
    pub warn_count: usize,
    /// Number of diagnostics at Info severity.
    pub info_count: usize,
    /// Functions with no per-function diagnostic (see `count_violated_functions`).
    pub pass_count: usize,
}
342
343impl SummaryStats {
344    pub fn from_diagnostics(diagnostics: &[Diagnostic], function_count: usize) -> Self {
345        let block_count = diagnostics
346            .iter()
347            .filter(|d| d.severity == Severity::Block)
348            .count();
349        let warn_count = diagnostics
350            .iter()
351            .filter(|d| d.severity == Severity::Warn)
352            .count();
353        let info_count = diagnostics
354            .iter()
355            .filter(|d| d.severity == Severity::Info)
356            .count();
357        let violated = count_violated_functions(diagnostics);
358        let pass_count = function_count.saturating_sub(violated);
359        Self {
360            block_count,
361            warn_count,
362            info_count,
363            pass_count,
364        }
365    }
366}
367
368pub fn compute_exit_code(diagnostics: &[Diagnostic], strict: bool) -> i32 {
369    for d in diagnostics {
370        if d.severity == Severity::Block {
371            return 1;
372        }
373    }
374    if strict {
375        for d in diagnostics {
376            if d.severity == Severity::Warn {
377                return 1;
378            }
379        }
380    }
381    0
382}
383
#[cfg(test)]
mod tests {
    //! Unit tests for the output formatters, exit-code mapping, severity
    //! filtering, and summary statistics.

    use super::*;
    use crate::rules::RuleId;

    /// A per-function BLOCK diagnostic (T001) at test.py:10.
    fn block_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T001"),
            severity: Severity::Block,
            file: "test.py".to_string(),
            line: Some(10),
            message: "assertion-free: test has no assertions".to_string(),
            details: None,
        }
    }

    /// A per-function WARN diagnostic (T003) at test.py:5.
    fn warn_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T003"),
            severity: Severity::Warn,
            file: "test.py".to_string(),
            line: Some(5),
            message: "giant-test: 73 lines, threshold: 50".to_string(),
            details: None,
        }
    }

    // --- Terminal format ---

    #[test]
    fn terminal_format_has_summary_header() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default());
        assert!(output.starts_with("exspec v"));
        assert!(output.contains("1 test files"));
        assert!(output.contains("1 test functions"));
    }

    #[test]
    fn terminal_format_has_score_footer() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default());
        assert!(output.contains("Score: BLOCK 1 | WARN 0 | INFO 0 | PASS 0"));
    }

    #[test]
    fn terminal_format_block() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default());
        assert!(output.contains("BLOCK test.py:10 T001 assertion-free: test has no assertions"));
    }

    #[test]
    fn terminal_format_warn() {
        let output = format_terminal(&[warn_diag()], 1, 1, &ProjectMetrics::default());
        assert!(output.contains("WARN test.py:5 T003 giant-test: 73 lines, threshold: 50"));
    }

    #[test]
    fn terminal_format_multiple() {
        let output = format_terminal(
            &[block_diag(), warn_diag()],
            2,
            2,
            &ProjectMetrics::default(),
        );
        assert!(output.contains("BLOCK"));
        assert!(output.contains("WARN"));
    }

    #[test]
    fn terminal_format_empty_has_header_and_footer() {
        let output = format_terminal(&[], 0, 0, &ProjectMetrics::default());
        assert!(output.contains("exspec v"));
        assert!(output.contains("Score:"));
    }

    // --- JSON format ---

    #[test]
    fn json_format_has_version_and_summary() {
        let output = format_json(&[block_diag()], 1, 1, &ProjectMetrics::default(), None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["version"].is_string());
        assert!(parsed["summary"].is_object());
        assert_eq!(parsed["summary"]["files"], 1);
        assert_eq!(parsed["summary"]["functions"], 1);
        assert_eq!(parsed["summary"]["block"], 1);
        assert_eq!(parsed["summary"]["warn"], 0);
        assert_eq!(parsed["summary"]["pass"], 0);
    }

    #[test]
    fn json_format_has_diagnostics_and_metrics() {
        let output = format_json(&[block_diag()], 1, 1, &ProjectMetrics::default(), None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["diagnostics"].is_array());
        assert!(parsed["metrics"].is_object());
        assert_eq!(parsed["diagnostics"].as_array().unwrap().len(), 1);
    }

    #[test]
    fn json_format_empty() {
        let output = format_json(&[], 0, 0, &ProjectMetrics::default(), None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert_eq!(parsed["diagnostics"].as_array().unwrap().len(), 0);
        assert_eq!(parsed["summary"]["functions"], 0);
    }

    // --- Empty result UX ---

    #[test]
    fn terminal_format_zero_files_shows_guidance() {
        let output = format_terminal(&[], 0, 0, &ProjectMetrics::default());
        assert!(
            output.contains("No test files found"),
            "expected guidance message, got: {output}"
        );
    }

    #[test]
    fn json_format_zero_files_has_guidance() {
        let output = format_json(&[], 0, 0, &ProjectMetrics::default(), None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["guidance"].is_string());
    }

    // --- pass_count multi-violation ---

    #[test]
    fn pass_count_with_multi_violation_function() {
        // Two diagnostics on the same (file, line) must count as ONE
        // violated function.
        let d1 = Diagnostic {
            rule: RuleId::new("T001"),
            severity: Severity::Block,
            file: "test.py".to_string(),
            line: Some(10),
            message: "assertion-free".to_string(),
            details: None,
        };
        let d2 = Diagnostic {
            rule: RuleId::new("T003"),
            severity: Severity::Warn,
            file: "test.py".to_string(),
            line: Some(10),
            message: "giant-test".to_string(),
            details: None,
        };
        let output = format_terminal(&[d1, d2], 1, 2, &ProjectMetrics::default());
        assert!(output.contains("PASS 1"), "expected PASS 1, got: {output}");
    }

    #[test]
    fn pass_count_excludes_file_level_diagnostics() {
        // A diagnostic with line=None is file-level and must not reduce PASS.
        let d1 = Diagnostic {
            rule: RuleId::new("T004"),
            severity: Severity::Info,
            file: "test.py".to_string(),
            line: None,
            message: "no-parameterized".to_string(),
            details: None,
        };
        let output = format_terminal(&[d1], 1, 1, &ProjectMetrics::default());
        assert!(output.contains("PASS 1"), "expected PASS 1, got: {output}");
    }

    #[test]
    fn terminal_format_nonzero_files_no_guidance() {
        let output = format_terminal(&[], 1, 0, &ProjectMetrics::default());
        assert!(!output.contains("No test files found"));
    }

    // --- Exit code ---

    #[test]
    fn exit_code_block_returns_1() {
        assert_eq!(compute_exit_code(&[block_diag()], false), 1);
    }

    #[test]
    fn exit_code_warn_only_returns_0() {
        assert_eq!(compute_exit_code(&[warn_diag()], false), 0);
    }

    #[test]
    fn exit_code_strict_warn_returns_1() {
        assert_eq!(compute_exit_code(&[warn_diag()], true), 1);
    }

    #[test]
    fn exit_code_empty_returns_0() {
        assert_eq!(compute_exit_code(&[], false), 0);
    }

    // --- Metrics display ---

    #[test]
    fn terminal_metrics_section_between_diagnostics_and_score() {
        let metrics = ProjectMetrics {
            mock_density_avg: 2.3,
            mock_class_max: 4,
            parameterized_ratio: 0.15,
            pbt_ratio: 0.4,
            assertion_density_avg: 1.8,
            contract_coverage: 0.2,
            ..Default::default()
        };
        let output = format_terminal(&[block_diag()], 5, 187, &metrics);
        let metrics_pos = output.find("Metrics:").expect("Metrics section missing");
        let diag_pos = output.find("BLOCK test.py").expect("diagnostic missing");
        let score_pos = output.find("Score:").expect("Score missing");
        assert!(
            diag_pos < metrics_pos,
            "Metrics should come after diagnostics"
        );
        assert!(metrics_pos < score_pos, "Metrics should come before Score");
    }

    #[test]
    fn terminal_metrics_mock_density_line() {
        let metrics = ProjectMetrics {
            mock_density_avg: 2.3,
            mock_class_max: 4,
            ..Default::default()
        };
        let output = format_terminal(&[], 1, 1, &metrics);
        assert!(
            output.contains("2.3/test (avg)"),
            "mock density avg: {output}"
        );
        assert!(
            output.contains("4 distinct classes/test (max)"),
            "mock class max: {output}"
        );
    }

    #[test]
    fn terminal_metrics_parameterized_line() {
        let metrics = ProjectMetrics {
            parameterized_ratio: 0.15,
            ..Default::default()
        };
        let output = format_terminal(&[], 5, 20, &metrics);
        assert!(output.contains("15%"), "parameterized pct: {output}");
        assert!(output.contains("3/20"), "parameterized fraction: {output}");
    }

    #[test]
    fn terminal_metrics_pbt_and_contract_file_count() {
        let metrics = ProjectMetrics {
            pbt_ratio: 0.4,
            contract_coverage: 0.2,
            ..Default::default()
        };
        let output = format_terminal(&[], 5, 1, &metrics);
        assert!(output.contains("2/5 files"), "pbt files: {output}");
        assert!(output.contains("1/5 files"), "contract files: {output}");
    }

    #[test]
    fn json_metrics_has_all_fields() {
        let metrics = ProjectMetrics {
            mock_density_avg: 1.5,
            mock_class_max: 2,
            parameterized_ratio: 0.3,
            pbt_ratio: 0.5,
            assertion_density_avg: 2.0,
            contract_coverage: 0.1,
            test_source_ratio: 0.8,
        };
        let output = format_json(&[], 1, 1, &metrics, None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        let m = &parsed["metrics"];
        assert_eq!(m["mock_density_avg"], 1.5);
        assert_eq!(m["mock_class_max"], 2);
        assert_eq!(m["parameterized_ratio"], 0.3);
        assert_eq!(m["pbt_ratio"], 0.5);
        assert_eq!(m["assertion_density_avg"], 2.0);
        assert_eq!(m["contract_coverage"], 0.1);
        assert_eq!(m["test_source_ratio"], 0.8);
    }

    #[test]
    fn json_metrics_values_are_numbers() {
        let metrics = ProjectMetrics {
            mock_density_avg: 1.0,
            mock_class_max: 3,
            ..Default::default()
        };
        let output = format_json(&[], 1, 1, &metrics, None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["metrics"]["mock_density_avg"].is_number());
        assert!(parsed["metrics"]["mock_class_max"].is_number());
    }

    // --- SARIF format ---

    /// A file-level INFO diagnostic (T005) with no line number.
    fn info_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T005"),
            severity: Severity::Info,
            file: "test.py".to_string(),
            line: None,
            message: "pbt-missing".to_string(),
            details: None,
        }
    }

    /// Parse SARIF output, panicking with context on invalid JSON.
    fn parse_sarif(output: &str) -> serde_json::Value {
        serde_json::from_str(output).expect("SARIF should be valid JSON")
    }

    #[test]
    fn sarif_valid_json() {
        let output = format_sarif(&[block_diag()]);
        parse_sarif(&output);
    }

    #[test]
    fn sarif_has_schema_url() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert!(parsed["$schema"].is_string());
        assert!(parsed["$schema"].as_str().unwrap().contains("sarif"));
    }

    #[test]
    fn sarif_version_2_1_0() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["version"], "2.1.0");
    }

    #[test]
    fn sarif_tool_driver_name() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["tool"]["driver"]["name"], "exspec");
    }

    #[test]
    fn sarif_tool_driver_version() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(
            parsed["runs"][0]["tool"]["driver"]["version"],
            env!("CARGO_PKG_VERSION")
        );
    }

    #[test]
    fn sarif_rules_match_registry_count() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
            .as_array()
            .unwrap();
        assert_eq!(rules.len(), RULE_REGISTRY.len());
    }

    #[test]
    fn sarif_rules_have_short_description() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rule0 = &parsed["runs"][0]["tool"]["driver"]["rules"][0];
        assert!(rule0["shortDescription"].is_object());
        assert!(rule0["shortDescription"]["text"].is_string());
    }

    #[test]
    fn sarif_rules_include_all_registry_entries() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
            .as_array()
            .unwrap();
        for meta in RULE_REGISTRY {
            assert!(
                rules.iter().any(|rule| rule["id"] == meta.id),
                "SARIF rules array should include {} metadata",
                meta.id
            );
        }
    }

    #[test]
    fn sarif_block_maps_to_error() {
        let output = format_sarif(&[block_diag()]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["results"][0]["level"], "error");
    }

    #[test]
    fn sarif_warn_maps_to_warning() {
        let output = format_sarif(&[warn_diag()]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["results"][0]["level"], "warning");
    }

    #[test]
    fn sarif_info_maps_to_note() {
        let output = format_sarif(&[info_diag()]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["results"][0]["level"], "note");
    }

    #[test]
    fn sarif_file_level_diag_start_line_1() {
        let output = format_sarif(&[info_diag()]);
        let parsed = parse_sarif(&output);
        let region = &parsed["runs"][0]["results"][0]["locations"][0]["physicalLocation"]["region"];
        assert_eq!(region["startLine"], 1);
    }

    #[test]
    fn sarif_result_has_location_and_uri() {
        let output = format_sarif(&[block_diag()]);
        let parsed = parse_sarif(&output);
        let loc = &parsed["runs"][0]["results"][0]["locations"][0]["physicalLocation"];
        assert_eq!(loc["artifactLocation"]["uri"], "test.py");
        assert_eq!(loc["region"]["startLine"], 10);
    }

    #[test]
    fn sarif_empty_diagnostics_empty_results() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let results = parsed["runs"][0]["results"].as_array().unwrap();
        assert!(results.is_empty());
        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
            .as_array()
            .unwrap();
        assert_eq!(rules.len(), RULE_REGISTRY.len());
    }

    // --- #59: filter_by_severity ---

    #[test]
    fn filter_by_severity_block_keeps_only_block() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let filtered = filter_by_severity(&diags, Severity::Block);
        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].severity, Severity::Block);
    }

    #[test]
    fn filter_by_severity_warn_keeps_block_and_warn() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let filtered = filter_by_severity(&diags, Severity::Warn);
        assert_eq!(filtered.len(), 2);
    }

    #[test]
    fn filter_by_severity_info_keeps_all() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let filtered = filter_by_severity(&diags, Severity::Info);
        assert_eq!(filtered.len(), 3);
    }

    #[test]
    fn filter_by_severity_empty_input() {
        let filtered = filter_by_severity(&[], Severity::Block);
        assert!(filtered.is_empty());
    }

    // --- #59: SummaryStats ---

    #[test]
    fn summary_stats_from_diagnostics() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let stats = SummaryStats::from_diagnostics(&diags, 5);
        assert_eq!(stats.block_count, 1);
        assert_eq!(stats.warn_count, 1);
        assert_eq!(stats.info_count, 1);
        // block_diag (line 10) and warn_diag (line 5) are per-function
        // violations; info_diag has line=None and is excluded, so
        // 2 violated out of 5 functions -> pass = 3.
        assert_eq!(stats.pass_count, 3);
    }

    // --- #59: Terminal filtered score ---

    #[test]
    fn terminal_format_filtered_shows_filtered_counts() {
        let filtered = vec![block_diag()]; // original had block+warn+info but filtered to block only
        let output = format_terminal(&filtered, 1, 3, &ProjectMetrics::default());
        assert!(
            output.contains("BLOCK 1 | WARN 0 | INFO 0"),
            "score should reflect filtered diagnostics: {output}"
        );
        assert!(!output.contains("WARN test.py"));
    }

    // --- #59: JSON filtered array + unfiltered summary ---

    #[test]
    fn json_filtered_diagnostics_with_unfiltered_summary() {
        let filtered = vec![block_diag()];
        let stats = SummaryStats {
            block_count: 1,
            warn_count: 1,
            info_count: 1,
            pass_count: 2,
        };
        let output = format_json(&filtered, 1, 5, &ProjectMetrics::default(), Some(&stats));
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert_eq!(
            parsed["diagnostics"].as_array().unwrap().len(),
            1,
            "diagnostics should be filtered"
        );
        assert_eq!(parsed["summary"]["warn"], 1, "summary should be unfiltered");
        assert_eq!(parsed["summary"]["info"], 1, "summary should be unfiltered");
    }

    #[test]
    fn sarif_invocations_execution_successful() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(
            parsed["runs"][0]["invocations"][0]["executionSuccessful"],
            true
        );
    }
}