//! exspec_core/output.rs — output formatting for diagnostics
//! (terminal, JSON, and SARIF), severity filtering, summary
//! statistics, and exit-code computation.
1use std::collections::HashSet;
2
3use crate::hints::Hint;
4use crate::metrics::ProjectMetrics;
5use crate::rules::{Diagnostic, Severity};
6
/// Output format selected by the caller (presumably via a CLI flag —
/// the dispatch site is not in this file).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OutputFormat {
    /// Human-readable report (`format_terminal`).
    Terminal,
    /// Machine-readable JSON (`format_json`).
    Json,
    /// SARIF 2.1.0 for CI/code-scanning integrations (`format_sarif`).
    Sarif,
}
13
14/// Count unique violated functions by (file, line) pairs.
15/// Only per-function diagnostics (line=Some) are counted.
16fn count_violated_functions(diagnostics: &[Diagnostic]) -> usize {
17    diagnostics
18        .iter()
19        .filter_map(|d| d.line.map(|l| (d.file.as_str(), l)))
20        .collect::<HashSet<_>>()
21        .len()
22}
23
24pub fn format_terminal(
25    diagnostics: &[Diagnostic],
26    file_count: usize,
27    function_count: usize,
28    metrics: &ProjectMetrics,
29    hints: &[Hint],
30) -> String {
31    let mut lines = Vec::new();
32
33    lines.push(format!(
34        "exspec v{} -- {} test files, {} test functions",
35        env!("CARGO_PKG_VERSION"),
36        file_count,
37        function_count,
38    ));
39
40    if file_count == 0 {
41        lines.push("No test files found. Check --lang filter or run from a directory containing test files.".to_string());
42    }
43
44    for d in diagnostics {
45        let line_str = d.line.map(|l| format!(":{l}")).unwrap_or_default();
46        lines.push(format!(
47            "{} {}{} {} {}",
48            d.severity, d.file, line_str, d.rule, d.message,
49        ));
50    }
51
52    // Metrics section
53    lines.push("Metrics:".to_string());
54    lines.push(format!(
55        "  Mock density:      {:.1}/test (avg), {} distinct classes/test (max)",
56        metrics.mock_density_avg, metrics.mock_class_max,
57    ));
58
59    let total_functions_for_param = if function_count > 0 {
60        let count = (metrics.parameterized_ratio * function_count as f64).round() as usize;
61        format!("{count}/{function_count}")
62    } else {
63        "0/0".to_string()
64    };
65    lines.push(format!(
66        "  Parameterized:     {:.0}% ({})",
67        metrics.parameterized_ratio * 100.0,
68        total_functions_for_param,
69    ));
70
71    let pbt_files = (metrics.pbt_ratio * file_count as f64).round() as usize;
72    lines.push(format!(
73        "  PBT usage:         {:.0}% ({}/{} files)",
74        metrics.pbt_ratio * 100.0,
75        pbt_files,
76        file_count,
77    ));
78
79    lines.push(format!(
80        "  Assertion density: {:.1}/test (avg)",
81        metrics.assertion_density_avg,
82    ));
83
84    let contract_files = (metrics.contract_coverage * file_count as f64).round() as usize;
85    lines.push(format!(
86        "  Contract coverage: {:.0}% ({}/{} files)",
87        metrics.contract_coverage * 100.0,
88        contract_files,
89        file_count,
90    ));
91
92    // Score section
93    let block_count = diagnostics
94        .iter()
95        .filter(|d| d.severity == Severity::Block)
96        .count();
97    let warn_count = diagnostics
98        .iter()
99        .filter(|d| d.severity == Severity::Warn)
100        .count();
101    let info_count = diagnostics
102        .iter()
103        .filter(|d| d.severity == Severity::Info)
104        .count();
105    let violated = count_violated_functions(diagnostics);
106    let pass_count = function_count.saturating_sub(violated);
107    lines.push(format!(
108        "Score: BLOCK {block_count} | WARN {warn_count} | INFO {info_count} | PASS {pass_count}",
109    ));
110
111    for hint in hints {
112        lines.push(format!("Hint [{}] {}", hint.rule, hint.title));
113        lines.push(format!("  {}", hint.message));
114    }
115
116    lines.join("\n")
117}
118
119pub fn format_json(
120    diagnostics: &[Diagnostic],
121    file_count: usize,
122    function_count: usize,
123    metrics: &ProjectMetrics,
124    unfiltered_summary: Option<&SummaryStats>,
125    hints: &[Hint],
126) -> String {
127    let (block_count, warn_count, info_count, pass_count) = if let Some(stats) = unfiltered_summary
128    {
129        (
130            stats.block_count,
131            stats.warn_count,
132            stats.info_count,
133            stats.pass_count,
134        )
135    } else {
136        let block_count = diagnostics
137            .iter()
138            .filter(|d| d.severity == Severity::Block)
139            .count();
140        let warn_count = diagnostics
141            .iter()
142            .filter(|d| d.severity == Severity::Warn)
143            .count();
144        let info_count = diagnostics
145            .iter()
146            .filter(|d| d.severity == Severity::Info)
147            .count();
148        let violated = count_violated_functions(diagnostics);
149        let pass_count = function_count.saturating_sub(violated);
150        (block_count, warn_count, info_count, pass_count)
151    };
152
153    let mut output = serde_json::json!({
154        "version": env!("CARGO_PKG_VERSION"),
155        "summary": {
156            "files": file_count,
157            "functions": function_count,
158            "block": block_count,
159            "warn": warn_count,
160            "info": info_count,
161            "pass": pass_count,
162        },
163        "diagnostics": diagnostics,
164        "metrics": serde_json::to_value(metrics).unwrap_or_default(),
165    });
166
167    if file_count == 0 {
168        output["guidance"] = serde_json::json!("No test files found. Check --lang filter or run from a directory containing test files.");
169    }
170    if !hints.is_empty() {
171        output["hints"] = serde_json::to_value(hints).unwrap_or_default();
172    }
173    serde_json::to_string_pretty(&output).unwrap_or_else(|_| "{}".to_string())
174}
175
/// Static metadata for one lint rule, used to populate the SARIF
/// `tool.driver.rules` array in `format_sarif`.
struct RuleMeta {
    // Stable rule identifier, e.g. "T001"; matches Diagnostic.rule.
    id: &'static str,
    // Short kebab-case rule name, e.g. "assertion-free".
    name: &'static str,
    // One-line human-readable description for SARIF shortDescription.
    short_description: &'static str,
}
181
/// All known rules, emitted into every SARIF report regardless of which
/// rules actually fired. T001–T008 are metric/structural rules; T101+ are
/// behavioral/smell rules. NOTE(review): there is no T104 entry — presumably
/// a retired rule; confirm the gap is intentional.
const RULE_REGISTRY: &[RuleMeta] = &[
    RuleMeta {
        id: "T001",
        name: "assertion-free",
        short_description: "Test function has no assertions",
    },
    RuleMeta {
        id: "T002",
        name: "mock-overuse",
        short_description: "Test function uses too many mocks",
    },
    RuleMeta {
        id: "T003",
        name: "giant-test",
        short_description: "Test function exceeds line count threshold",
    },
    RuleMeta {
        id: "T004",
        name: "no-parameterized",
        short_description: "Low ratio of parameterized tests",
    },
    RuleMeta {
        id: "T005",
        name: "pbt-missing",
        short_description: "No property-based testing library imported",
    },
    RuleMeta {
        id: "T006",
        name: "low-assertion-density",
        short_description: "Low assertion count per test function",
    },
    RuleMeta {
        id: "T007",
        name: "test-source-ratio",
        short_description: "Test file to source file ratio",
    },
    RuleMeta {
        id: "T008",
        name: "no-contract",
        short_description: "No contract testing library used in tests",
    },
    RuleMeta {
        id: "T101",
        name: "how-not-what",
        short_description: "Test verifies implementation rather than behavior",
    },
    RuleMeta {
        id: "T102",
        name: "fixture-sprawl",
        short_description: "Test depends on too many fixtures",
    },
    RuleMeta {
        id: "T103",
        name: "missing-error-test",
        short_description: "No error/exception test found in file",
    },
    RuleMeta {
        id: "T105",
        name: "deterministic-no-metamorphic",
        short_description: "All assertions use exact equality, no relational checks",
    },
    RuleMeta {
        id: "T106",
        name: "duplicate-literal-assertion",
        short_description: "Same literal appears multiple times in assertions",
    },
    RuleMeta {
        id: "T107",
        name: "assertion-roulette",
        short_description: "Multiple assertions without failure messages",
    },
    RuleMeta {
        id: "T108",
        name: "wait-and-see",
        short_description: "Test uses sleep/delay causing flaky tests",
    },
    RuleMeta {
        id: "T109",
        name: "undescriptive-test-name",
        short_description: "Test name does not describe behavior",
    },
    RuleMeta {
        id: "T110",
        name: "skip-only-test",
        short_description: "Test skips or marks incomplete without assertions",
    },
];
269
/// Render diagnostics as a SARIF 2.1.0 document (pretty-printed JSON).
///
/// Severity mapping: Block→error, Warn→warning, Info→note. File-level
/// diagnostics (line=None) are reported at startLine 1. The full
/// RULE_REGISTRY is always emitted as driver rules, independent of
/// which rules appear in `diagnostics`.
pub fn format_sarif(diagnostics: &[Diagnostic]) -> String {
    use serde_sarif::sarif;

    let rules: Vec<sarif::ReportingDescriptor> = RULE_REGISTRY
        .iter()
        .map(|r| {
            sarif::ReportingDescriptor::builder()
                .id(r.id)
                .name(r.name)
                // NOTE(review): &String detour suggests the builder takes
                // an Into/AsRef bound that &'static str doesn't satisfy
                // directly — confirm against serde_sarif's API.
                .short_description(&String::from(r.short_description))
                .build()
        })
        .collect();

    let results: Vec<sarif::Result> = diagnostics
        .iter()
        .map(|d| {
            let level = match d.severity {
                Severity::Block => sarif::ResultLevel::Error,
                Severity::Warn => sarif::ResultLevel::Warning,
                Severity::Info => sarif::ResultLevel::Note,
            };
            // SARIF requires a positive startLine; default file-level
            // diagnostics to line 1.
            let start_line = d.line.unwrap_or(1) as i64;
            let location = sarif::Location::builder()
                .physical_location(
                    sarif::PhysicalLocation::builder()
                        .artifact_location(sarif::ArtifactLocation::builder().uri(&d.file).build())
                        .region(sarif::Region::builder().start_line(start_line).build())
                        .build(),
                )
                .build();

            sarif::Result::builder()
                .rule_id(&d.rule.0)
                .message(sarif::Message::builder().text(&d.message).build())
                .level(level)
                .locations(vec![location])
                .build()
        })
        .collect();

    let tool_component = sarif::ToolComponent::builder()
        .name("exspec")
        .version(env!("CARGO_PKG_VERSION"))
        .rules(rules)
        .build();

    // The analysis itself succeeded even if diagnostics were found.
    let invocation = sarif::Invocation::builder()
        .execution_successful(true)
        .build();

    let run = sarif::Run::builder()
        .tool(tool_component)
        .results(results)
        .invocations(vec![invocation])
        .build();

    let sarif_doc = sarif::Sarif::builder()
        .version(sarif::Version::V2_1_0.to_string())
        .schema(sarif::SCHEMA_URL)
        .runs(vec![run])
        .build();

    serde_json::to_string_pretty(&sarif_doc).unwrap_or_else(|_| "{}".to_string())
}
335
336/// Filter diagnostics to only include those at or above the given minimum severity.
337pub fn filter_by_severity(diagnostics: &[Diagnostic], min: Severity) -> Vec<Diagnostic> {
338    diagnostics
339        .iter()
340        .filter(|d| d.severity >= min)
341        .cloned()
342        .collect()
343}
344
/// Summary statistics from unfiltered diagnostics, for JSON/SARIF output.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SummaryStats {
    // Number of diagnostics with Severity::Block.
    pub block_count: usize,
    // Number of diagnostics with Severity::Warn.
    pub warn_count: usize,
    // Number of diagnostics with Severity::Info.
    pub info_count: usize,
    // function_count minus distinct violated (file, line) functions.
    pub pass_count: usize,
}
353
354impl SummaryStats {
355    pub fn from_diagnostics(diagnostics: &[Diagnostic], function_count: usize) -> Self {
356        let block_count = diagnostics
357            .iter()
358            .filter(|d| d.severity == Severity::Block)
359            .count();
360        let warn_count = diagnostics
361            .iter()
362            .filter(|d| d.severity == Severity::Warn)
363            .count();
364        let info_count = diagnostics
365            .iter()
366            .filter(|d| d.severity == Severity::Info)
367            .count();
368        let violated = count_violated_functions(diagnostics);
369        let pass_count = function_count.saturating_sub(violated);
370        Self {
371            block_count,
372            warn_count,
373            info_count,
374            pass_count,
375        }
376    }
377}
378
379pub fn compute_exit_code(diagnostics: &[Diagnostic], strict: bool) -> i32 {
380    for d in diagnostics {
381        if d.severity == Severity::Block {
382            return 1;
383        }
384    }
385    if strict {
386        for d in diagnostics {
387            if d.severity == Severity::Warn {
388                return 1;
389            }
390        }
391    }
392    0
393}
394
// Unit tests: fixtures, then one section per output format / helper.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::rules::RuleId;

    // Fixture: per-function BLOCK diagnostic at test.py:10.
    fn block_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T001"),
            severity: Severity::Block,
            file: "test.py".to_string(),
            line: Some(10),
            message: "assertion-free: test has no assertions".to_string(),
            details: None,
        }
    }

    // Fixture: per-function WARN diagnostic at test.py:5.
    fn warn_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T003"),
            severity: Severity::Warn,
            file: "test.py".to_string(),
            line: Some(5),
            message: "giant-test: 73 lines, threshold: 50".to_string(),
            details: None,
        }
    }

    // --- Terminal format ---

    #[test]
    fn terminal_format_has_summary_header() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default(), &[]);
        assert!(output.starts_with("exspec v"));
        assert!(output.contains("1 test files"));
        assert!(output.contains("1 test functions"));
    }

    #[test]
    fn terminal_format_has_score_footer() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default(), &[]);
        assert!(output.contains("Score: BLOCK 1 | WARN 0 | INFO 0 | PASS 0"));
    }

    #[test]
    fn terminal_format_block() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default(), &[]);
        assert!(output.contains("BLOCK test.py:10 T001 assertion-free: test has no assertions"));
    }

    #[test]
    fn terminal_format_warn() {
        let output = format_terminal(&[warn_diag()], 1, 1, &ProjectMetrics::default(), &[]);
        assert!(output.contains("WARN test.py:5 T003 giant-test: 73 lines, threshold: 50"));
    }

    #[test]
    fn terminal_format_multiple() {
        let output = format_terminal(
            &[block_diag(), warn_diag()],
            2,
            2,
            &ProjectMetrics::default(),
            &[],
        );
        assert!(output.contains("BLOCK"));
        assert!(output.contains("WARN"));
    }

    #[test]
    fn terminal_format_empty_has_header_and_footer() {
        let output = format_terminal(&[], 0, 0, &ProjectMetrics::default(), &[]);
        assert!(output.contains("exspec v"));
        assert!(output.contains("Score:"));
    }

    // --- JSON format ---

    #[test]
    fn json_format_has_version_and_summary() {
        let output = format_json(&[block_diag()], 1, 1, &ProjectMetrics::default(), None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["version"].is_string());
        assert!(parsed["summary"].is_object());
        assert_eq!(parsed["summary"]["files"], 1);
        assert_eq!(parsed["summary"]["functions"], 1);
        assert_eq!(parsed["summary"]["block"], 1);
        assert_eq!(parsed["summary"]["warn"], 0);
        assert_eq!(parsed["summary"]["pass"], 0);
    }

    #[test]
    fn json_format_has_diagnostics_and_metrics() {
        let output = format_json(&[block_diag()], 1, 1, &ProjectMetrics::default(), None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["diagnostics"].is_array());
        assert!(parsed["metrics"].is_object());
        assert_eq!(parsed["diagnostics"].as_array().unwrap().len(), 1);
    }

    #[test]
    fn json_format_empty() {
        let output = format_json(&[], 0, 0, &ProjectMetrics::default(), None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert_eq!(parsed["diagnostics"].as_array().unwrap().len(), 0);
        assert_eq!(parsed["summary"]["functions"], 0);
    }

    // --- Empty result UX ---

    #[test]
    fn terminal_format_zero_files_shows_guidance() {
        let output = format_terminal(&[], 0, 0, &ProjectMetrics::default(), &[]);
        assert!(
            output.contains("No test files found"),
            "expected guidance message, got: {output}"
        );
    }

    #[test]
    fn json_format_zero_files_has_guidance() {
        let output = format_json(&[], 0, 0, &ProjectMetrics::default(), None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["guidance"].is_string());
    }

    // --- pass_count multi-violation ---

    #[test]
    fn pass_count_with_multi_violation_function() {
        // Two diagnostics on the same (file, line) count as ONE violated
        // function: 2 functions - 1 violated = PASS 1.
        let d1 = Diagnostic {
            rule: RuleId::new("T001"),
            severity: Severity::Block,
            file: "test.py".to_string(),
            line: Some(10),
            message: "assertion-free".to_string(),
            details: None,
        };
        let d2 = Diagnostic {
            rule: RuleId::new("T003"),
            severity: Severity::Warn,
            file: "test.py".to_string(),
            line: Some(10),
            message: "giant-test".to_string(),
            details: None,
        };
        let output = format_terminal(&[d1, d2], 1, 2, &ProjectMetrics::default(), &[]);
        assert!(output.contains("PASS 1"), "expected PASS 1, got: {output}");
    }

    #[test]
    fn pass_count_excludes_file_level_diagnostics() {
        // A line=None (file-level) diagnostic must not reduce PASS.
        let d1 = Diagnostic {
            rule: RuleId::new("T004"),
            severity: Severity::Info,
            file: "test.py".to_string(),
            line: None,
            message: "no-parameterized".to_string(),
            details: None,
        };
        let output = format_terminal(&[d1], 1, 1, &ProjectMetrics::default(), &[]);
        assert!(output.contains("PASS 1"), "expected PASS 1, got: {output}");
    }

    #[test]
    fn terminal_format_nonzero_files_no_guidance() {
        let output = format_terminal(&[], 1, 0, &ProjectMetrics::default(), &[]);
        assert!(!output.contains("No test files found"));
    }

    // --- Exit code ---

    #[test]
    fn exit_code_block_returns_1() {
        assert_eq!(compute_exit_code(&[block_diag()], false), 1);
    }

    #[test]
    fn exit_code_warn_only_returns_0() {
        assert_eq!(compute_exit_code(&[warn_diag()], false), 0);
    }

    #[test]
    fn exit_code_strict_warn_returns_1() {
        assert_eq!(compute_exit_code(&[warn_diag()], true), 1);
    }

    #[test]
    fn exit_code_empty_returns_0() {
        assert_eq!(compute_exit_code(&[], false), 0);
    }

    // --- Metrics display ---

    #[test]
    fn terminal_metrics_section_between_diagnostics_and_score() {
        let metrics = ProjectMetrics {
            mock_density_avg: 2.3,
            mock_class_max: 4,
            parameterized_ratio: 0.15,
            pbt_ratio: 0.4,
            assertion_density_avg: 1.8,
            contract_coverage: 0.2,
            ..Default::default()
        };
        let output = format_terminal(&[block_diag()], 5, 187, &metrics, &[]);
        let metrics_pos = output.find("Metrics:").expect("Metrics section missing");
        let diag_pos = output.find("BLOCK test.py").expect("diagnostic missing");
        let score_pos = output.find("Score:").expect("Score missing");
        assert!(
            diag_pos < metrics_pos,
            "Metrics should come after diagnostics"
        );
        assert!(metrics_pos < score_pos, "Metrics should come before Score");
    }

    #[test]
    fn terminal_metrics_mock_density_line() {
        let metrics = ProjectMetrics {
            mock_density_avg: 2.3,
            mock_class_max: 4,
            ..Default::default()
        };
        let output = format_terminal(&[], 1, 1, &metrics, &[]);
        assert!(
            output.contains("2.3/test (avg)"),
            "mock density avg: {output}"
        );
        assert!(
            output.contains("4 distinct classes/test (max)"),
            "mock class max: {output}"
        );
    }

    #[test]
    fn terminal_metrics_parameterized_line() {
        let metrics = ProjectMetrics {
            parameterized_ratio: 0.15,
            ..Default::default()
        };
        let output = format_terminal(&[], 5, 20, &metrics, &[]);
        assert!(output.contains("15%"), "parameterized pct: {output}");
        assert!(output.contains("3/20"), "parameterized fraction: {output}");
    }

    #[test]
    fn terminal_metrics_pbt_and_contract_file_count() {
        let metrics = ProjectMetrics {
            pbt_ratio: 0.4,
            contract_coverage: 0.2,
            ..Default::default()
        };
        let output = format_terminal(&[], 5, 1, &metrics, &[]);
        assert!(output.contains("2/5 files"), "pbt files: {output}");
        assert!(output.contains("1/5 files"), "contract files: {output}");
    }

    #[test]
    fn json_metrics_has_all_fields() {
        let metrics = ProjectMetrics {
            mock_density_avg: 1.5,
            mock_class_max: 2,
            parameterized_ratio: 0.3,
            pbt_ratio: 0.5,
            assertion_density_avg: 2.0,
            contract_coverage: 0.1,
            test_source_ratio: 0.8,
        };
        let output = format_json(&[], 1, 1, &metrics, None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        let m = &parsed["metrics"];
        assert_eq!(m["mock_density_avg"], 1.5);
        assert_eq!(m["mock_class_max"], 2);
        assert_eq!(m["parameterized_ratio"], 0.3);
        assert_eq!(m["pbt_ratio"], 0.5);
        assert_eq!(m["assertion_density_avg"], 2.0);
        assert_eq!(m["contract_coverage"], 0.1);
        assert_eq!(m["test_source_ratio"], 0.8);
    }

    #[test]
    fn json_metrics_values_are_numbers() {
        let metrics = ProjectMetrics {
            mock_density_avg: 1.0,
            mock_class_max: 3,
            ..Default::default()
        };
        let output = format_json(&[], 1, 1, &metrics, None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["metrics"]["mock_density_avg"].is_number());
        assert!(parsed["metrics"]["mock_class_max"].is_number());
    }

    // --- SARIF format ---

    // Fixture: file-level INFO diagnostic (line=None).
    fn info_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T005"),
            severity: Severity::Info,
            file: "test.py".to_string(),
            line: None,
            message: "pbt-missing".to_string(),
            details: None,
        }
    }

    fn parse_sarif(output: &str) -> serde_json::Value {
        serde_json::from_str(output).expect("SARIF should be valid JSON")
    }

    #[test]
    fn sarif_valid_json() {
        let output = format_sarif(&[block_diag()]);
        parse_sarif(&output);
    }

    #[test]
    fn sarif_has_schema_url() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert!(parsed["$schema"].is_string());
        assert!(parsed["$schema"].as_str().unwrap().contains("sarif"));
    }

    #[test]
    fn sarif_version_2_1_0() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["version"], "2.1.0");
    }

    #[test]
    fn sarif_tool_driver_name() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["tool"]["driver"]["name"], "exspec");
    }

    #[test]
    fn sarif_tool_driver_version() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(
            parsed["runs"][0]["tool"]["driver"]["version"],
            env!("CARGO_PKG_VERSION")
        );
    }

    #[test]
    fn sarif_rules_match_registry_count() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
            .as_array()
            .unwrap();
        assert_eq!(rules.len(), RULE_REGISTRY.len());
    }

    #[test]
    fn sarif_rules_have_short_description() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rule0 = &parsed["runs"][0]["tool"]["driver"]["rules"][0];
        assert!(rule0["shortDescription"].is_object());
        assert!(rule0["shortDescription"]["text"].is_string());
    }

    #[test]
    fn sarif_rules_include_all_registry_entries() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
            .as_array()
            .unwrap();
        for meta in RULE_REGISTRY {
            assert!(
                rules.iter().any(|rule| rule["id"] == meta.id),
                "SARIF rules array should include {} metadata",
                meta.id
            );
        }
    }

    #[test]
    fn sarif_block_maps_to_error() {
        let output = format_sarif(&[block_diag()]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["results"][0]["level"], "error");
    }

    #[test]
    fn sarif_warn_maps_to_warning() {
        let output = format_sarif(&[warn_diag()]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["results"][0]["level"], "warning");
    }

    #[test]
    fn sarif_info_maps_to_note() {
        let output = format_sarif(&[info_diag()]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["results"][0]["level"], "note");
    }

    #[test]
    fn sarif_file_level_diag_start_line_1() {
        // line=None defaults to startLine 1 in the SARIF region.
        let output = format_sarif(&[info_diag()]);
        let parsed = parse_sarif(&output);
        let region = &parsed["runs"][0]["results"][0]["locations"][0]["physicalLocation"]["region"];
        assert_eq!(region["startLine"], 1);
    }

    #[test]
    fn sarif_result_has_location_and_uri() {
        let output = format_sarif(&[block_diag()]);
        let parsed = parse_sarif(&output);
        let loc = &parsed["runs"][0]["results"][0]["locations"][0]["physicalLocation"];
        assert_eq!(loc["artifactLocation"]["uri"], "test.py");
        assert_eq!(loc["region"]["startLine"], 10);
    }

    #[test]
    fn sarif_empty_diagnostics_empty_results() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let results = parsed["runs"][0]["results"].as_array().unwrap();
        assert!(results.is_empty());
        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
            .as_array()
            .unwrap();
        assert_eq!(rules.len(), RULE_REGISTRY.len());
    }

    // --- #59: filter_by_severity ---

    #[test]
    fn filter_by_severity_block_keeps_only_block() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let filtered = filter_by_severity(&diags, Severity::Block);
        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].severity, Severity::Block);
    }

    #[test]
    fn filter_by_severity_warn_keeps_block_and_warn() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let filtered = filter_by_severity(&diags, Severity::Warn);
        assert_eq!(filtered.len(), 2);
    }

    #[test]
    fn filter_by_severity_info_keeps_all() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let filtered = filter_by_severity(&diags, Severity::Info);
        assert_eq!(filtered.len(), 3);
    }

    #[test]
    fn filter_by_severity_empty_input() {
        let filtered = filter_by_severity(&[], Severity::Block);
        assert!(filtered.is_empty());
    }

    // --- #59: SummaryStats ---

    #[test]
    fn summary_stats_from_diagnostics() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let stats = SummaryStats::from_diagnostics(&diags, 5);
        assert_eq!(stats.block_count, 1);
        assert_eq!(stats.warn_count, 1);
        assert_eq!(stats.info_count, 1);
        // block_diag line=10, warn_diag line=5, info_diag line=None →
        // 2 distinct violated functions out of 5 → pass = 5 - 2 = 3.
        assert_eq!(stats.pass_count, 3);
    }

    // --- #59: Terminal filtered score ---

    #[test]
    fn terminal_format_filtered_shows_filtered_counts() {
        let filtered = vec![block_diag()]; // original had block+warn+info but filtered to block only
        let output = format_terminal(&filtered, 1, 3, &ProjectMetrics::default(), &[]);
        assert!(
            output.contains("BLOCK 1 | WARN 0 | INFO 0"),
            "score should reflect filtered diagnostics: {output}"
        );
        assert!(!output.contains("WARN test.py"));
    }

    // --- #59: JSON filtered array + unfiltered summary ---

    #[test]
    fn json_filtered_diagnostics_with_unfiltered_summary() {
        let filtered = vec![block_diag()];
        let stats = SummaryStats {
            block_count: 1,
            warn_count: 1,
            info_count: 1,
            pass_count: 2,
        };
        let output = format_json(
            &filtered,
            1,
            5,
            &ProjectMetrics::default(),
            Some(&stats),
            &[],
        );
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert_eq!(
            parsed["diagnostics"].as_array().unwrap().len(),
            1,
            "diagnostics should be filtered"
        );
        assert_eq!(parsed["summary"]["warn"], 1, "summary should be unfiltered");
        assert_eq!(parsed["summary"]["info"], 1, "summary should be unfiltered");
    }

    #[test]
    fn sarif_invocations_execution_successful() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(
            parsed["runs"][0]["invocations"][0]["executionSuccessful"],
            true
        );
    }
}