// exspec_core/output.rs — diagnostic report formatting (terminal, JSON, SARIF).
1use std::collections::HashSet;
2
3use crate::metrics::ProjectMetrics;
4use crate::rules::{Diagnostic, Severity};
5
/// Report format for emitting analysis results.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OutputFormat {
    /// Human-readable text report (see `format_terminal`).
    Terminal,
    /// JSON document with version, summary, diagnostics, and metrics (see `format_json`).
    Json,
    /// SARIF 2.1.0 document for code-scanning tools (see `format_sarif`).
    Sarif,
}
12
13/// Count unique violated functions by (file, line) pairs.
14/// Only per-function diagnostics (line=Some) are counted.
15fn count_violated_functions(diagnostics: &[Diagnostic]) -> usize {
16    diagnostics
17        .iter()
18        .filter_map(|d| d.line.map(|l| (d.file.as_str(), l)))
19        .collect::<HashSet<_>>()
20        .len()
21}
22
23pub fn format_terminal(
24    diagnostics: &[Diagnostic],
25    file_count: usize,
26    function_count: usize,
27    metrics: &ProjectMetrics,
28) -> String {
29    let mut lines = Vec::new();
30
31    lines.push(format!(
32        "exspec v{} -- {} test files, {} test functions",
33        env!("CARGO_PKG_VERSION"),
34        file_count,
35        function_count,
36    ));
37
38    if file_count == 0 {
39        lines.push("No test files found. Check --lang filter or run from a directory containing test files.".to_string());
40    }
41
42    for d in diagnostics {
43        let line_str = d.line.map(|l| format!(":{l}")).unwrap_or_default();
44        lines.push(format!(
45            "{} {}{} {} {}",
46            d.severity, d.file, line_str, d.rule, d.message,
47        ));
48    }
49
50    // Metrics section
51    lines.push("Metrics:".to_string());
52    lines.push(format!(
53        "  Mock density:      {:.1}/test (avg), {} distinct classes/test (max)",
54        metrics.mock_density_avg, metrics.mock_class_max,
55    ));
56
57    let total_functions_for_param = if function_count > 0 {
58        let count = (metrics.parameterized_ratio * function_count as f64).round() as usize;
59        format!("{count}/{function_count}")
60    } else {
61        "0/0".to_string()
62    };
63    lines.push(format!(
64        "  Parameterized:     {:.0}% ({})",
65        metrics.parameterized_ratio * 100.0,
66        total_functions_for_param,
67    ));
68
69    let pbt_files = (metrics.pbt_ratio * file_count as f64).round() as usize;
70    lines.push(format!(
71        "  PBT usage:         {:.0}% ({}/{} files)",
72        metrics.pbt_ratio * 100.0,
73        pbt_files,
74        file_count,
75    ));
76
77    lines.push(format!(
78        "  Assertion density: {:.1}/test (avg)",
79        metrics.assertion_density_avg,
80    ));
81
82    let contract_files = (metrics.contract_coverage * file_count as f64).round() as usize;
83    lines.push(format!(
84        "  Contract coverage: {:.0}% ({}/{} files)",
85        metrics.contract_coverage * 100.0,
86        contract_files,
87        file_count,
88    ));
89
90    // Score section
91    let block_count = diagnostics
92        .iter()
93        .filter(|d| d.severity == Severity::Block)
94        .count();
95    let warn_count = diagnostics
96        .iter()
97        .filter(|d| d.severity == Severity::Warn)
98        .count();
99    let info_count = diagnostics
100        .iter()
101        .filter(|d| d.severity == Severity::Info)
102        .count();
103    let violated = count_violated_functions(diagnostics);
104    let pass_count = function_count.saturating_sub(violated);
105    lines.push(format!(
106        "Score: BLOCK {block_count} | WARN {warn_count} | INFO {info_count} | PASS {pass_count}",
107    ));
108
109    lines.join("\n")
110}
111
112pub fn format_json(
113    diagnostics: &[Diagnostic],
114    file_count: usize,
115    function_count: usize,
116    metrics: &ProjectMetrics,
117    unfiltered_summary: Option<&SummaryStats>,
118) -> String {
119    let (block_count, warn_count, info_count, pass_count) = if let Some(stats) = unfiltered_summary
120    {
121        (
122            stats.block_count,
123            stats.warn_count,
124            stats.info_count,
125            stats.pass_count,
126        )
127    } else {
128        let block_count = diagnostics
129            .iter()
130            .filter(|d| d.severity == Severity::Block)
131            .count();
132        let warn_count = diagnostics
133            .iter()
134            .filter(|d| d.severity == Severity::Warn)
135            .count();
136        let info_count = diagnostics
137            .iter()
138            .filter(|d| d.severity == Severity::Info)
139            .count();
140        let violated = count_violated_functions(diagnostics);
141        let pass_count = function_count.saturating_sub(violated);
142        (block_count, warn_count, info_count, pass_count)
143    };
144
145    let mut output = serde_json::json!({
146        "version": env!("CARGO_PKG_VERSION"),
147        "summary": {
148            "files": file_count,
149            "functions": function_count,
150            "block": block_count,
151            "warn": warn_count,
152            "info": info_count,
153            "pass": pass_count,
154        },
155        "diagnostics": diagnostics,
156        "metrics": serde_json::to_value(metrics).unwrap_or_default(),
157    });
158
159    if file_count == 0 {
160        output["guidance"] = serde_json::json!("No test files found. Check --lang filter or run from a directory containing test files.");
161    }
162    serde_json::to_string_pretty(&output).unwrap_or_else(|_| "{}".to_string())
163}
164
/// Static metadata for one lint rule, exported as a SARIF
/// `reportingDescriptor` by `format_sarif`.
struct RuleMeta {
    /// Stable rule identifier, e.g. "T001".
    id: &'static str,
    /// Short kebab-case rule name, e.g. "assertion-free".
    name: &'static str,
    /// One-line human-readable description.
    short_description: &'static str,
}
170
/// All known rules in the order they appear in SARIF `tool.driver.rules`:
/// T001–T008 plus T101–T103 and T105–T109 (16 entries — the
/// `sarif_empty_diagnostics_empty_results` test pins this count).
const RULE_REGISTRY: &[RuleMeta] = &[
    RuleMeta {
        id: "T001",
        name: "assertion-free",
        short_description: "Test function has no assertions",
    },
    RuleMeta {
        id: "T002",
        name: "mock-overuse",
        short_description: "Test function uses too many mocks",
    },
    RuleMeta {
        id: "T003",
        name: "giant-test",
        short_description: "Test function exceeds line count threshold",
    },
    RuleMeta {
        id: "T004",
        name: "no-parameterized",
        short_description: "Low ratio of parameterized tests",
    },
    RuleMeta {
        id: "T005",
        name: "pbt-missing",
        short_description: "No property-based testing library imported",
    },
    RuleMeta {
        id: "T006",
        name: "low-assertion-density",
        short_description: "Low assertion count per test function",
    },
    RuleMeta {
        id: "T007",
        name: "test-source-ratio",
        short_description: "Test file to source file ratio",
    },
    RuleMeta {
        id: "T008",
        name: "no-contract",
        short_description: "No contract testing library used in tests",
    },
    RuleMeta {
        id: "T101",
        name: "how-not-what",
        short_description: "Test verifies implementation rather than behavior",
    },
    RuleMeta {
        id: "T102",
        name: "fixture-sprawl",
        short_description: "Test depends on too many fixtures",
    },
    RuleMeta {
        id: "T103",
        name: "missing-error-test",
        short_description: "No error/exception test found in file",
    },
    RuleMeta {
        id: "T105",
        name: "deterministic-no-metamorphic",
        short_description: "All assertions use exact equality, no relational checks",
    },
    RuleMeta {
        id: "T106",
        name: "duplicate-literal-assertion",
        short_description: "Same literal appears multiple times in assertions",
    },
    RuleMeta {
        id: "T107",
        name: "assertion-roulette",
        short_description: "Multiple assertions without failure messages",
    },
    RuleMeta {
        id: "T108",
        name: "wait-and-see",
        short_description: "Test uses sleep/delay causing flaky tests",
    },
    RuleMeta {
        id: "T109",
        name: "undescriptive-test-name",
        short_description: "Test name does not describe behavior",
    },
];
253
254pub fn format_sarif(diagnostics: &[Diagnostic]) -> String {
255    use serde_sarif::sarif;
256
257    let rules: Vec<sarif::ReportingDescriptor> = RULE_REGISTRY
258        .iter()
259        .map(|r| {
260            sarif::ReportingDescriptor::builder()
261                .id(r.id)
262                .name(r.name)
263                .short_description(&String::from(r.short_description))
264                .build()
265        })
266        .collect();
267
268    let results: Vec<sarif::Result> = diagnostics
269        .iter()
270        .map(|d| {
271            let level = match d.severity {
272                Severity::Block => sarif::ResultLevel::Error,
273                Severity::Warn => sarif::ResultLevel::Warning,
274                Severity::Info => sarif::ResultLevel::Note,
275            };
276            let start_line = d.line.unwrap_or(1) as i64;
277            let location = sarif::Location::builder()
278                .physical_location(
279                    sarif::PhysicalLocation::builder()
280                        .artifact_location(sarif::ArtifactLocation::builder().uri(&d.file).build())
281                        .region(sarif::Region::builder().start_line(start_line).build())
282                        .build(),
283                )
284                .build();
285
286            sarif::Result::builder()
287                .rule_id(&d.rule.0)
288                .message(sarif::Message::builder().text(&d.message).build())
289                .level(level)
290                .locations(vec![location])
291                .build()
292        })
293        .collect();
294
295    let tool_component = sarif::ToolComponent::builder()
296        .name("exspec")
297        .version(env!("CARGO_PKG_VERSION"))
298        .rules(rules)
299        .build();
300
301    let invocation = sarif::Invocation::builder()
302        .execution_successful(true)
303        .build();
304
305    let run = sarif::Run::builder()
306        .tool(tool_component)
307        .results(results)
308        .invocations(vec![invocation])
309        .build();
310
311    let sarif_doc = sarif::Sarif::builder()
312        .version(sarif::Version::V2_1_0.to_string())
313        .schema(sarif::SCHEMA_URL)
314        .runs(vec![run])
315        .build();
316
317    serde_json::to_string_pretty(&sarif_doc).unwrap_or_else(|_| "{}".to_string())
318}
319
320/// Filter diagnostics to only include those at or above the given minimum severity.
321pub fn filter_by_severity(diagnostics: &[Diagnostic], min: Severity) -> Vec<Diagnostic> {
322    diagnostics
323        .iter()
324        .filter(|d| d.severity >= min)
325        .cloned()
326        .collect()
327}
328
/// Summary statistics from unfiltered diagnostics, for JSON/SARIF output.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SummaryStats {
    /// Number of diagnostics with `Severity::Block`.
    pub block_count: usize,
    /// Number of diagnostics with `Severity::Warn`.
    pub warn_count: usize,
    /// Number of diagnostics with `Severity::Info`.
    pub info_count: usize,
    /// `function_count` minus the number of distinct violated functions
    /// (see `count_violated_functions`); saturates at zero.
    pub pass_count: usize,
}
337
338impl SummaryStats {
339    pub fn from_diagnostics(diagnostics: &[Diagnostic], function_count: usize) -> Self {
340        let block_count = diagnostics
341            .iter()
342            .filter(|d| d.severity == Severity::Block)
343            .count();
344        let warn_count = diagnostics
345            .iter()
346            .filter(|d| d.severity == Severity::Warn)
347            .count();
348        let info_count = diagnostics
349            .iter()
350            .filter(|d| d.severity == Severity::Info)
351            .count();
352        let violated = count_violated_functions(diagnostics);
353        let pass_count = function_count.saturating_sub(violated);
354        Self {
355            block_count,
356            warn_count,
357            info_count,
358            pass_count,
359        }
360    }
361}
362
363pub fn compute_exit_code(diagnostics: &[Diagnostic], strict: bool) -> i32 {
364    for d in diagnostics {
365        if d.severity == Severity::Block {
366            return 1;
367        }
368    }
369    if strict {
370        for d in diagnostics {
371            if d.severity == Severity::Warn {
372                return 1;
373            }
374        }
375    }
376    0
377}
378
// Unit tests for all three output formats, severity filtering, summary
// statistics, and exit-code computation.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::rules::RuleId;

    // Fixture: per-function Block diagnostic at test.py:10.
    fn block_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T001"),
            severity: Severity::Block,
            file: "test.py".to_string(),
            line: Some(10),
            message: "assertion-free: test has no assertions".to_string(),
            details: None,
        }
    }

    // Fixture: per-function Warn diagnostic at test.py:5.
    fn warn_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T003"),
            severity: Severity::Warn,
            file: "test.py".to_string(),
            line: Some(5),
            message: "giant-test: 73 lines, threshold: 50".to_string(),
            details: None,
        }
    }

    // --- Terminal format ---

    #[test]
    fn terminal_format_has_summary_header() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default());
        assert!(output.starts_with("exspec v"));
        assert!(output.contains("1 test files"));
        assert!(output.contains("1 test functions"));
    }

    #[test]
    fn terminal_format_has_score_footer() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default());
        assert!(output.contains("Score: BLOCK 1 | WARN 0 | INFO 0 | PASS 0"));
    }

    #[test]
    fn terminal_format_block() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default());
        assert!(output.contains("BLOCK test.py:10 T001 assertion-free: test has no assertions"));
    }

    #[test]
    fn terminal_format_warn() {
        let output = format_terminal(&[warn_diag()], 1, 1, &ProjectMetrics::default());
        assert!(output.contains("WARN test.py:5 T003 giant-test: 73 lines, threshold: 50"));
    }

    #[test]
    fn terminal_format_multiple() {
        let output = format_terminal(
            &[block_diag(), warn_diag()],
            2,
            2,
            &ProjectMetrics::default(),
        );
        assert!(output.contains("BLOCK"));
        assert!(output.contains("WARN"));
    }

    #[test]
    fn terminal_format_empty_has_header_and_footer() {
        let output = format_terminal(&[], 0, 0, &ProjectMetrics::default());
        assert!(output.contains("exspec v"));
        assert!(output.contains("Score:"));
    }

    // --- JSON format ---

    #[test]
    fn json_format_has_version_and_summary() {
        let output = format_json(&[block_diag()], 1, 1, &ProjectMetrics::default(), None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["version"].is_string());
        assert!(parsed["summary"].is_object());
        assert_eq!(parsed["summary"]["files"], 1);
        assert_eq!(parsed["summary"]["functions"], 1);
        assert_eq!(parsed["summary"]["block"], 1);
        assert_eq!(parsed["summary"]["warn"], 0);
        assert_eq!(parsed["summary"]["pass"], 0);
    }

    #[test]
    fn json_format_has_diagnostics_and_metrics() {
        let output = format_json(&[block_diag()], 1, 1, &ProjectMetrics::default(), None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["diagnostics"].is_array());
        assert!(parsed["metrics"].is_object());
        assert_eq!(parsed["diagnostics"].as_array().unwrap().len(), 1);
    }

    #[test]
    fn json_format_empty() {
        let output = format_json(&[], 0, 0, &ProjectMetrics::default(), None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert_eq!(parsed["diagnostics"].as_array().unwrap().len(), 0);
        assert_eq!(parsed["summary"]["functions"], 0);
    }

    // --- Exit code ---

    // --- Empty result UX ---

    #[test]
    fn terminal_format_zero_files_shows_guidance() {
        let output = format_terminal(&[], 0, 0, &ProjectMetrics::default());
        assert!(
            output.contains("No test files found"),
            "expected guidance message, got: {output}"
        );
    }

    #[test]
    fn json_format_zero_files_has_guidance() {
        let output = format_json(&[], 0, 0, &ProjectMetrics::default(), None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["guidance"].is_string());
    }

    // --- pass_count multi-violation ---

    #[test]
    fn pass_count_with_multi_violation_function() {
        // Two diagnostics at the same (file, line) count as ONE violated function.
        let d1 = Diagnostic {
            rule: RuleId::new("T001"),
            severity: Severity::Block,
            file: "test.py".to_string(),
            line: Some(10),
            message: "assertion-free".to_string(),
            details: None,
        };
        let d2 = Diagnostic {
            rule: RuleId::new("T003"),
            severity: Severity::Warn,
            file: "test.py".to_string(),
            line: Some(10),
            message: "giant-test".to_string(),
            details: None,
        };
        let output = format_terminal(&[d1, d2], 1, 2, &ProjectMetrics::default());
        assert!(output.contains("PASS 1"), "expected PASS 1, got: {output}");
    }

    #[test]
    fn pass_count_excludes_file_level_diagnostics() {
        // line=None marks a file-level diagnostic; it must not reduce PASS.
        let d1 = Diagnostic {
            rule: RuleId::new("T004"),
            severity: Severity::Info,
            file: "test.py".to_string(),
            line: None,
            message: "no-parameterized".to_string(),
            details: None,
        };
        let output = format_terminal(&[d1], 1, 1, &ProjectMetrics::default());
        assert!(output.contains("PASS 1"), "expected PASS 1, got: {output}");
    }

    #[test]
    fn terminal_format_nonzero_files_no_guidance() {
        let output = format_terminal(&[], 1, 0, &ProjectMetrics::default());
        assert!(!output.contains("No test files found"));
    }

    #[test]
    fn exit_code_block_returns_1() {
        assert_eq!(compute_exit_code(&[block_diag()], false), 1);
    }

    #[test]
    fn exit_code_warn_only_returns_0() {
        assert_eq!(compute_exit_code(&[warn_diag()], false), 0);
    }

    #[test]
    fn exit_code_strict_warn_returns_1() {
        assert_eq!(compute_exit_code(&[warn_diag()], true), 1);
    }

    #[test]
    fn exit_code_empty_returns_0() {
        assert_eq!(compute_exit_code(&[], false), 0);
    }

    // --- Metrics display ---

    #[test]
    fn terminal_metrics_section_between_diagnostics_and_score() {
        let metrics = ProjectMetrics {
            mock_density_avg: 2.3,
            mock_class_max: 4,
            parameterized_ratio: 0.15,
            pbt_ratio: 0.4,
            assertion_density_avg: 1.8,
            contract_coverage: 0.2,
            ..Default::default()
        };
        let output = format_terminal(&[block_diag()], 5, 187, &metrics);
        let metrics_pos = output.find("Metrics:").expect("Metrics section missing");
        let diag_pos = output.find("BLOCK test.py").expect("diagnostic missing");
        let score_pos = output.find("Score:").expect("Score missing");
        assert!(
            diag_pos < metrics_pos,
            "Metrics should come after diagnostics"
        );
        assert!(metrics_pos < score_pos, "Metrics should come before Score");
    }

    #[test]
    fn terminal_metrics_mock_density_line() {
        let metrics = ProjectMetrics {
            mock_density_avg: 2.3,
            mock_class_max: 4,
            ..Default::default()
        };
        let output = format_terminal(&[], 1, 1, &metrics);
        assert!(
            output.contains("2.3/test (avg)"),
            "mock density avg: {output}"
        );
        assert!(
            output.contains("4 distinct classes/test (max)"),
            "mock class max: {output}"
        );
    }

    #[test]
    fn terminal_metrics_parameterized_line() {
        let metrics = ProjectMetrics {
            parameterized_ratio: 0.15,
            ..Default::default()
        };
        let output = format_terminal(&[], 5, 20, &metrics);
        assert!(output.contains("15%"), "parameterized pct: {output}");
        assert!(output.contains("3/20"), "parameterized fraction: {output}");
    }

    #[test]
    fn terminal_metrics_pbt_and_contract_file_count() {
        let metrics = ProjectMetrics {
            pbt_ratio: 0.4,
            contract_coverage: 0.2,
            ..Default::default()
        };
        let output = format_terminal(&[], 5, 1, &metrics);
        assert!(output.contains("2/5 files"), "pbt files: {output}");
        assert!(output.contains("1/5 files"), "contract files: {output}");
    }

    #[test]
    fn json_metrics_has_all_fields() {
        let metrics = ProjectMetrics {
            mock_density_avg: 1.5,
            mock_class_max: 2,
            parameterized_ratio: 0.3,
            pbt_ratio: 0.5,
            assertion_density_avg: 2.0,
            contract_coverage: 0.1,
            test_source_ratio: 0.8,
        };
        let output = format_json(&[], 1, 1, &metrics, None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        let m = &parsed["metrics"];
        assert_eq!(m["mock_density_avg"], 1.5);
        assert_eq!(m["mock_class_max"], 2);
        assert_eq!(m["parameterized_ratio"], 0.3);
        assert_eq!(m["pbt_ratio"], 0.5);
        assert_eq!(m["assertion_density_avg"], 2.0);
        assert_eq!(m["contract_coverage"], 0.1);
        assert_eq!(m["test_source_ratio"], 0.8);
    }

    #[test]
    fn json_metrics_values_are_numbers() {
        let metrics = ProjectMetrics {
            mock_density_avg: 1.0,
            mock_class_max: 3,
            ..Default::default()
        };
        let output = format_json(&[], 1, 1, &metrics, None);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["metrics"]["mock_density_avg"].is_number());
        assert!(parsed["metrics"]["mock_class_max"].is_number());
    }

    // --- SARIF format ---

    // Fixture: file-level Info diagnostic (line=None).
    fn info_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T005"),
            severity: Severity::Info,
            file: "test.py".to_string(),
            line: None,
            message: "pbt-missing".to_string(),
            details: None,
        }
    }

    fn parse_sarif(output: &str) -> serde_json::Value {
        serde_json::from_str(output).expect("SARIF should be valid JSON")
    }

    #[test]
    fn sarif_valid_json() {
        let output = format_sarif(&[block_diag()]);
        parse_sarif(&output);
    }

    #[test]
    fn sarif_has_schema_url() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert!(parsed["$schema"].is_string());
        assert!(parsed["$schema"].as_str().unwrap().contains("sarif"));
    }

    #[test]
    fn sarif_version_2_1_0() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["version"], "2.1.0");
    }

    #[test]
    fn sarif_tool_driver_name() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["tool"]["driver"]["name"], "exspec");
    }

    #[test]
    fn sarif_tool_driver_version() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(
            parsed["runs"][0]["tool"]["driver"]["version"],
            env!("CARGO_PKG_VERSION")
        );
    }

    #[test]
    fn sarif_rules_match_registry_count() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
            .as_array()
            .unwrap();
        assert_eq!(rules.len(), RULE_REGISTRY.len());
    }

    #[test]
    fn sarif_rules_have_short_description() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rule0 = &parsed["runs"][0]["tool"]["driver"]["rules"][0];
        assert!(rule0["shortDescription"].is_object());
        assert!(rule0["shortDescription"]["text"].is_string());
    }

    #[test]
    fn sarif_block_maps_to_error() {
        let output = format_sarif(&[block_diag()]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["results"][0]["level"], "error");
    }

    #[test]
    fn sarif_warn_maps_to_warning() {
        let output = format_sarif(&[warn_diag()]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["results"][0]["level"], "warning");
    }

    #[test]
    fn sarif_info_maps_to_note() {
        let output = format_sarif(&[info_diag()]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["results"][0]["level"], "note");
    }

    #[test]
    fn sarif_file_level_diag_start_line_1() {
        let output = format_sarif(&[info_diag()]);
        let parsed = parse_sarif(&output);
        let region = &parsed["runs"][0]["results"][0]["locations"][0]["physicalLocation"]["region"];
        assert_eq!(region["startLine"], 1);
    }

    #[test]
    fn sarif_result_has_location_and_uri() {
        let output = format_sarif(&[block_diag()]);
        let parsed = parse_sarif(&output);
        let loc = &parsed["runs"][0]["results"][0]["locations"][0]["physicalLocation"];
        assert_eq!(loc["artifactLocation"]["uri"], "test.py");
        assert_eq!(loc["region"]["startLine"], 10);
    }

    #[test]
    fn sarif_empty_diagnostics_empty_results() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let results = parsed["runs"][0]["results"].as_array().unwrap();
        assert!(results.is_empty());
        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
            .as_array()
            .unwrap();
        assert_eq!(rules.len(), 16);
    }

    // --- #59: filter_by_severity ---

    #[test]
    fn filter_by_severity_block_keeps_only_block() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let filtered = filter_by_severity(&diags, Severity::Block);
        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].severity, Severity::Block);
    }

    #[test]
    fn filter_by_severity_warn_keeps_block_and_warn() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let filtered = filter_by_severity(&diags, Severity::Warn);
        assert_eq!(filtered.len(), 2);
    }

    #[test]
    fn filter_by_severity_info_keeps_all() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let filtered = filter_by_severity(&diags, Severity::Info);
        assert_eq!(filtered.len(), 3);
    }

    #[test]
    fn filter_by_severity_empty_input() {
        let filtered = filter_by_severity(&[], Severity::Block);
        assert!(filtered.is_empty());
    }

    // --- #59: SummaryStats ---

    #[test]
    fn summary_stats_from_diagnostics() {
        let diags = vec![block_diag(), warn_diag(), info_diag()];
        let stats = SummaryStats::from_diagnostics(&diags, 5);
        assert_eq!(stats.block_count, 1);
        assert_eq!(stats.warn_count, 1);
        assert_eq!(stats.info_count, 1);
        // block_diag (line=10) and warn_diag (line=5) are two distinct violated
        // functions; info_diag has line=None and is excluded → pass = 5 - 2 = 3
        assert_eq!(stats.pass_count, 3);
    }

    // --- #59: Terminal filtered score ---

    #[test]
    fn terminal_format_filtered_shows_filtered_counts() {
        let filtered = vec![block_diag()]; // original had block+warn+info but filtered to block only
        let output = format_terminal(&filtered, 1, 3, &ProjectMetrics::default());
        assert!(
            output.contains("BLOCK 1 | WARN 0 | INFO 0"),
            "score should reflect filtered diagnostics: {output}"
        );
        assert!(!output.contains("WARN test.py"));
    }

    // --- #59: JSON filtered array + unfiltered summary ---

    #[test]
    fn json_filtered_diagnostics_with_unfiltered_summary() {
        let filtered = vec![block_diag()];
        let stats = SummaryStats {
            block_count: 1,
            warn_count: 1,
            info_count: 1,
            pass_count: 2,
        };
        let output = format_json(&filtered, 1, 5, &ProjectMetrics::default(), Some(&stats));
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert_eq!(
            parsed["diagnostics"].as_array().unwrap().len(),
            1,
            "diagnostics should be filtered"
        );
        assert_eq!(parsed["summary"]["warn"], 1, "summary should be unfiltered");
        assert_eq!(parsed["summary"]["info"], 1, "summary should be unfiltered");
    }

    #[test]
    fn sarif_invocations_execution_successful() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(
            parsed["runs"][0]["invocations"][0]["executionSuccessful"],
            true
        );
    }
}