//! Output formatting for exspec analysis results: terminal, JSON, SARIF,
//! and AI-prompt renderers, plus severity filtering and exit-code logic.
1use std::collections::HashSet;
2
3use crate::hints::Hint;
4use crate::metrics::ProjectMetrics;
5use crate::rules::{Diagnostic, Severity};
6
/// Output formats a report can be rendered in; each variant corresponds to
/// one of the `format_*` functions in this module.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OutputFormat {
    /// Human-readable plain-text report (see `format_terminal`).
    Terminal,
    /// Pretty-printed JSON document (see `format_json`).
    Json,
    /// SARIF 2.1.0 document for code-scanning tools (see `format_sarif`).
    Sarif,
    /// Markdown report aimed at AI assistants (see `format_ai_prompt`).
    AiPrompt,
}
14
15/// Count unique violated functions by (file, line) pairs.
16/// Only per-function diagnostics (line=Some) are counted.
17fn count_violated_functions(diagnostics: &[Diagnostic]) -> usize {
18    diagnostics
19        .iter()
20        .filter_map(|d| d.line.map(|l| (d.file.as_str(), l)))
21        .collect::<HashSet<_>>()
22        .len()
23}
24
25pub fn format_terminal(
26    diagnostics: &[Diagnostic],
27    file_count: usize,
28    function_count: usize,
29    metrics: &ProjectMetrics,
30    hints: &[Hint],
31) -> String {
32    let mut lines = Vec::new();
33
34    lines.push(format!(
35        "exspec v{} -- {} test files, {} test functions",
36        env!("CARGO_PKG_VERSION"),
37        file_count,
38        function_count,
39    ));
40
41    if file_count == 0 {
42        lines.push("No test files found. Check --lang filter or run from a directory containing test files.".to_string());
43    }
44
45    for d in diagnostics {
46        let line_str = d.line.map(|l| format!(":{l}")).unwrap_or_default();
47        lines.push(format!(
48            "{} {}{} {} {}",
49            d.severity, d.file, line_str, d.rule, d.message,
50        ));
51    }
52
53    // Metrics section
54    lines.push("Metrics:".to_string());
55    lines.push(format!(
56        "  Mock density:      {:.1}/test (avg), {} distinct classes/test (max)",
57        metrics.mock_density_avg, metrics.mock_class_max,
58    ));
59
60    let total_functions_for_param = if function_count > 0 {
61        let count = (metrics.parameterized_ratio * function_count as f64).round() as usize;
62        format!("{count}/{function_count}")
63    } else {
64        "0/0".to_string()
65    };
66    lines.push(format!(
67        "  Parameterized:     {:.0}% ({})",
68        metrics.parameterized_ratio * 100.0,
69        total_functions_for_param,
70    ));
71
72    let pbt_files = (metrics.pbt_ratio * file_count as f64).round() as usize;
73    lines.push(format!(
74        "  PBT usage:         {:.0}% ({}/{} files)",
75        metrics.pbt_ratio * 100.0,
76        pbt_files,
77        file_count,
78    ));
79
80    lines.push(format!(
81        "  Assertion density: {:.1}/test (avg)",
82        metrics.assertion_density_avg,
83    ));
84
85    let contract_files = (metrics.contract_coverage * file_count as f64).round() as usize;
86    lines.push(format!(
87        "  Contract coverage: {:.0}% ({}/{} files)",
88        metrics.contract_coverage * 100.0,
89        contract_files,
90        file_count,
91    ));
92
93    // Score section
94    let block_count = diagnostics
95        .iter()
96        .filter(|d| d.severity == Severity::Block)
97        .count();
98    let warn_count = diagnostics
99        .iter()
100        .filter(|d| d.severity == Severity::Warn)
101        .count();
102    let info_count = diagnostics
103        .iter()
104        .filter(|d| d.severity == Severity::Info)
105        .count();
106    let violated = count_violated_functions(diagnostics);
107    let pass_count = function_count.saturating_sub(violated);
108    lines.push(format!(
109        "Score: BLOCK {block_count} | WARN {warn_count} | INFO {info_count} | PASS {pass_count}",
110    ));
111
112    for hint in hints {
113        lines.push(format!("Hint [{}] {}", hint.rule, hint.title));
114        lines.push(format!("  {}", hint.message));
115    }
116
117    lines.join("\n")
118}
119
120pub fn format_json(
121    diagnostics: &[Diagnostic],
122    file_count: usize,
123    function_count: usize,
124    metrics: &ProjectMetrics,
125    unfiltered_summary: Option<&SummaryStats>,
126    hints: &[Hint],
127) -> String {
128    let (block_count, warn_count, info_count, pass_count) = if let Some(stats) = unfiltered_summary
129    {
130        (
131            stats.block_count,
132            stats.warn_count,
133            stats.info_count,
134            stats.pass_count,
135        )
136    } else {
137        let block_count = diagnostics
138            .iter()
139            .filter(|d| d.severity == Severity::Block)
140            .count();
141        let warn_count = diagnostics
142            .iter()
143            .filter(|d| d.severity == Severity::Warn)
144            .count();
145        let info_count = diagnostics
146            .iter()
147            .filter(|d| d.severity == Severity::Info)
148            .count();
149        let violated = count_violated_functions(diagnostics);
150        let pass_count = function_count.saturating_sub(violated);
151        (block_count, warn_count, info_count, pass_count)
152    };
153
154    let mut output = serde_json::json!({
155        "version": env!("CARGO_PKG_VERSION"),
156        "summary": {
157            "files": file_count,
158            "functions": function_count,
159            "block": block_count,
160            "warn": warn_count,
161            "info": info_count,
162            "pass": pass_count,
163        },
164        "diagnostics": diagnostics,
165        "metrics": serde_json::to_value(metrics).unwrap_or_default(),
166    });
167
168    if file_count == 0 {
169        output["guidance"] = serde_json::json!("No test files found. Check --lang filter or run from a directory containing test files.");
170    }
171    if !hints.is_empty() {
172        output["hints"] = serde_json::to_value(hints).unwrap_or_default();
173    }
174    serde_json::to_string_pretty(&output).unwrap_or_else(|_| "{}".to_string())
175}
176
/// Static metadata describing a single rule, as stored in `RULE_REGISTRY`.
struct RuleMeta {
    // Rule identifier, e.g. "T001"; matched against Diagnostic.rule.0.
    id: &'static str,
    // Short machine-friendly rule name, e.g. "assertion-free".
    name: &'static str,
    // One-line human-readable description (used as the SARIF shortDescription).
    short_description: &'static str,
    // Remediation advice, quoted as a blockquote in the AI-prompt output.
    guidance: &'static str,
}
183
/// Static registry of every rule's metadata. Embedded in SARIF tool output
/// and used to attach guidance text in the AI-prompt format.
const RULE_REGISTRY: &[RuleMeta] = &[
    RuleMeta {
        id: "T001",
        name: "assertion-free",
        short_description: "Test function has no assertions",
        guidance: "This test does not express a specification -- it only verifies \"no crash.\" Ask: what observable outcome should this function guarantee? Assert the return value, state change, or side effect instead.",
    },
    RuleMeta {
        id: "T002",
        name: "mock-overuse",
        short_description: "Test function uses too many mocks",
        guidance: "Too many mocks can make the test fragile and coupled to implementation. Consider using fewer mocks and testing through real collaborators where possible. Extract the core logic into a pure function that can be tested without mocks.",
    },
    RuleMeta {
        id: "T003",
        name: "giant-test",
        short_description: "Test function exceeds line count threshold",
        guidance: "A large test is hard to understand and maintain. Split it into smaller, focused tests -- each verifying one behavior. Use helper functions or parameterized tests to reduce repetition.",
    },
    RuleMeta {
        id: "T004",
        name: "no-parameterized",
        short_description: "Low ratio of parameterized tests",
        guidance: "Repeated tests with different inputs can be consolidated using parameterized/table-driven tests. This improves coverage while reducing code volume and making the test suite easier to extend.",
    },
    RuleMeta {
        id: "T005",
        name: "pbt-missing",
        short_description: "No property-based testing library imported",
        guidance: "Property-based testing (PBT) can find edge cases that example-based tests miss. Consider adding a PBT library (e.g., Hypothesis, fast-check, proptest) for functions with wide input domains.",
    },
    RuleMeta {
        id: "T006",
        name: "low-assertion-density",
        short_description: "Low assertion count per test function",
        guidance: "A test with few assertions may not fully verify the behavior under test. Ensure the test checks all relevant aspects of the output -- return values, state changes, and side effects.",
    },
    RuleMeta {
        id: "T007",
        name: "test-source-ratio",
        short_description: "Test file to source file ratio",
        guidance: "The test-to-source file ratio is outside the expected range. Consider whether test files are missing for some source modules, or whether test files are over-concentrated.",
    },
    RuleMeta {
        id: "T008",
        name: "no-contract",
        short_description: "No contract testing library used in tests",
        guidance: "Contract tests verify that API boundaries (HTTP, message queues, etc.) behave as agreed. Consider adding a contract testing library (e.g., Pact) if this project has service-to-service communication.",
    },
    RuleMeta {
        id: "T101",
        name: "how-not-what",
        short_description: "Test verifies implementation rather than behavior",
        guidance: "This test accesses private/internal members, coupling it to implementation details. Test the public interface instead -- assert on observable outputs rather than internal state.",
    },
    RuleMeta {
        id: "T102",
        name: "fixture-sprawl",
        short_description: "Test depends on too many fixtures",
        guidance: "This test depends on many fixtures, making it hard to understand what is actually being tested. Reduce fixture dependencies by inlining setup or using builder patterns.",
    },
    RuleMeta {
        id: "T103",
        name: "missing-error-test",
        short_description: "No error/exception test found in file",
        guidance: "No test verifies error/exception behavior in this file. Add tests for invalid inputs, boundary conditions, and expected failure modes.",
    },
    // NOTE(review): rule ids jump from T103 to T105 -- T104 appears to be
    // intentionally absent from the registry; confirm against the rules module.
    RuleMeta {
        id: "T105",
        name: "deterministic-no-metamorphic",
        short_description: "All assertions use exact equality, no relational checks",
        guidance: "All assertions use exact equality (==). Consider adding relational assertions (>, <, contains, matches) to express behavioral properties rather than specific outputs.",
    },
    RuleMeta {
        id: "T106",
        name: "duplicate-literal-assertion",
        short_description: "Same literal appears multiple times in assertions",
        guidance: "The same literal value appears in multiple assertions, creating a maintenance burden. Extract it into a named constant or verify the underlying relationship instead.",
    },
    RuleMeta {
        id: "T107",
        name: "assertion-roulette",
        short_description: "Multiple assertions without failure messages",
        guidance: "Multiple assertions without descriptive messages make failures hard to diagnose. Add a failure message to each assertion explaining what was expected and why.",
    },
    RuleMeta {
        id: "T108",
        name: "wait-and-see",
        short_description: "Test uses sleep/delay causing flaky tests",
        guidance: "Using sleep/delay in tests causes flakiness and slow execution. Use polling with timeouts, event-based synchronization, or dependency injection to control timing.",
    },
    RuleMeta {
        id: "T109",
        name: "undescriptive-test-name",
        short_description: "Test name does not describe behavior",
        guidance: "The test name does not describe the expected behavior. Rename it to follow the pattern: given_[context]_when_[action]_then_[outcome] or a similar descriptive convention.",
    },
    RuleMeta {
        id: "T110",
        name: "skip-only-test",
        short_description: "Test skips or marks incomplete without assertions",
        guidance: "This test is skipped or marked incomplete without assertions. Either implement the test or remove the skip marker. Dead test code erodes trust in the suite.",
    },
];
288
/// Render diagnostics as a SARIF 2.1.0 document (pretty-printed JSON).
///
/// Every rule in `RULE_REGISTRY` is embedded in the tool's driver metadata so
/// SARIF consumers can resolve rule ids even when no result references them.
/// Serialization failures degrade to an empty JSON object rather than panic.
pub fn format_sarif(diagnostics: &[Diagnostic]) -> String {
    use serde_sarif::sarif;

    // Tool-level rule descriptors built from the static registry.
    let rules: Vec<sarif::ReportingDescriptor> = RULE_REGISTRY
        .iter()
        .map(|r| {
            sarif::ReportingDescriptor::builder()
                .id(r.id)
                .name(r.name)
                // NOTE(review): the &String round-trip here looks redundant if
                // the builder accepts &str -- confirm against serde_sarif docs.
                .short_description(&String::from(r.short_description))
                .build()
        })
        .collect();

    let results: Vec<sarif::Result> = diagnostics
        .iter()
        .map(|d| {
            // Map exspec severities onto SARIF result levels:
            // Block -> error, Warn -> warning, Info -> note.
            let level = match d.severity {
                Severity::Block => sarif::ResultLevel::Error,
                Severity::Warn => sarif::ResultLevel::Warning,
                Severity::Info => sarif::ResultLevel::Note,
            };
            // File-level diagnostics (line = None) are anchored at line 1.
            let start_line = d.line.unwrap_or(1) as i64;
            let location = sarif::Location::builder()
                .physical_location(
                    sarif::PhysicalLocation::builder()
                        .artifact_location(sarif::ArtifactLocation::builder().uri(&d.file).build())
                        .region(sarif::Region::builder().start_line(start_line).build())
                        .build(),
                )
                .build();

            sarif::Result::builder()
                .rule_id(&d.rule.0)
                .message(sarif::Message::builder().text(&d.message).build())
                .level(level)
                .locations(vec![location])
                .build()
        })
        .collect();

    let tool_component = sarif::ToolComponent::builder()
        .name("exspec")
        .version(env!("CARGO_PKG_VERSION"))
        .rules(rules)
        .build();

    // The analysis itself completed; diagnostics are findings, not failures.
    let invocation = sarif::Invocation::builder()
        .execution_successful(true)
        .build();

    let run = sarif::Run::builder()
        .tool(tool_component)
        .results(results)
        .invocations(vec![invocation])
        .build();

    let sarif_doc = sarif::Sarif::builder()
        .version(sarif::Version::V2_1_0.to_string())
        .schema(sarif::SCHEMA_URL)
        .runs(vec![run])
        .build();

    serde_json::to_string_pretty(&sarif_doc).unwrap_or_else(|_| "{}".to_string())
}
354
355pub fn format_ai_prompt(
356    diagnostics: &[Diagnostic],
357    file_count: usize,
358    function_count: usize,
359    metrics: &ProjectMetrics,
360    hints: &[Hint],
361) -> String {
362    let _ = (file_count, metrics, hints);
363
364    let block_count = diagnostics
365        .iter()
366        .filter(|d| d.severity == Severity::Block)
367        .count();
368    let warn_count = diagnostics
369        .iter()
370        .filter(|d| d.severity == Severity::Warn)
371        .count();
372    let info_count = diagnostics
373        .iter()
374        .filter(|d| d.severity == Severity::Info)
375        .count();
376    let violated = count_violated_functions(diagnostics);
377    let pass_count = function_count.saturating_sub(violated);
378
379    let mut lines = Vec::new();
380    lines.push("# exspec -- Test Quality Report".to_string());
381    lines.push(String::new());
382
383    // BLOCK section
384    let block_diags: Vec<&Diagnostic> = diagnostics
385        .iter()
386        .filter(|d| d.severity == Severity::Block)
387        .collect();
388    if !block_diags.is_empty() {
389        lines.push("## BLOCK (must fix)".to_string());
390        lines.push(String::new());
391        for d in block_diags {
392            let line_str = d.line.map(|l| format!(":{l}")).unwrap_or_default();
393            lines.push(format!("### {}{}", d.file, line_str));
394            lines.push(String::new());
395            lines.push(format!("**{}**: {}", d.rule, d.message));
396            lines.push(String::new());
397            if let Some(meta) = RULE_REGISTRY.iter().find(|m| m.id == d.rule.0) {
398                for guidance_line in meta.guidance.lines() {
399                    lines.push(format!("> {guidance_line}"));
400                }
401                lines.push(String::new());
402            }
403        }
404    }
405
406    // WARN section
407    let warn_diags: Vec<&Diagnostic> = diagnostics
408        .iter()
409        .filter(|d| d.severity == Severity::Warn)
410        .collect();
411    if !warn_diags.is_empty() {
412        lines.push("## WARN (should fix)".to_string());
413        lines.push(String::new());
414        for d in warn_diags {
415            let line_str = d.line.map(|l| format!(":{l}")).unwrap_or_default();
416            lines.push(format!("### {}{}", d.file, line_str));
417            lines.push(String::new());
418            lines.push(format!("**{}**: {}", d.rule, d.message));
419            lines.push(String::new());
420            if let Some(meta) = RULE_REGISTRY.iter().find(|m| m.id == d.rule.0) {
421                for guidance_line in meta.guidance.lines() {
422                    lines.push(format!("> {guidance_line}"));
423                }
424                lines.push(String::new());
425            }
426        }
427    }
428
429    // INFO section
430    let info_diags: Vec<&Diagnostic> = diagnostics
431        .iter()
432        .filter(|d| d.severity == Severity::Info)
433        .collect();
434    if !info_diags.is_empty() {
435        lines.push("## INFO (consider)".to_string());
436        lines.push(String::new());
437        for d in info_diags {
438            let line_str = d.line.map(|l| format!(":{l}")).unwrap_or_default();
439            lines.push(format!(
440                "- {}{} {}: {}",
441                d.file, line_str, d.rule, d.message
442            ));
443        }
444        lines.push(String::new());
445    }
446
447    lines.push(format!(
448        "Score: BLOCK {block_count} | WARN {warn_count} | INFO {info_count} | PASS {pass_count}",
449    ));
450
451    lines.join("\n")
452}
453
454/// Filter diagnostics to only include those at or above the given minimum severity.
455pub fn filter_by_severity(diagnostics: &[Diagnostic], min: Severity) -> Vec<Diagnostic> {
456    diagnostics
457        .iter()
458        .filter(|d| d.severity >= min)
459        .cloned()
460        .collect()
461}
462
/// Summary statistics from unfiltered diagnostics, for JSON/SARIF output.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SummaryStats {
    // Number of BLOCK-severity diagnostics.
    pub block_count: usize,
    // Number of WARN-severity diagnostics.
    pub warn_count: usize,
    // Number of INFO-severity diagnostics.
    pub info_count: usize,
    // Test functions with no per-function diagnostic attached.
    pub pass_count: usize,
}
471
472impl SummaryStats {
473    pub fn from_diagnostics(diagnostics: &[Diagnostic], function_count: usize) -> Self {
474        let block_count = diagnostics
475            .iter()
476            .filter(|d| d.severity == Severity::Block)
477            .count();
478        let warn_count = diagnostics
479            .iter()
480            .filter(|d| d.severity == Severity::Warn)
481            .count();
482        let info_count = diagnostics
483            .iter()
484            .filter(|d| d.severity == Severity::Info)
485            .count();
486        let violated = count_violated_functions(diagnostics);
487        let pass_count = function_count.saturating_sub(violated);
488        Self {
489            block_count,
490            warn_count,
491            info_count,
492            pass_count,
493        }
494    }
495}
496
497pub fn compute_exit_code(diagnostics: &[Diagnostic], strict: bool) -> i32 {
498    for d in diagnostics {
499        if d.severity == Severity::Block {
500            return 1;
501        }
502    }
503    if strict {
504        for d in diagnostics {
505            if d.severity == Severity::Warn {
506                return 1;
507            }
508        }
509    }
510    0
511}
512
513#[cfg(test)]
514mod tests {
515    use super::*;
516    use crate::rules::RuleId;
517
    /// Fixture: a BLOCK-severity, per-function diagnostic at test.py:10.
    fn block_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T001"),
            severity: Severity::Block,
            file: "test.py".to_string(),
            line: Some(10),
            message: "assertion-free: test has no assertions".to_string(),
            details: None,
        }
    }

    /// Fixture: a WARN-severity, per-function diagnostic at test.py:5.
    fn warn_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T003"),
            severity: Severity::Warn,
            file: "test.py".to_string(),
            line: Some(5),
            message: "giant-test: 73 lines, threshold: 50".to_string(),
            details: None,
        }
    }
539
    // --- Terminal format ---

    #[test]
    fn terminal_format_has_summary_header() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default(), &[]);
        assert!(output.starts_with("exspec v"));
        assert!(output.contains("1 test files"));
        assert!(output.contains("1 test functions"));
    }

    #[test]
    fn terminal_format_has_score_footer() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default(), &[]);
        assert!(output.contains("Score: BLOCK 1 | WARN 0 | INFO 0 | PASS 0"));
    }

    // Diagnostic lines follow "<severity> <file>:<line> <rule> <message>".
    #[test]
    fn terminal_format_block() {
        let output = format_terminal(&[block_diag()], 1, 1, &ProjectMetrics::default(), &[]);
        assert!(output.contains("BLOCK test.py:10 T001 assertion-free: test has no assertions"));
    }

    #[test]
    fn terminal_format_warn() {
        let output = format_terminal(&[warn_diag()], 1, 1, &ProjectMetrics::default(), &[]);
        assert!(output.contains("WARN test.py:5 T003 giant-test: 73 lines, threshold: 50"));
    }

    #[test]
    fn terminal_format_multiple() {
        let output = format_terminal(
            &[block_diag(), warn_diag()],
            2,
            2,
            &ProjectMetrics::default(),
            &[],
        );
        assert!(output.contains("BLOCK"));
        assert!(output.contains("WARN"));
    }

    // Header and Score lines are emitted even with no diagnostics at all.
    #[test]
    fn terminal_format_empty_has_header_and_footer() {
        let output = format_terminal(&[], 0, 0, &ProjectMetrics::default(), &[]);
        assert!(output.contains("exspec v"));
        assert!(output.contains("Score:"));
    }
587
    // --- JSON format ---

    #[test]
    fn json_format_has_version_and_summary() {
        let output = format_json(&[block_diag()], 1, 1, &ProjectMetrics::default(), None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["version"].is_string());
        assert!(parsed["summary"].is_object());
        assert_eq!(parsed["summary"]["files"], 1);
        assert_eq!(parsed["summary"]["functions"], 1);
        assert_eq!(parsed["summary"]["block"], 1);
        assert_eq!(parsed["summary"]["warn"], 0);
        assert_eq!(parsed["summary"]["pass"], 0);
    }

    #[test]
    fn json_format_has_diagnostics_and_metrics() {
        let output = format_json(&[block_diag()], 1, 1, &ProjectMetrics::default(), None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["diagnostics"].is_array());
        assert!(parsed["metrics"].is_object());
        assert_eq!(parsed["diagnostics"].as_array().unwrap().len(), 1);
    }

    // An empty run still serializes a complete, valid document.
    #[test]
    fn json_format_empty() {
        let output = format_json(&[], 0, 0, &ProjectMetrics::default(), None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert_eq!(parsed["diagnostics"].as_array().unwrap().len(), 0);
        assert_eq!(parsed["summary"]["functions"], 0);
    }
619
    // --- Empty result UX ---

    #[test]
    fn terminal_format_zero_files_shows_guidance() {
        let output = format_terminal(&[], 0, 0, &ProjectMetrics::default(), &[]);
        assert!(
            output.contains("No test files found"),
            "expected guidance message, got: {output}"
        );
    }

    #[test]
    fn json_format_zero_files_has_guidance() {
        let output = format_json(&[], 0, 0, &ProjectMetrics::default(), None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["guidance"].is_string());
    }

    // --- pass_count multi-violation ---

    // Two diagnostics on the same (file, line) count as ONE violated function,
    // so with 2 functions total, PASS is 1.
    #[test]
    fn pass_count_with_multi_violation_function() {
        let d1 = Diagnostic {
            rule: RuleId::new("T001"),
            severity: Severity::Block,
            file: "test.py".to_string(),
            line: Some(10),
            message: "assertion-free".to_string(),
            details: None,
        };
        let d2 = Diagnostic {
            rule: RuleId::new("T003"),
            severity: Severity::Warn,
            file: "test.py".to_string(),
            line: Some(10),
            message: "giant-test".to_string(),
            details: None,
        };
        let output = format_terminal(&[d1, d2], 1, 2, &ProjectMetrics::default(), &[]);
        assert!(output.contains("PASS 1"), "expected PASS 1, got: {output}");
    }

    // File-level diagnostics (line = None) must not reduce PASS.
    #[test]
    fn pass_count_excludes_file_level_diagnostics() {
        let d1 = Diagnostic {
            rule: RuleId::new("T004"),
            severity: Severity::Info,
            file: "test.py".to_string(),
            line: None,
            message: "no-parameterized".to_string(),
            details: None,
        };
        let output = format_terminal(&[d1], 1, 1, &ProjectMetrics::default(), &[]);
        assert!(output.contains("PASS 1"), "expected PASS 1, got: {output}");
    }

    #[test]
    fn terminal_format_nonzero_files_no_guidance() {
        let output = format_terminal(&[], 1, 0, &ProjectMetrics::default(), &[]);
        assert!(!output.contains("No test files found"));
    }

    // --- Exit code ---

    #[test]
    fn exit_code_block_returns_1() {
        assert_eq!(compute_exit_code(&[block_diag()], false), 1);
    }

    #[test]
    fn exit_code_warn_only_returns_0() {
        assert_eq!(compute_exit_code(&[warn_diag()], false), 0);
    }

    #[test]
    fn exit_code_strict_warn_returns_1() {
        assert_eq!(compute_exit_code(&[warn_diag()], true), 1);
    }

    #[test]
    fn exit_code_empty_returns_0() {
        assert_eq!(compute_exit_code(&[], false), 0);
    }
703
    // --- Metrics display ---

    // Ordering contract: diagnostics, then Metrics, then Score.
    #[test]
    fn terminal_metrics_section_between_diagnostics_and_score() {
        let metrics = ProjectMetrics {
            mock_density_avg: 2.3,
            mock_class_max: 4,
            parameterized_ratio: 0.15,
            pbt_ratio: 0.4,
            assertion_density_avg: 1.8,
            contract_coverage: 0.2,
            ..Default::default()
        };
        let output = format_terminal(&[block_diag()], 5, 187, &metrics, &[]);
        let metrics_pos = output.find("Metrics:").expect("Metrics section missing");
        let diag_pos = output.find("BLOCK test.py").expect("diagnostic missing");
        let score_pos = output.find("Score:").expect("Score missing");
        assert!(
            diag_pos < metrics_pos,
            "Metrics should come after diagnostics"
        );
        assert!(metrics_pos < score_pos, "Metrics should come before Score");
    }

    #[test]
    fn terminal_metrics_mock_density_line() {
        let metrics = ProjectMetrics {
            mock_density_avg: 2.3,
            mock_class_max: 4,
            ..Default::default()
        };
        let output = format_terminal(&[], 1, 1, &metrics, &[]);
        assert!(
            output.contains("2.3/test (avg)"),
            "mock density avg: {output}"
        );
        assert!(
            output.contains("4 distinct classes/test (max)"),
            "mock class max: {output}"
        );
    }

    // 0.15 * 20 functions rounds to 3, displayed as "3/20".
    #[test]
    fn terminal_metrics_parameterized_line() {
        let metrics = ProjectMetrics {
            parameterized_ratio: 0.15,
            ..Default::default()
        };
        let output = format_terminal(&[], 5, 20, &metrics, &[]);
        assert!(output.contains("15%"), "parameterized pct: {output}");
        assert!(output.contains("3/20"), "parameterized fraction: {output}");
    }

    #[test]
    fn terminal_metrics_pbt_and_contract_file_count() {
        let metrics = ProjectMetrics {
            pbt_ratio: 0.4,
            contract_coverage: 0.2,
            ..Default::default()
        };
        let output = format_terminal(&[], 5, 1, &metrics, &[]);
        assert!(output.contains("2/5 files"), "pbt files: {output}");
        assert!(output.contains("1/5 files"), "contract files: {output}");
    }

    #[test]
    fn json_metrics_has_all_fields() {
        let metrics = ProjectMetrics {
            mock_density_avg: 1.5,
            mock_class_max: 2,
            parameterized_ratio: 0.3,
            pbt_ratio: 0.5,
            assertion_density_avg: 2.0,
            contract_coverage: 0.1,
            test_source_ratio: 0.8,
        };
        let output = format_json(&[], 1, 1, &metrics, None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        let m = &parsed["metrics"];
        assert_eq!(m["mock_density_avg"], 1.5);
        assert_eq!(m["mock_class_max"], 2);
        assert_eq!(m["parameterized_ratio"], 0.3);
        assert_eq!(m["pbt_ratio"], 0.5);
        assert_eq!(m["assertion_density_avg"], 2.0);
        assert_eq!(m["contract_coverage"], 0.1);
        assert_eq!(m["test_source_ratio"], 0.8);
    }

    #[test]
    fn json_metrics_values_are_numbers() {
        let metrics = ProjectMetrics {
            mock_density_avg: 1.0,
            mock_class_max: 3,
            ..Default::default()
        };
        let output = format_json(&[], 1, 1, &metrics, None, &[]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
        assert!(parsed["metrics"]["mock_density_avg"].is_number());
        assert!(parsed["metrics"]["mock_class_max"].is_number());
    }
804
    // --- SARIF format ---

    /// Fixture: an INFO-severity, file-level diagnostic (line = None).
    fn info_diag() -> Diagnostic {
        Diagnostic {
            rule: RuleId::new("T005"),
            severity: Severity::Info,
            file: "test.py".to_string(),
            line: None,
            message: "pbt-missing".to_string(),
            details: None,
        }
    }

    /// Parse SARIF output, panicking (failing the test) on invalid JSON.
    fn parse_sarif(output: &str) -> serde_json::Value {
        serde_json::from_str(output).expect("SARIF should be valid JSON")
    }
821
    #[test]
    fn sarif_valid_json() {
        let output = format_sarif(&[block_diag()]);
        parse_sarif(&output);
    }

    #[test]
    fn sarif_has_schema_url() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert!(parsed["$schema"].is_string());
        assert!(parsed["$schema"].as_str().unwrap().contains("sarif"));
    }

    #[test]
    fn sarif_version_2_1_0() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["version"], "2.1.0");
    }

    #[test]
    fn sarif_tool_driver_name() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["tool"]["driver"]["name"], "exspec");
    }

    #[test]
    fn sarif_tool_driver_version() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        assert_eq!(
            parsed["runs"][0]["tool"]["driver"]["version"],
            env!("CARGO_PKG_VERSION")
        );
    }

    // Rules are embedded even when there are no results to reference them.
    #[test]
    fn sarif_rules_match_registry_count() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
            .as_array()
            .unwrap();
        assert_eq!(rules.len(), RULE_REGISTRY.len());
    }

    #[test]
    fn sarif_rules_have_short_description() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rule0 = &parsed["runs"][0]["tool"]["driver"]["rules"][0];
        assert!(rule0["shortDescription"].is_object());
        assert!(rule0["shortDescription"]["text"].is_string());
    }

    #[test]
    fn sarif_rules_include_all_registry_entries() {
        let output = format_sarif(&[]);
        let parsed = parse_sarif(&output);
        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
            .as_array()
            .unwrap();
        for meta in RULE_REGISTRY {
            assert!(
                rules.iter().any(|rule| rule["id"] == meta.id),
                "SARIF rules array should include {} metadata",
                meta.id
            );
        }
    }

    // Severity mapping: Block -> "error".
    #[test]
    fn sarif_block_maps_to_error() {
        let output = format_sarif(&[block_diag()]);
        let parsed = parse_sarif(&output);
        assert_eq!(parsed["runs"][0]["results"][0]["level"], "error");
    }
901
902    #[test]
903    fn sarif_warn_maps_to_warning() {
904        let output = format_sarif(&[warn_diag()]);
905        let parsed = parse_sarif(&output);
906        assert_eq!(parsed["runs"][0]["results"][0]["level"], "warning");
907    }
908
909    #[test]
910    fn sarif_info_maps_to_note() {
911        let output = format_sarif(&[info_diag()]);
912        let parsed = parse_sarif(&output);
913        assert_eq!(parsed["runs"][0]["results"][0]["level"], "note");
914    }
915
916    #[test]
917    fn sarif_file_level_diag_start_line_1() {
918        let output = format_sarif(&[info_diag()]);
919        let parsed = parse_sarif(&output);
920        let region = &parsed["runs"][0]["results"][0]["locations"][0]["physicalLocation"]["region"];
921        assert_eq!(region["startLine"], 1);
922    }
923
924    #[test]
925    fn sarif_result_has_location_and_uri() {
926        let output = format_sarif(&[block_diag()]);
927        let parsed = parse_sarif(&output);
928        let loc = &parsed["runs"][0]["results"][0]["locations"][0]["physicalLocation"];
929        assert_eq!(loc["artifactLocation"]["uri"], "test.py");
930        assert_eq!(loc["region"]["startLine"], 10);
931    }
932
933    #[test]
934    fn sarif_empty_diagnostics_empty_results() {
935        let output = format_sarif(&[]);
936        let parsed = parse_sarif(&output);
937        let results = parsed["runs"][0]["results"].as_array().unwrap();
938        assert!(results.is_empty());
939        let rules = parsed["runs"][0]["tool"]["driver"]["rules"]
940            .as_array()
941            .unwrap();
942        assert_eq!(rules.len(), RULE_REGISTRY.len());
943    }
944
945    // --- #59: filter_by_severity ---
946
947    #[test]
948    fn filter_by_severity_block_keeps_only_block() {
949        let diags = vec![block_diag(), warn_diag(), info_diag()];
950        let filtered = filter_by_severity(&diags, Severity::Block);
951        assert_eq!(filtered.len(), 1);
952        assert_eq!(filtered[0].severity, Severity::Block);
953    }
954
955    #[test]
956    fn filter_by_severity_warn_keeps_block_and_warn() {
957        let diags = vec![block_diag(), warn_diag(), info_diag()];
958        let filtered = filter_by_severity(&diags, Severity::Warn);
959        assert_eq!(filtered.len(), 2);
960    }
961
962    #[test]
963    fn filter_by_severity_info_keeps_all() {
964        let diags = vec![block_diag(), warn_diag(), info_diag()];
965        let filtered = filter_by_severity(&diags, Severity::Info);
966        assert_eq!(filtered.len(), 3);
967    }
968
969    #[test]
970    fn filter_by_severity_empty_input() {
971        let filtered = filter_by_severity(&[], Severity::Block);
972        assert!(filtered.is_empty());
973    }
974
975    // --- #59: SummaryStats ---
976
977    #[test]
978    fn summary_stats_from_diagnostics() {
979        let diags = vec![block_diag(), warn_diag(), info_diag()];
980        let stats = SummaryStats::from_diagnostics(&diags, 5);
981        assert_eq!(stats.block_count, 1);
982        assert_eq!(stats.warn_count, 1);
983        assert_eq!(stats.info_count, 1);
984        // 3 distinct (file,line) violated out of 5 → pass=3 except info_diag has line=None
985        // block_diag line=10, warn_diag line=5, info_diag line=None → 2 violated → pass=3
986        assert_eq!(stats.pass_count, 3);
987    }
988
989    // --- #59: Terminal filtered score ---
990
991    #[test]
992    fn terminal_format_filtered_shows_filtered_counts() {
993        let filtered = vec![block_diag()]; // original had block+warn+info but filtered to block only
994        let output = format_terminal(&filtered, 1, 3, &ProjectMetrics::default(), &[]);
995        assert!(
996            output.contains("BLOCK 1 | WARN 0 | INFO 0"),
997            "score should reflect filtered diagnostics: {output}"
998        );
999        assert!(!output.contains("WARN test.py"));
1000    }
1001
1002    // --- #59: JSON filtered array + unfiltered summary ---
1003
1004    #[test]
1005    fn json_filtered_diagnostics_with_unfiltered_summary() {
1006        let filtered = vec![block_diag()];
1007        let stats = SummaryStats {
1008            block_count: 1,
1009            warn_count: 1,
1010            info_count: 1,
1011            pass_count: 2,
1012        };
1013        let output = format_json(
1014            &filtered,
1015            1,
1016            5,
1017            &ProjectMetrics::default(),
1018            Some(&stats),
1019            &[],
1020        );
1021        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();
1022        assert_eq!(
1023            parsed["diagnostics"].as_array().unwrap().len(),
1024            1,
1025            "diagnostics should be filtered"
1026        );
1027        assert_eq!(parsed["summary"]["warn"], 1, "summary should be unfiltered");
1028        assert_eq!(parsed["summary"]["info"], 1, "summary should be unfiltered");
1029    }
1030
1031    #[test]
1032    fn sarif_invocations_execution_successful() {
1033        let output = format_sarif(&[]);
1034        let parsed = parse_sarif(&output);
1035        assert_eq!(
1036            parsed["runs"][0]["invocations"][0]["executionSuccessful"],
1037            true
1038        );
1039    }
1040
1041    // --- AI-FMT-01: BLOCK diagnostic with guidance ---
1042
1043    #[test]
1044    fn ai_prompt_block_has_section_and_guidance() {
1045        // Given: T001 (assertion-free) の BLOCK diagnostic 1件
1046        let diag = block_diag();
1047        // When: format_ai_prompt() を呼ぶ
1048        let output = format_ai_prompt(&[diag], 1, 1, &ProjectMetrics::default(), &[]);
1049        // Then: "## BLOCK" セクション、file:line + rule + message、guidance (blockquote `>`) が含まれる
1050        assert!(
1051            output.contains("## BLOCK"),
1052            "should contain BLOCK section: {output}"
1053        );
1054        assert!(
1055            output.contains("test.py:10"),
1056            "should contain file:line: {output}"
1057        );
1058        assert!(output.contains("T001"), "should contain rule id: {output}");
1059        assert!(
1060            output.contains("assertion-free"),
1061            "should contain rule name in message: {output}"
1062        );
1063        let has_blockquote = output.lines().any(|l| l.starts_with('>'));
1064        assert!(
1065            has_blockquote,
1066            "should contain guidance blockquote: {output}"
1067        );
1068    }
1069
1070    // --- AI-FMT-02: WARN diagnostic with guidance ---
1071
1072    #[test]
1073    fn ai_prompt_warn_has_section_and_guidance() {
1074        // Given: T003 (giant-test) の WARN diagnostic 1件
1075        let diag = warn_diag();
1076        // When: format_ai_prompt() を呼ぶ
1077        let output = format_ai_prompt(&[diag], 1, 1, &ProjectMetrics::default(), &[]);
1078        // Then: "## WARN" セクション、diagnostic、guidance が含まれる
1079        assert!(
1080            output.contains("## WARN"),
1081            "should contain WARN section: {output}"
1082        );
1083        assert!(
1084            output.contains("test.py:5"),
1085            "should contain file:line: {output}"
1086        );
1087        assert!(output.contains("T003"), "should contain rule id: {output}");
1088        let has_blockquote = output.lines().any(|l| l.starts_with('>'));
1089        assert!(
1090            has_blockquote,
1091            "should contain guidance blockquote: {output}"
1092        );
1093    }
1094
1095    // --- AI-FMT-03: INFO diagnostic without guidance ---
1096
1097    #[test]
1098    fn ai_prompt_info_has_section_no_guidance() {
1099        // Given: T005 (pbt-missing) の INFO diagnostic 1件
1100        let diag = info_diag();
1101        // When: format_ai_prompt() を呼ぶ
1102        let output = format_ai_prompt(&[diag], 1, 1, &ProjectMetrics::default(), &[]);
1103        // Then: "## INFO" セクション、`- ` で始まる1行表示、blockquote (`>`) なし
1104        assert!(
1105            output.contains("## INFO"),
1106            "should contain INFO section: {output}"
1107        );
1108        let has_bullet = output.lines().any(|l| l.starts_with("- "));
1109        assert!(
1110            has_bullet,
1111            "INFO items should use bullet `- ` format: {output}"
1112        );
1113        let has_blockquote = output.lines().any(|l| l.starts_with('>'));
1114        assert!(
1115            !has_blockquote,
1116            "INFO should NOT contain guidance blockquote: {output}"
1117        );
1118    }
1119
1120    // --- AI-FMT-04: Mixed severities grouped correctly ---
1121
1122    #[test]
1123    fn ai_prompt_mixed_severities_ordered_block_warn_info() {
1124        // Given: BLOCK + WARN + INFO の3件
1125        let diags = [block_diag(), warn_diag(), info_diag()];
1126        // When: format_ai_prompt() を呼ぶ
1127        let output = format_ai_prompt(&diags, 1, 3, &ProjectMetrics::default(), &[]);
1128        // Then: "## BLOCK" < "## WARN" < "## INFO" の順序
1129        let block_pos = output.find("## BLOCK").expect("## BLOCK not found");
1130        let warn_pos = output.find("## WARN").expect("## WARN not found");
1131        let info_pos = output.find("## INFO").expect("## INFO not found");
1132        assert!(
1133            block_pos < warn_pos,
1134            "## BLOCK should appear before ## WARN"
1135        );
1136        assert!(warn_pos < info_pos, "## WARN should appear before ## INFO");
1137    }
1138
1139    // --- AI-FMT-05: Empty diagnostics ---
1140
1141    #[test]
1142    fn ai_prompt_empty_has_header_and_score_no_sections() {
1143        // Given: 空の diagnostics
1144        // When: format_ai_prompt() を呼ぶ
1145        let output = format_ai_prompt(&[], 0, 0, &ProjectMetrics::default(), &[]);
1146        // Then: "# exspec" ヘッダーと "Score:" 行が含まれ、"## BLOCK/WARN/INFO" は含まれない
1147        assert!(
1148            output.contains("# exspec"),
1149            "should contain # exspec header: {output}"
1150        );
1151        assert!(
1152            output.contains("Score:"),
1153            "should contain Score: line: {output}"
1154        );
1155        assert!(
1156            !output.contains("## BLOCK"),
1157            "empty result should not contain ## BLOCK: {output}"
1158        );
1159        assert!(
1160            !output.contains("## WARN"),
1161            "empty result should not contain ## WARN: {output}"
1162        );
1163        assert!(
1164            !output.contains("## INFO"),
1165            "empty result should not contain ## INFO: {output}"
1166        );
1167    }
1168
1169    // --- AI-FMT-06: All rules have non-empty guidance ---
1170
1171    #[test]
1172    fn all_rules_have_non_empty_guidance() {
1173        // Given: RULE_REGISTRY の全エントリ
1174        // When: 各 guidance を検査
1175        // Then: 全て空文字列でない
1176        for meta in RULE_REGISTRY {
1177            assert!(
1178                !meta.guidance.is_empty(),
1179                "rule {} ({}) should have non-empty guidance",
1180                meta.id,
1181                meta.name
1182            );
1183        }
1184    }
1185
1186    // --- AI-FMT-08: Score line present ---
1187
1188    #[test]
1189    fn ai_prompt_score_line_reflects_counts() {
1190        // Given: BLOCK 1件 + WARN 1件 の diagnostics
1191        let diags = [block_diag(), warn_diag()];
1192        // When: format_ai_prompt() を呼ぶ
1193        let output = format_ai_prompt(&diags, 1, 2, &ProjectMetrics::default(), &[]);
1194        // Then: "Score: BLOCK 1 | WARN 1" を含む行が存在
1195        assert!(
1196            output.contains("Score: BLOCK 1 | WARN 1"),
1197            "should contain score summary: {output}"
1198        );
1199    }
1200}