// libverify_core/coverage.rs
1//! LCOV-based coverage analysis for change request diffs.
2//!
3//! Pure functions that parse LCOV reports, extract changed lines from
4//! unified diffs, and classify test coverage severity. No I/O — all
5//! inputs are string slices provided by the CLI layer.
6//!
7//! `classify_coverage_severity` is formally verified by Creusot in
8//! `gh-verify-verif`.
9
10use std::collections::BTreeMap;
11
12use serde::{Deserialize, Serialize};
13
14use crate::verdict::Severity;
15
16// --- Types ---
17
/// LCOV parse error. Explicit enum (no anyhow in core crate).
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ParseError {
    /// A line whose prefix was recognized (e.g. `DA:`) but whose payload
    /// could not be parsed. `line_number` is 1-indexed into the input;
    /// `content` is the offending line, trimmed.
    MalformedLine { line_number: usize, content: String },
}
23
/// Per-file coverage data. Corresponds to one SF..end_of_record block in LCOV.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileCoverage {
    /// Path exactly as recorded on the LCOV `SF:` line (often absolute).
    pub path: String,
    /// line_number -> execution_count
    pub lines: BTreeMap<u32, u32>,
    /// LF: lines found
    pub lines_found: u32,
    /// LH: lines hit
    pub lines_hit: u32,
}
35
/// Parsed coverage report, format-agnostic intermediate representation.
///
/// Produced by [`parse_lcov`]; consumed by [`analyze_coverage`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CoverageReport {
    /// One entry per SF..end_of_record block, in input order.
    pub files: Vec<FileCoverage>,
}
41
/// Per-file coverage analysis result for change request changed lines.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileAnalysis {
    /// Change-request-side path of the analyzed file.
    pub path: String,
    /// Number of changed (added) lines considered.
    pub changed_lines: u32,
    /// How many of `changed_lines` have an execution count > 0.
    pub covered_lines: u32,
    /// Changed lines with no coverage, in the order they were supplied.
    pub uncovered_line_numbers: Vec<u32>,
    /// `covered_lines / changed_lines * 100`, in 0.0..=100.0.
    pub coverage_pct: f64,
}
51
/// Aggregate coverage analysis for the entire change request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CoverageAnalysis {
    /// Per-file breakdown; files with no changed lines are omitted.
    pub files: Vec<FileAnalysis>,
    /// Total changed lines across all analyzed files.
    pub total_changed: u32,
    /// Total covered changed lines across all analyzed files.
    pub total_covered: u32,
    /// `total_covered / total_changed * 100`; 100.0 when nothing changed.
    pub overall_pct: f64,
}
60
61// --- Functions ---
62
63/// Parse LCOV format content into a `CoverageReport`.
64///
65/// State machine: SF starts a record, DA adds line data, LF/LH set
66/// summary counts, end_of_record finalizes. TN, FN*, BR* lines are
67/// ignored (we only need line coverage).
68pub fn parse_lcov(content: &str) -> Result<CoverageReport, ParseError> {
69    let mut files = Vec::new();
70    let mut current_path: Option<String> = None;
71    let mut current_lines: BTreeMap<u32, u32> = BTreeMap::new();
72    let mut lines_found: u32 = 0;
73    let mut lines_hit: u32 = 0;
74
75    for (idx, raw_line) in content.lines().enumerate() {
76        let line = raw_line.trim();
77        if line.is_empty() {
78            continue;
79        }
80
81        if let Some(path) = line.strip_prefix("SF:") {
82            // Start a new file record
83            current_path = Some(path.to_string());
84            current_lines.clear();
85            lines_found = 0;
86            lines_hit = 0;
87        } else if let Some(da) = line.strip_prefix("DA:") {
88            // DA:<line_number>,<execution_count>[,<checksum>]
89            let parts: Vec<&str> = da.split(',').collect();
90            if parts.len() < 2 {
91                return Err(ParseError::MalformedLine {
92                    line_number: idx + 1,
93                    content: line.to_string(),
94                });
95            }
96            let line_no: u32 = parts[0].parse().map_err(|_| ParseError::MalformedLine {
97                line_number: idx + 1,
98                content: line.to_string(),
99            })?;
100            let count: u32 = parts[1].parse().map_err(|_| ParseError::MalformedLine {
101                line_number: idx + 1,
102                content: line.to_string(),
103            })?;
104            current_lines.insert(line_no, count);
105        } else if let Some(lf) = line.strip_prefix("LF:") {
106            lines_found = lf.parse().unwrap_or(0);
107        } else if let Some(lh) = line.strip_prefix("LH:") {
108            lines_hit = lh.parse().unwrap_or(0);
109        } else if line == "end_of_record" {
110            if let Some(path) = current_path.take() {
111                files.push(FileCoverage {
112                    path,
113                    lines: std::mem::take(&mut current_lines),
114                    lines_found,
115                    lines_hit,
116                });
117            }
118            lines_found = 0;
119            lines_hit = 0;
120        }
121        // TN, FN, FNDA, FNF, FNH, BRDA, BRF, BRH — silently ignored
122    }
123
124    Ok(CoverageReport { files })
125}
126
/// Extract added line numbers from a unified diff patch.
///
/// Parses `@@ -a,b +c,d @@` hunk headers to track the new-file line
/// position, then collects lines prefixed with `+` (additions).
///
/// Lines beginning with `\` (the `\ No newline at end of file` marker)
/// belong to neither side of the diff and must not advance the counter;
/// treating them as context would shift every later addition by one.
pub fn extract_changed_lines(patch: &str) -> Vec<u32> {
    let mut result = Vec::new();
    let mut new_line: u32 = 0;

    for line in patch.lines() {
        if line.starts_with("@@") {
            // Parse the `+c[,d]` portion of the `@@ -a,b +c,d @@` header.
            if let Some(plus_pos) = line.find('+') {
                let after_plus = &line[plus_pos + 1..];
                // Stop at the first char that is neither digit nor comma
                // (the space before the closing `@@`).
                let end = after_plus
                    .find(|c: char| !c.is_ascii_digit() && c != ',')
                    .unwrap_or(after_plus.len());
                let range_str = &after_plus[..end];
                // `+c,d` or bare `+c` — only the start position matters.
                let start_str = range_str.split(',').next().unwrap_or("0");
                new_line = start_str.parse().unwrap_or(0);
            }
        } else if line.starts_with('+') {
            // Addition line — exists only in the new file.
            result.push(new_line);
            new_line += 1;
        } else if line.starts_with('-') || line.starts_with('\\') {
            // Deletion or `\ No newline at end of file` marker —
            // neither advances the new-file line counter.
        } else {
            // Context line — present in both files, advances the counter.
            new_line += 1;
        }
    }

    result
}
162
/// Check whether an LCOV path (often absolute) matches a change request path (relative).
///
/// Returns true if `lcov_path` ends with `/<pr_path>`, or if they are
/// equal after stripping `./` prefixes. Backslashes in the LCOV path
/// (Windows) are normalized to forward slashes first.
pub fn resolve_path(lcov_path: &str, pr_path: &str) -> bool {
    // Normalize Windows separators, then drop a leading "./" on both sides.
    let unified = lcov_path.replace('\\', "/");
    let lcov = unified.strip_prefix("./").unwrap_or(unified.as_str());
    let pr = pr_path.strip_prefix("./").unwrap_or(pr_path);

    // Exact match, or the (usually absolute) LCOV path ends with "/<pr path>".
    lcov == pr || lcov.ends_with(&format!("/{pr}"))
}
180
181/// Analyze coverage of change request changed lines against a parsed report.
182///
183/// For each changed file, finds the matching LCOV entry via `resolve_path`,
184/// then checks which changed lines have hit_count > 0. Files not present
185/// in the report are treated as 0% covered (safe default).
186pub fn analyze_coverage(
187    report: &CoverageReport,
188    changed_files: &[(String, Vec<u32>)],
189) -> CoverageAnalysis {
190    let mut file_analyses = Vec::new();
191    let mut total_changed: u32 = 0;
192    let mut total_covered: u32 = 0;
193
194    for (path, changed_lines) in changed_files {
195        if changed_lines.is_empty() {
196            continue;
197        }
198
199        // Find matching LCOV entry
200        let file_cov = report.files.iter().find(|f| resolve_path(&f.path, path));
201
202        let mut covered: u32 = 0;
203        let mut uncovered_lines = Vec::new();
204
205        for &line_no in changed_lines {
206            match file_cov {
207                Some(fc) => match fc.lines.get(&line_no) {
208                    Some(&count) if count > 0 => covered += 1,
209                    _ => uncovered_lines.push(line_no),
210                },
211                None => uncovered_lines.push(line_no),
212            }
213        }
214
215        let changed_count = changed_lines.len() as u32;
216        let pct = if changed_count > 0 {
217            (covered as f64 / changed_count as f64) * 100.0
218        } else {
219            100.0
220        };
221
222        total_changed += changed_count;
223        total_covered += covered;
224
225        file_analyses.push(FileAnalysis {
226            path: path.clone(),
227            changed_lines: changed_count,
228            covered_lines: covered,
229            uncovered_line_numbers: uncovered_lines,
230            coverage_pct: pct,
231        });
232    }
233
234    let overall_pct = if total_changed > 0 {
235        (total_covered as f64 / total_changed as f64) * 100.0
236    } else {
237        100.0
238    };
239
240    CoverageAnalysis {
241        files: file_analyses,
242        total_changed,
243        total_covered,
244        overall_pct,
245    }
246}
247
/// Classify coverage severity using integer arithmetic (no f64).
///
/// Matches the Creusot predicate in `gh-verify-verif`:
/// - `total == 0` => Pass (no changed lines to cover)
/// - `covered * 100 > warn_pct * total` => Pass
/// - `covered * 100 > error_pct * total` => Warning
/// - otherwise => Error
///
/// Thresholds are strict (`>`): exactly meeting `warn_pct` is NOT a Pass.
/// NOTE(review): assumes `covered * 100` and `pct * total` fit in `usize`
/// (a debug build would panic on overflow) — fine for realistic line counts;
/// confirm against the verified contract's bounds.
pub fn classify_coverage_severity(
    covered: usize,
    total: usize,
    warn_pct: usize,
    error_pct: usize,
) -> Severity {
    // No changed lines: nothing to cover, vacuously passing.
    if total == 0 {
        return Severity::Pass;
    }
    // covered/total > pct/100  <=>  covered * 100 > pct * total (no floats).
    if covered * 100 > warn_pct * total {
        Severity::Pass
    } else if covered * 100 > error_pct * total {
        Severity::Warning
    } else {
        Severity::Error
    }
}
272
273#[cfg(test)]
274mod tests {
275    use super::*;
276
277    // --- Helpers ---
278
279    /// Build minimal LCOV content for a single file with given line coverage.
280    fn make_lcov(path: &str, lines: &[(u32, u32)]) -> String {
281        let mut out = format!("SF:{path}\n");
282        for &(line, count) in lines {
283            out.push_str(&format!("DA:{line},{count}\n"));
284        }
285        out.push_str(&format!("LF:{}\n", lines.len()));
286        let hit = lines.iter().filter(|(_, c)| *c > 0).count();
287        out.push_str(&format!("LH:{hit}\n"));
288        out.push_str("end_of_record\n");
289        out
290    }
291
292    /// Build LCOV content for multiple files.
293    fn make_multi_lcov(entries: &[(&str, &[(u32, u32)])]) -> String {
294        entries
295            .iter()
296            .map(|(path, lines)| make_lcov(path, lines))
297            .collect::<Vec<_>>()
298            .join("")
299    }
300
301    /// Build a unified diff patch with a single hunk adding lines starting at `start`.
302    fn make_patch(start: u32, added_lines: &[&str]) -> String {
303        let count = added_lines.len() as u32;
304        let mut out = format!("@@ -1,0 +{start},{count} @@\n");
305        for line in added_lines {
306            out.push_str(&format!("+{line}\n"));
307        }
308        out
309    }
310
311    // --- parse_lcov ---
312
313    /// WHY: Verifies the minimal happy path — one SF/DA/LF/LH/end_of_record
314    /// block parses into a single FileCoverage with correct line data.
315    #[test]
316    fn parse_lcov_single_file() {
317        let content = make_lcov("/src/main.rs", &[(1, 5), (2, 0), (3, 1)]);
318        let report = parse_lcov(&content).unwrap();
319        assert_eq!(report.files.len(), 1);
320        assert_eq!(report.files[0].path, "/src/main.rs");
321        assert_eq!(report.files[0].lines.len(), 3);
322        assert_eq!(report.files[0].lines[&1], 5);
323        assert_eq!(report.files[0].lines[&2], 0);
324        assert_eq!(report.files[0].lines_found, 3);
325        assert_eq!(report.files[0].lines_hit, 2);
326    }
327
328    /// WHY: Multiple SF..end_of_record blocks must be parsed as separate
329    /// FileCoverage entries — a real LCOV report covers many files.
330    #[test]
331    fn parse_lcov_multiple_files() {
332        let content =
333            make_multi_lcov(&[("/src/a.rs", &[(1, 1)]), ("/src/b.rs", &[(1, 0), (2, 3)])]);
334        let report = parse_lcov(&content).unwrap();
335        assert_eq!(report.files.len(), 2);
336        assert_eq!(report.files[0].path, "/src/a.rs");
337        assert_eq!(report.files[1].path, "/src/b.rs");
338        assert_eq!(report.files[1].lines.len(), 2);
339    }
340
341    /// WHY: LCOV emits BRDA/BRF/BRH/FN/FNDA/FNF/FNH lines that we must
342    /// silently skip — they must not corrupt the line-coverage state machine.
343    #[test]
344    fn parse_lcov_ignores_branch_data() {
345        let content = "\
346TN:test_name
347SF:/src/main.rs
348FN:1,main
349FNDA:1,main
350FNF:1
351FNH:1
352DA:1,1
353DA:2,0
354BRDA:1,0,0,1
355BRF:1
356BRH:1
357LF:2
358LH:1
359end_of_record
360";
361        let report = parse_lcov(content).unwrap();
362        assert_eq!(report.files.len(), 1);
363        // Only DA lines contribute to the lines map
364        assert_eq!(report.files[0].lines.len(), 2);
365        assert_eq!(report.files[0].lines[&1], 1);
366    }
367
368    /// WHY: Empty input is valid — no coverage data means no files.
369    /// Must not panic or error.
370    #[test]
371    fn parse_lcov_empty_content() {
372        let report = parse_lcov("").unwrap();
373        assert!(report.files.is_empty());
374    }
375
376    /// WHY: A DA line without the required comma-separated pair must produce
377    /// a clear ParseError rather than silently corrupting data.
378    #[test]
379    fn parse_lcov_malformed_da() {
380        let content = "SF:/src/main.rs\nDA:bad\nend_of_record\n";
381        let err = parse_lcov(content).unwrap_err();
382        match err {
383            ParseError::MalformedLine {
384                line_number,
385                content,
386            } => {
387                assert_eq!(line_number, 2);
388                assert!(content.contains("DA:bad"));
389            }
390        }
391    }
392
393    // --- extract_changed_lines ---
394
395    /// WHY: Single-hunk patches are the common case. The line numbers must
396    /// correspond to the new-file side of the diff.
397    #[test]
398    fn extract_changed_lines_single_hunk() {
399        let patch = make_patch(10, &["line1", "line2", "line3"]);
400        let lines = extract_changed_lines(&patch);
401        assert_eq!(lines, vec![10, 11, 12]);
402    }
403
404    /// WHY: Multi-hunk diffs appear when edits are spread across a file.
405    /// Each hunk resets the line counter to its +c start position.
406    #[test]
407    fn extract_changed_lines_multiple_hunks() {
408        let patch = "\
409@@ -1,3 +1,4 @@
410 context
411+added_at_2
412 context
413@@ -10,2 +11,3 @@
414 context
415+added_at_12
416+added_at_13
417";
418        let lines = extract_changed_lines(patch);
419        assert_eq!(lines, vec![2, 12, 13]);
420    }
421
422    /// WHY: Deletion-only patches have no added lines. The result must be
423    /// empty — deletions cannot be "uncovered" by tests.
424    #[test]
425    fn extract_changed_lines_deletions_only() {
426        let patch = "\
427@@ -1,3 +1,1 @@
428-removed1
429-removed2
430 kept
431";
432        let lines = extract_changed_lines(patch);
433        assert!(lines.is_empty());
434    }
435
436    /// WHY: `++i;` is valid code (C/C++ increment). The old `stripped.starts_with("++")`
437    /// check incorrectly excluded such lines. GitHub's patch field never contains
438    /// `+++ b/file` headers, so the check was unnecessary and harmful.
439    #[test]
440    fn extract_changed_lines_increment_operator() {
441        let patch = "@@ -1,2 +1,3 @@\n counter = 0;\n+    ++counter;\n other();\n";
442        let lines = extract_changed_lines(patch);
443        assert_eq!(lines, vec![2], "++counter line must be included");
444    }
445
446    // --- resolve_path ---
447
448    /// WHY: LCOV typically records absolute paths. The CR uses repo-relative
449    /// paths. Suffix matching bridges this gap.
450    #[test]
451    fn resolve_path_absolute_to_relative() {
452        assert!(resolve_path(
453            "/home/user/project/src/main.rs",
454            "src/main.rs"
455        ));
456    }
457
458    /// WHY: When LCOV path equals CR path exactly (both relative), it must match.
459    #[test]
460    fn resolve_path_exact() {
461        assert!(resolve_path("src/main.rs", "src/main.rs"));
462    }
463
464    /// WHY: Non-matching paths must return false to avoid spurious coverage
465    /// attribution across unrelated files.
466    #[test]
467    fn resolve_path_no_match() {
468        assert!(!resolve_path("/src/other.rs", "src/main.rs"));
469    }
470
471    /// WHY: Windows LCOV paths use backslashes. Without normalization,
472    /// suffix matching fails and changed files appear as 0% covered.
473    #[test]
474    fn resolve_path_windows_backslash() {
475        assert!(resolve_path("C:\\work\\repo\\src\\foo.rs", "src/foo.rs"));
476    }
477
478    // --- analyze_coverage ---
479
480    /// WHY: When all changed lines have hit_count > 0, the analysis must
481    /// report 100% coverage with no uncovered lines.
482    #[test]
483    fn analyze_coverage_full() {
484        let report = parse_lcov(&make_lcov("src/main.rs", &[(1, 1), (2, 3), (3, 1)])).unwrap();
485        let changed = vec![("src/main.rs".to_string(), vec![1, 2, 3])];
486        let analysis = analyze_coverage(&report, &changed);
487        assert_eq!(analysis.total_changed, 3);
488        assert_eq!(analysis.total_covered, 3);
489        assert!((analysis.overall_pct - 100.0).abs() < f64::EPSILON);
490        assert!(analysis.files[0].uncovered_line_numbers.is_empty());
491    }
492
493    /// WHY: Partial coverage must correctly compute the ratio and identify
494    /// which specific lines are uncovered — the CLI uses this for suggestions.
495    #[test]
496    fn analyze_coverage_partial() {
497        let report = parse_lcov(&make_lcov("src/main.rs", &[(1, 1), (2, 0), (3, 1)])).unwrap();
498        let changed = vec![("src/main.rs".to_string(), vec![1, 2, 3])];
499        let analysis = analyze_coverage(&report, &changed);
500        assert_eq!(analysis.total_covered, 2);
501        assert_eq!(analysis.total_changed, 3);
502        // 2/3 ≈ 66.67%
503        assert!((analysis.overall_pct - 66.666_666_666_666_6).abs() < 0.01);
504        assert_eq!(analysis.files[0].uncovered_line_numbers, vec![2]);
505    }
506
507    /// WHY: Files absent from the LCOV report must be treated as 0% covered
508    /// (safe default) — not silently skipped, which would inflate coverage.
509    #[test]
510    fn analyze_coverage_missing_file() {
511        let report = parse_lcov(&make_lcov("src/other.rs", &[(1, 1)])).unwrap();
512        let changed = vec![("src/missing.rs".to_string(), vec![1, 2])];
513        let analysis = analyze_coverage(&report, &changed);
514        assert_eq!(analysis.total_covered, 0);
515        assert_eq!(analysis.total_changed, 2);
516        assert!((analysis.overall_pct - 0.0).abs() < f64::EPSILON);
517    }
518
519    // --- Mutant-killing tests ---
520    //
521    // Each test below targets a specific surviving mutant. The test name
522    // encodes the line, operator substitution, and function under test.
523
524    /// Kills L90: `replace < with >` in parse_lcov.
525    /// Original: `if parts.len() < 2` rejects DA lines with fewer than 2 fields.
526    /// Mutant (`>`): would reject DA lines with MORE than 2 fields (e.g. with checksum),
527    /// while accepting single-field DA lines. This test provides DA with a checksum
528    /// (3 fields) which must succeed, AND single-field DA which must fail.
529    #[test]
530    fn parse_lcov_da_with_checksum_accepted() {
531        // DA with 3 fields (checksum) must parse successfully
532        let content = "SF:/src/a.rs\nDA:1,5,abc123\nLF:1\nLH:1\nend_of_record\n";
533        let report = parse_lcov(content).unwrap();
534        assert_eq!(report.files[0].lines[&1], 5);
535    }
536
537    #[test]
538    fn parse_lcov_da_single_field_rejected() {
539        // DA with only 1 field must be rejected
540        let content = "SF:/src/a.rs\nDA:1\nend_of_record\n";
541        assert!(parse_lcov(content).is_err());
542    }
543
544    /// Kills L97: `replace + with *` and `replace + with -` in parse_lcov.
545    /// Original: error line_number is `idx + 1` (1-indexed).
546    /// Mutant `*`: line_number = idx * 1 = idx (0-indexed, off by one).
547    /// Mutant `-`: line_number = idx - 1 (off by two).
548    /// We place the bad DA on the first line (idx=0) so `0+1=1`, `0*1=0`, `0-1` wraps.
549    /// Also test at idx=2 where `2+1=3`, `2*1=2`, `2-1=1` all differ.
550    #[test]
551    fn parse_lcov_error_line_number_for_bad_line_no() {
552        // Bad line number parse at line 3 of content (idx=2)
553        let content = "SF:/src/a.rs\nDA:1,1\nDA:xyz,1\nend_of_record\n";
554        let err = parse_lcov(content).unwrap_err();
555        match err {
556            ParseError::MalformedLine { line_number, .. } => {
557                assert_eq!(line_number, 3, "error must report 1-indexed line number");
558            }
559        }
560    }
561
562    /// Kills L101: `replace + with *` and `replace + with -` in parse_lcov.
563    /// Same as above but for the execution_count parse failure path.
564    /// The line_number field parses fine, but the count field is invalid.
565    #[test]
566    fn parse_lcov_error_line_number_for_bad_count() {
567        // Line number parses OK, but count is invalid. Error on content line 3 (idx=2).
568        let content = "SF:/src/a.rs\nDA:1,1\nDA:2,xyz\nend_of_record\n";
569        let err = parse_lcov(content).unwrap_err();
570        match err {
571            ParseError::MalformedLine { line_number, .. } => {
572                assert_eq!(line_number, 3, "error must report 1-indexed line number");
573            }
574        }
575    }
576
577    /// Kills L142: `replace != with ==` in extract_changed_lines.
578    /// Original: `.find(|c: char| !c.is_ascii_digit() && c != ',')`
579    /// stops at any non-digit-non-comma (e.g., space or `@`).
580    /// Mutant (`==`): `.find(|c: char| !c.is_ascii_digit() && c == ',')`
581    /// only stops at comma; for a hunk header WITHOUT comma like `+5 @@`,
582    /// the mutant scans past the space/@ and returns `unwrap_or(len)`,
583    /// yielding `range_str = "5 @@"` which fails to parse, defaulting to 0.
584    #[test]
585    fn extract_changed_lines_hunk_without_comma() {
586        // Hunk header `+5 @@` has no comma — single-line hunk
587        let patch = "@@ -1,1 +5 @@\n+new_line\n";
588        let lines = extract_changed_lines(patch);
589        assert_eq!(
590            lines,
591            vec![5],
592            "single-line hunk start must parse correctly"
593        );
594    }
595
596    /// Kills L216: `replace > with ==` in analyze_coverage.
597    /// Original: `if changed_count > 0 { compute pct } else { 100.0 }`
598    /// Mutant (`==`): `if changed_count == 0 { compute pct } else { 100.0 }`
599    /// When changed_count > 0, mutant returns 100.0 instead of real pct.
600    /// When changed_count == 0, mutant tries to divide by zero (0/0 => NaN or 100.0).
601    /// The existing partial-coverage test (66.67%) already checks pct but let's
602    /// add a targeted test with exactly 1 changed line covered=0 for clarity.
603    #[test]
604    fn analyze_coverage_nonzero_changed_computes_pct() {
605        // 1 changed line, 0 covered => pct must be 0.0, not 100.0
606        let report = parse_lcov(&make_lcov("src/a.rs", &[(1, 0)])).unwrap();
607        let changed = vec![("src/a.rs".to_string(), vec![1])];
608        let analysis = analyze_coverage(&report, &changed);
609        assert_eq!(analysis.files[0].changed_lines, 1);
610        assert_eq!(analysis.files[0].covered_lines, 0);
611        assert!(
612            analysis.files[0].coverage_pct < 1.0,
613            "coverage_pct must be 0.0 when no lines are covered, got {}",
614            analysis.files[0].coverage_pct
615        );
616    }
617
618    // --- classify_coverage_severity ---
619
620    /// WHY: Biconditional test — verifies forward implication AND contrapositive
621    /// at threshold boundaries to ensure the integer arithmetic matches the
622    /// Creusot specification exactly.
623    #[test]
624    fn classify_severity_biconditional() {
625        // total=0 => always Pass
626        assert_eq!(
627            classify_coverage_severity(0, 0, 80, 50),
628            Severity::Pass,
629            "total=0 must be Pass regardless of covered"
630        );
631
632        // warn_pct=80, error_pct=50, total=100
633        // covered=81 => 81*100=8100 > 80*100=8000 => Pass
634        assert_eq!(
635            classify_coverage_severity(81, 100, 80, 50),
636            Severity::Pass,
637            "81% > 80% warn threshold => Pass"
638        );
639        // covered=80 => 80*100=8000 <= 80*100=8000 (not strictly greater) => NOT Pass
640        assert_ne!(
641            classify_coverage_severity(80, 100, 80, 50),
642            Severity::Pass,
643            "80% == warn threshold => NOT Pass (contrapositive)"
644        );
645
646        // covered=51 => 51*100=5100 > 50*100=5000 => Warning (not Error)
647        assert_eq!(
648            classify_coverage_severity(51, 100, 80, 50),
649            Severity::Warning,
650            "51% > 50% error threshold but <= 80% => Warning"
651        );
652        // covered=50 => 50*100=5000 <= 50*100=5000 => Error
653        assert_eq!(
654            classify_coverage_severity(50, 100, 80, 50),
655            Severity::Error,
656            "50% == error threshold => Error (contrapositive of Warning)"
657        );
658
659        // covered=0 => Error
660        assert_eq!(
661            classify_coverage_severity(0, 100, 80, 50),
662            Severity::Error,
663            "0% coverage => Error"
664        );
665    }
666
667    /// WHY: Exhaustive test over a small domain (0..20) ensures the runtime
668    /// implementation matches the Creusot spec for ALL input combinations,
669    /// not just hand-picked boundary values.
670    #[test]
671    fn classify_severity_exhaustive_small() {
672        let warn_pct = 80;
673        let error_pct = 50;
674
675        for total in 0..=20usize {
676            for covered in 0..=20usize {
677                let result = classify_coverage_severity(covered, total, warn_pct, error_pct);
678                let spec = if total == 0 {
679                    Severity::Pass
680                } else if covered * 100 > warn_pct * total {
681                    Severity::Pass
682                } else if covered * 100 > error_pct * total {
683                    Severity::Warning
684                } else {
685                    Severity::Error
686                };
687                assert_eq!(
688                    result, spec,
689                    "classify_coverage_severity({covered}, {total}, {warn_pct}, {error_pct}): \
690                     got {result:?}, spec {spec:?}"
691                );
692            }
693        }
694    }
695}