Skip to main content

tokmd_cockpit/
lib.rs

1//! # tokmd-cockpit
2//!
3//! **Tier 2 (Computation & Rendering)**
4//!
5//! Cockpit PR metrics computation and rendering for tokmd.
6//! Provides functions to compute change surface, code health, risk,
7//! composition, evidence gates, and review plans for pull requests.
8//!
9//! ## What belongs here
10//! * Cockpit metric computation functions
11//! * Evidence gate computation (mutation, diff coverage, complexity, etc.)
12//! * Markdown/JSON/sections rendering
13//! * Determinism hashing helpers
14//!
15//! ## What does NOT belong here
16//! * CLI argument parsing (use tokmd-config)
17//! * Type definitions (use tokmd-types::cockpit)
18
19pub mod determinism;
20pub mod render;
21
22#[cfg(feature = "git")]
23use std::collections::BTreeMap;
24#[cfg(feature = "git")]
25use std::path::{Path, PathBuf};
26#[cfg(feature = "git")]
27use std::process::Command;
28
29use anyhow::Result;
30#[cfg(feature = "git")]
31use anyhow::{Context, bail};
32#[cfg(feature = "git")]
33use serde::Deserialize;
34
35// Re-export types from tokmd_types::cockpit for convenience
36pub use tokmd_types::cockpit::*;
37
/// Cyclomatic complexity value at or above which a function is treated as
/// "high complexity" by the cockpit's complexity scoring.
pub const COMPLEXITY_THRESHOLD: u32 = 15;
40
/// Per-file change statistics parsed from `git diff --numstat`.
#[derive(Debug, Clone)]
pub struct FileStat {
    /// File path as reported by git (repo-relative, forward slashes).
    pub path: String,
    /// Lines added to this file in the diff.
    pub insertions: usize,
    /// Lines removed from this file in the diff.
    pub deletions: usize,
}
49
50impl AsRef<str> for FileStat {
51    fn as_ref(&self) -> &str {
52        &self.path
53    }
54}
55
56// =============================================================================
57// Core cockpit computation
58// =============================================================================
59
/// Compute the full cockpit receipt for a PR.
///
/// Gathers per-file diff stats for `base..head`, derives each cockpit
/// section from them (change surface, composition, contracts, code health,
/// evidence gates, review plan, risk), and assembles the final
/// [`CockpitReceipt`]. The `trend` field is left `None`; the caller fills
/// it in when a baseline receipt is supplied.
#[cfg(feature = "git")]
pub fn compute_cockpit(
    repo_root: &PathBuf,
    base: &str,
    head: &str,
    range_mode: tokmd_git::GitRangeMode,
    baseline_path: Option<&Path>,
) -> Result<CockpitReceipt> {
    // Wall-clock timestamp in ms since epoch; 0 if the clock predates epoch.
    let generated_at_ms = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis() as u64;

    // Per-file insertion/deletion stats drive every section below.
    let stats = get_file_stats(repo_root, base, head, range_mode)?;

    let change_surface = compute_change_surface(repo_root, base, head, &stats, range_mode)?;
    let composition = compute_composition(&stats);
    let contracts = detect_contracts(&stats);
    let code_health = compute_code_health(&stats, &contracts);
    let evidence = compute_evidence(
        repo_root,
        base,
        head,
        &stats,
        &contracts,
        range_mode,
        baseline_path,
    )?;
    let review_plan = generate_review_plan(&stats, &contracts);
    // Consumes `stats`, so this must stay after every borrow above.
    let risk = compute_risk_owned(stats, &contracts, &code_health);

    Ok(CockpitReceipt {
        schema_version: COCKPIT_SCHEMA_VERSION,
        mode: "cockpit".to_string(),
        generated_at_ms,
        base_ref: base.to_string(),
        head_ref: head.to_string(),
        change_surface,
        composition,
        code_health,
        risk,
        contracts,
        evidence,
        review_plan,
        trend: None, // Populated by caller if --baseline is provided
    })
}
122
123// =============================================================================
124// Trend computation
125// =============================================================================
126
127/// Load baseline receipt and compute trend comparison.
128pub fn load_and_compute_trend(
129    baseline_path: &std::path::Path,
130    current: &CockpitReceipt,
131) -> Result<TrendComparison> {
132    // Try to load baseline
133    let content = match std::fs::read_to_string(baseline_path) {
134        Ok(c) => c,
135        Err(_) => {
136            return Ok(TrendComparison {
137                baseline_available: false,
138                baseline_path: Some(baseline_path.to_string_lossy().to_string()),
139                ..Default::default()
140            });
141        }
142    };
143
144    let baseline: CockpitReceipt = match serde_json::from_str(&content) {
145        Ok(b) => b,
146        Err(_) => {
147            return Ok(TrendComparison {
148                baseline_available: false,
149                baseline_path: Some(baseline_path.to_string_lossy().to_string()),
150                ..Default::default()
151            });
152        }
153    };
154
155    // Compute health trend
156    let health = compute_metric_trend(
157        current.code_health.score as f64,
158        baseline.code_health.score as f64,
159        true, // Higher is better for health
160    );
161
162    // Compute risk trend
163    let risk = compute_metric_trend(
164        current.risk.score as f64,
165        baseline.risk.score as f64,
166        false, // Lower is better for risk
167    );
168
169    // Compute complexity trend indicator
170    let complexity = compute_complexity_trend(current, &baseline);
171
172    Ok(TrendComparison {
173        baseline_available: true,
174        baseline_path: Some(baseline_path.to_string_lossy().to_string()),
175        baseline_generated_at_ms: Some(baseline.generated_at_ms),
176        health: Some(health),
177        risk: Some(risk),
178        complexity: Some(complexity),
179    })
180}
181
182/// Compute trend metric with direction.
183pub fn compute_metric_trend(current: f64, previous: f64, higher_is_better: bool) -> TrendMetric {
184    let delta = current - previous;
185    let delta_pct = if previous != 0.0 {
186        (delta / previous) * 100.0
187    } else if current != 0.0 {
188        100.0
189    } else {
190        0.0
191    };
192
193    // Determine direction based on whether improvement means higher or lower
194    let direction = if delta.abs() < 1.0 {
195        TrendDirection::Stable
196    } else if higher_is_better {
197        if delta > 0.0 {
198            TrendDirection::Improving
199        } else {
200            TrendDirection::Degrading
201        }
202    } else {
203        // Lower is better (e.g., risk)
204        if delta < 0.0 {
205            TrendDirection::Improving
206        } else {
207            TrendDirection::Degrading
208        }
209    };
210
211    TrendMetric {
212        current,
213        previous,
214        delta,
215        delta_pct: round_pct(delta_pct),
216        direction,
217    }
218}
219
220/// Compute complexity trend indicator.
221pub fn compute_complexity_trend(
222    current: &CockpitReceipt,
223    baseline: &CockpitReceipt,
224) -> TrendIndicator {
225    // Compare complexity gate results if available
226    let current_complexity = current
227        .evidence
228        .complexity
229        .as_ref()
230        .map(|c| c.avg_cyclomatic)
231        .unwrap_or(0.0);
232    let baseline_complexity = baseline
233        .evidence
234        .complexity
235        .as_ref()
236        .map(|c| c.avg_cyclomatic)
237        .unwrap_or(0.0);
238
239    let delta = current_complexity - baseline_complexity;
240
241    let direction = if delta.abs() < 0.5 {
242        TrendDirection::Stable
243    } else if delta < 0.0 {
244        TrendDirection::Improving
245    } else {
246        TrendDirection::Degrading
247    };
248
249    let summary = match direction {
250        TrendDirection::Improving => "Complexity decreased".to_string(),
251        TrendDirection::Stable => "Complexity stable".to_string(),
252        TrendDirection::Degrading => "Complexity increased".to_string(),
253    };
254
255    TrendIndicator {
256        direction,
257        summary,
258        files_increased: 0, // Would require per-file comparison
259        files_decreased: 0,
260        avg_cyclomatic_delta: Some(round_pct(delta)),
261        avg_cognitive_delta: None,
262    }
263}
264
265// =============================================================================
266// Evidence computation
267// =============================================================================
268
/// Compute evidence section with all gates.
///
/// Runs every gate in turn (mutation, diff coverage, contracts, supply
/// chain, determinism, complexity) and folds the individual statuses into
/// one overall status. Optional gates return `None` when they do not apply
/// to this change set; any gate error aborts via `?`.
#[cfg(feature = "git")]
fn compute_evidence(
    repo_root: &PathBuf,
    base: &str,
    head: &str,
    changed_files: &[FileStat],
    contracts_info: &Contracts,
    range_mode: tokmd_git::GitRangeMode,
    baseline_path: Option<&Path>,
) -> Result<Evidence> {
    let mutation = compute_mutation_gate(repo_root, base, head, changed_files, range_mode)?;
    let diff_coverage = compute_diff_coverage_gate(repo_root, base, head, range_mode)?;
    let contracts = compute_contract_gate(repo_root, base, head, changed_files, contracts_info)?;
    let supply_chain = compute_supply_chain_gate(repo_root, changed_files)?;
    let determinism = compute_determinism_gate(repo_root, baseline_path)?;
    let complexity = compute_complexity_gate(repo_root, changed_files)?;

    // Compute overall status: any Fail -> Fail, all Pass -> Pass, otherwise Pending/Skipped
    let overall_status = compute_overall_status(
        &mutation,
        &diff_coverage,
        &contracts,
        &supply_chain,
        &determinism,
        &complexity,
    );

    Ok(Evidence {
        overall_status,
        mutation,
        diff_coverage,
        contracts,
        supply_chain,
        determinism,
        complexity,
    })
}
307
/// Fold the individual gate statuses into a single overall status.
///
/// Precedence: any `Fail` wins; otherwise all-`Pass` is `Pass`; otherwise
/// any `Pending` beats any `Warn`; a mix of `Pass` and `Skipped` collapses
/// to `Pass`; no gates at all, or all `Skipped`, yields `Skipped`.
#[cfg(feature = "git")]
fn compute_overall_status(
    mutation: &MutationGate,
    diff_coverage: &Option<DiffCoverageGate>,
    contracts: &Option<ContractDiffGate>,
    supply_chain: &Option<SupplyChainGate>,
    determinism: &Option<DeterminismGate>,
    complexity: &Option<ComplexityGate>,
) -> GateStatus {
    // Mutation is always present; the rest contribute only when computed.
    let statuses: Vec<GateStatus> = [
        Some(mutation.meta.status),
        diff_coverage.as_ref().map(|g| g.meta.status),
        contracts.as_ref().map(|g| g.meta.status),
        supply_chain.as_ref().map(|g| g.meta.status),
        determinism.as_ref().map(|g| g.meta.status),
        complexity.as_ref().map(|g| g.meta.status),
    ]
    .into_iter()
    .flatten()
    .collect();

    let any = |wanted: GateStatus| statuses.iter().any(|s| *s == wanted);
    let all = |wanted: GateStatus| statuses.iter().all(|s| *s == wanted);

    if statuses.is_empty() {
        GateStatus::Skipped
    } else if any(GateStatus::Fail) {
        GateStatus::Fail
    } else if all(GateStatus::Pass) {
        GateStatus::Pass
    } else if any(GateStatus::Pending) {
        GateStatus::Pending
    } else if any(GateStatus::Warn) {
        GateStatus::Warn
    } else if all(GateStatus::Skipped) {
        GateStatus::Skipped
    } else {
        // Remaining case: a mix of Pass and Skipped.
        GateStatus::Pass
    }
}
361
362// =============================================================================
363// Diff coverage gate
364// =============================================================================
365
366/// Compute diff coverage gate.
367/// Looks for coverage artifacts (lcov.info, coverage.json, cobertura.xml) and parses them.
368#[cfg(feature = "git")]
369fn compute_diff_coverage_gate(
370    repo_root: &Path,
371    base: &str,
372    head: &str,
373    range_mode: tokmd_git::GitRangeMode,
374) -> Result<Option<DiffCoverageGate>> {
375    // 1. Get added lines from git
376    let added_lines = match tokmd_git::get_added_lines(repo_root, base, head, range_mode) {
377        Ok(lines) => lines,
378        Err(_) => return Ok(None),
379    };
380
381    if added_lines.is_empty() {
382        return Ok(None);
383    }
384
385    // 2. Search for coverage artifacts in common locations
386    let search_paths = [
387        "coverage/lcov.info",
388        "target/coverage/lcov.info",
389        "lcov.info",
390        "coverage/cobertura.xml",
391        "target/coverage/cobertura.xml",
392        "cobertura.xml",
393        "coverage/coverage.json",
394        "target/coverage/coverage.json",
395        "coverage.json",
396    ];
397
398    let mut lcov_path: Option<PathBuf> = None;
399    for candidate in &search_paths {
400        let path = repo_root.join(candidate);
401        if path.exists() {
402            lcov_path = Some(path);
403            break;
404        }
405    }
406
407    let lcov_path = match lcov_path {
408        Some(p) => p,
409        None => return Ok(None), // No coverage artifact found
410    };
411
412    // Only parse lcov.info format for now (most common in Rust via cargo-llvm-cov)
413    let path_str = lcov_path.to_string_lossy();
414    if !path_str.ends_with("lcov.info") {
415        // We found a coverage file but can't parse non-lcov yet
416        return Ok(None);
417    }
418
419    let content = match std::fs::read_to_string(&lcov_path) {
420        Ok(c) => c,
421        Err(_) => return Ok(None),
422    };
423
424    // 3. Parse LCOV into a lookup map: file -> line -> hit_count
425    let mut lcov_data: BTreeMap<String, BTreeMap<usize, usize>> = BTreeMap::new();
426    let mut current_file: Option<String> = None;
427    let mut current_lines = BTreeMap::new();
428
429    for line in content.lines() {
430        if let Some(sf) = line.strip_prefix("SF:") {
431            // Normalize path to repo-relative
432            let path = sf.replace('\\', "/");
433            // If it's absolute, try to make it relative to repo root
434            let normalized = if let Ok(abs) = Path::new(&path).canonicalize() {
435                if let Ok(rel) = abs.strip_prefix(repo_root.canonicalize().unwrap_or_default()) {
436                    rel.to_string_lossy().replace('\\', "/")
437                } else {
438                    path
439                }
440            } else {
441                path
442            };
443            current_file = Some(normalized);
444            current_lines.clear();
445        } else if let Some(da) = line.strip_prefix("DA:") {
446            if current_file.is_some() {
447                let parts: Vec<&str> = da.splitn(2, ',').collect();
448                if parts.len() == 2
449                    && let (Ok(line_no), Ok(count)) =
450                        (parts[0].parse::<usize>(), parts[1].parse::<usize>())
451                {
452                    current_lines.insert(line_no, count);
453                }
454            }
455        } else if line == "end_of_record"
456            && let Some(file) = current_file.take()
457        {
458            let lines = std::mem::take(&mut current_lines);
459            if let Some(entry) = lcov_data.get_mut(&file) {
460                entry.extend(lines);
461            } else {
462                lcov_data.insert(file, lines);
463            }
464        }
465    }
466
467    if let Some(file) = current_file.take() {
468        let lines = std::mem::take(&mut current_lines);
469        if let Some(entry) = lcov_data.get_mut(&file) {
470            entry.extend(lines);
471        } else {
472            lcov_data.insert(file, lines);
473        }
474    }
475
476    // 4. Intersect added lines with LCOV hits
477    let mut total_added = 0usize;
478    let mut total_covered = 0usize;
479    let mut uncovered_hunks: Vec<UncoveredHunk> = Vec::new();
480    let mut tested_files: std::collections::BTreeSet<String> = std::collections::BTreeSet::new();
481
482    for (file_path, lines) in added_lines {
483        let file_path_str = file_path.to_string_lossy().replace('\\', "/");
484        total_added += lines.len();
485
486        let mut uncovered_in_file = Vec::new();
487
488        if let Some(file_lcov) = lcov_data.get(&file_path_str) {
489            tested_files.insert(file_path_str.clone());
490            for line in lines {
491                match file_lcov.get(&line) {
492                    Some(&count) if count > 0 => {
493                        total_covered += 1;
494                    }
495                    _ => {
496                        uncovered_in_file.push(line);
497                    }
498                }
499            }
500        } else {
501            // File not in LCOV - treat all added lines as uncovered
502            uncovered_in_file.extend(lines);
503        }
504
505        flush_uncovered_hunks(&file_path_str, &uncovered_in_file, &mut uncovered_hunks);
506    }
507
508    if total_added == 0 {
509        return Ok(None);
510    }
511
512    let coverage_pct = round_pct(total_covered as f64 / total_added as f64);
513    let status = if coverage_pct >= 0.80 {
514        GateStatus::Pass
515    } else if coverage_pct >= 0.50 {
516        GateStatus::Warn
517    } else {
518        GateStatus::Fail
519    };
520
521    // Limit uncovered hunks to avoid huge output
522    uncovered_hunks.truncate(20);
523
524    Ok(Some(DiffCoverageGate {
525        meta: GateMeta {
526            status,
527            source: EvidenceSource::CiArtifact,
528            commit_match: CommitMatch::Unknown,
529            scope: ScopeCoverage {
530                relevant: lcov_data.keys().cloned().collect(),
531                tested: tested_files.into_iter().collect(),
532                ratio: coverage_pct,
533                lines_relevant: Some(total_added),
534                lines_tested: Some(total_covered),
535            },
536            evidence_commit: None,
537            evidence_generated_at_ms: None,
538        },
539        lines_added: total_added,
540        lines_covered: total_covered,
541        coverage_pct,
542        uncovered_hunks,
543    }))
544}
545
/// Collapse a set of uncovered line numbers for `file` into contiguous
/// `[start_line, end_line]` hunks and append them to `hunks`.
///
/// Input order does not matter (lines are sorted first). An empty line set
/// or an empty file name appends nothing.
#[cfg(feature = "git")]
fn flush_uncovered_hunks(file: &str, uncovered: &[usize], hunks: &mut Vec<UncoveredHunk>) {
    if file.is_empty() || uncovered.is_empty() {
        return;
    }

    let mut lines = uncovered.to_vec();
    lines.sort_unstable();

    // Emit one hunk for the current contiguous run.
    let emit = |hunks: &mut Vec<UncoveredHunk>, start: usize, end: usize| {
        hunks.push(UncoveredHunk {
            file: file.to_string(),
            start_line: start,
            end_line: end,
        });
    };

    let (mut start, mut end) = (lines[0], lines[0]);
    for &line in lines.iter().skip(1) {
        if line == end + 1 {
            // Still contiguous with the current run.
            end = line;
        } else {
            emit(hunks, start, end);
            start = line;
            end = line;
        }
    }
    emit(hunks, start, end);
}
575
576// =============================================================================
577// Contract gate
578// =============================================================================
579
/// Compute contract diff gate (semver, CLI, schema).
///
/// Returns `Ok(None)` when no contract-relevant surface (API, CLI, schema)
/// changed according to `contracts_info`. Each changed surface gets its own
/// sub-gate; `failures` counts sub-gates that reported `Fail`. Overall
/// status is `Fail` on any sub-gate failure, else `Pending` if any sub-gate
/// is pending, else `Pass`.
#[cfg(feature = "git")]
fn compute_contract_gate(
    repo_root: &Path,
    base: &str,
    head: &str,
    changed_files: &[FileStat],
    contracts_info: &Contracts,
) -> Result<Option<ContractDiffGate>> {
    // Only compute if any contract-relevant files changed
    if !contracts_info.api_changed && !contracts_info.cli_changed && !contracts_info.schema_changed
    {
        return Ok(None);
    }

    let mut failures = 0;
    let mut semver = None;
    let mut cli = None;
    let mut schema = None;

    // Check for semver changes (API files)
    if contracts_info.api_changed {
        semver = Some(run_semver_check(repo_root));
    }

    // Check for CLI changes
    if contracts_info.cli_changed {
        // Gather CLI-related files that changed
        let cli_files: Vec<&str> = changed_files
            .iter()
            .filter(|f| {
                f.path.contains("crates/tokmd/src/commands/")
                    || f.path.contains("crates/tokmd-config/")
            })
            .map(|s| s.path.as_str())
            .collect();

        // Build a human-readable "N command files, M config files" summary.
        let diff_summary = if cli_files.is_empty() {
            None
        } else {
            let command_files = cli_files
                .iter()
                .filter(|f| f.contains("crates/tokmd/src/commands/"))
                .count();
            let config_files = cli_files
                .iter()
                .filter(|f| f.contains("crates/tokmd-config/"))
                .count();

            let mut parts = Vec::new();
            if command_files > 0 {
                parts.push(format!(
                    "{} command file{}",
                    command_files,
                    if command_files == 1 { "" } else { "s" }
                ));
            }
            if config_files > 0 {
                parts.push(format!(
                    "{} config file{}",
                    config_files,
                    if config_files == 1 { "" } else { "s" }
                ));
            }
            Some(parts.join(", "))
        };

        // NOTE(review): the CLI sub-gate currently always passes; it only
        // reports a summary of what changed.
        cli = Some(CliSubGate {
            status: GateStatus::Pass,
            diff_summary,
        });
    }

    // Check for schema changes
    if contracts_info.schema_changed {
        schema = Some(run_schema_diff(repo_root, base, head));
    }

    // Count failures from sub-gates
    if let Some(ref sg) = semver
        && sg.status == GateStatus::Fail
    {
        failures += 1;
    }
    if let Some(ref cg) = cli
        && cg.status == GateStatus::Fail
    {
        failures += 1;
    }
    if let Some(ref scg) = schema
        && scg.status == GateStatus::Fail
    {
        failures += 1;
    }

    // Determine overall status
    let status = if failures > 0 {
        GateStatus::Fail
    } else {
        // Check if any are pending
        let any_pending = [
            semver.as_ref().map(|g| g.status),
            cli.as_ref().map(|g| g.status),
            schema.as_ref().map(|g| g.status),
        ]
        .into_iter()
        .flatten()
        .any(|s| s == GateStatus::Pending);

        if any_pending {
            GateStatus::Pending
        } else {
            GateStatus::Pass
        }
    };

    // Collect relevant files for scope
    let relevant: Vec<String> = changed_files
        .iter()
        .filter(|f| {
            f.path.ends_with("/src/lib.rs")
                || f.path.ends_with("/mod.rs")
                || f.path.contains("crates/tokmd/src/commands/")
                || f.path.contains("crates/tokmd-config/")
                || f.path == "docs/schema.json"
        })
        .map(|f| f.path.clone())
        .collect();

    // Scope ratio is 1.0 because every relevant file was examined here.
    Ok(Some(ContractDiffGate {
        meta: GateMeta {
            status,
            source: EvidenceSource::RanLocal,
            commit_match: CommitMatch::Unknown,
            scope: ScopeCoverage {
                relevant: relevant.clone(),
                tested: relevant,
                ratio: 1.0,
                lines_relevant: None,
                lines_tested: None,
            },
            evidence_commit: None,
            evidence_generated_at_ms: None,
        },
        semver,
        cli,
        schema,
        failures,
    }))
}
730
/// Run cargo-semver-checks if available.
///
/// Returns `Pending` when the tool is missing or cannot be launched,
/// `Pass` on a clean run (exit 0), and `Fail` with scraped breaking-change
/// lines otherwise.
#[cfg(feature = "git")]
fn run_semver_check(repo_root: &Path) -> SemverSubGate {
    // Shared "tool unavailable" result.
    let pending = || SemverSubGate {
        status: GateStatus::Pending,
        breaking_changes: Vec::new(),
    };

    // Probe for cargo-semver-checks before attempting a real run.
    let available = Command::new("cargo")
        .args(["semver-checks", "--version"])
        .output()
        .map(|o| o.status.success())
        .unwrap_or(false);
    if !available {
        return pending();
    }

    let output = match Command::new("cargo")
        .args(["semver-checks", "check-release"])
        .current_dir(repo_root)
        .output()
    {
        Ok(o) => o,
        Err(_) => return pending(),
    };

    // Exit 0 = no breaking changes.
    if output.status.success() {
        return SemverSubGate {
            status: GateStatus::Pass,
            breaking_changes: Vec::new(),
        };
    }

    // Non-zero exit = breaking changes found; scrape them from the combined
    // output. cargo-semver-checks prints lines like
    // "--- failure[kind]: message ---" or similar.
    let combined = format!(
        "{}{}",
        String::from_utf8_lossy(&output.stdout),
        String::from_utf8_lossy(&output.stderr)
    );
    let mut breaking_changes: Vec<BreakingChange> = combined
        .lines()
        .map(str::trim)
        .filter(|t| t.contains("BREAKING") || t.starts_with("---"))
        .map(|t| BreakingChange {
            kind: "semver".to_string(),
            path: String::new(),
            message: t.to_string(),
        })
        .collect();

    // If we couldn't parse specific changes but the tool failed, add a
    // generic entry so the failure is never silent.
    if breaking_changes.is_empty() {
        breaking_changes.push(BreakingChange {
            kind: "semver".to_string(),
            path: String::new(),
            message: "cargo-semver-checks reported breaking changes".to_string(),
        });
    }

    // Keep output bounded.
    breaking_changes.truncate(20);

    SemverSubGate {
        status: GateStatus::Fail,
        breaking_changes,
    }
}
808
/// Run git diff on docs/schema.json to detect schema changes.
///
/// Classifies the diff as additive-only (`Pass`), mixed additions and
/// removals (`Warn`), or likely breaking (`Fail`, on `"type"` field changes
/// or net removals). Git launch/exit failures yield `Pending`.
#[cfg(feature = "git")]
fn run_schema_diff(repo_root: &Path, base: &str, head: &str) -> SchemaSubGate {
    // Use two-dot syntax for comparing refs directly (per project convention)
    let range = format!("{}..{}", base, head);
    let output = match Command::new("git")
        .arg("-C")
        .arg(repo_root)
        .args(["diff", &range, "--", "docs/schema.json"])
        .output()
    {
        Ok(o) => o,
        Err(_) => {
            return SchemaSubGate {
                status: GateStatus::Pending,
                diff_summary: None,
            };
        }
    };

    if !output.status.success() {
        return SchemaSubGate {
            status: GateStatus::Pending,
            diff_summary: None,
        };
    }

    let diff = String::from_utf8_lossy(&output.stdout);
    if diff.trim().is_empty() {
        // No diff means schema.json didn't change between these refs
        return SchemaSubGate {
            status: GateStatus::Pass,
            diff_summary: None,
        };
    }

    // Analyze the diff for breaking vs additive changes
    let mut additions = 0usize;
    let mut removals = 0usize;
    let mut has_type_change = false;

    for line in diff.lines() {
        if line.starts_with('+') && !line.starts_with("+++") {
            additions += 1;
        } else if line.starts_with('-') && !line.starts_with("---") {
            removals += 1;
            // Check for type changes (field type modifications)
            let trimmed = line.trim_start_matches('-').trim();
            if trimmed.contains("\"type\"") {
                has_type_change = true;
            }
        }
    }

    let (status, summary) = if removals == 0 {
        // Only additions = safe additive change
        (
            GateStatus::Pass,
            Some(format!(
                "schema.json: {} line{} added (additive only)",
                additions,
                if additions == 1 { "" } else { "s" }
            )),
        )
    } else if has_type_change || removals > additions {
        // Type changes or net removals = likely breaking
        (
            GateStatus::Fail,
            Some(format!(
                "schema.json: {} addition{}, {} removal{} (potential breaking change)",
                additions,
                if additions == 1 { "" } else { "s" },
                removals,
                if removals == 1 { "" } else { "s" }
            )),
        )
    } else {
        // Removals alongside at-least-as-many additions: surface as Warn so
        // a reviewer looks at it. (Previously returned Pass, contradicting
        // the stated intent of this branch.)
        (
            GateStatus::Warn,
            Some(format!(
                "schema.json: {} addition{}, {} removal{}",
                additions,
                if additions == 1 { "" } else { "s" },
                removals,
                if removals == 1 { "" } else { "s" }
            )),
        )
    };

    SchemaSubGate {
        status,
        diff_summary: summary,
    }
}
905
906// =============================================================================
907// Supply chain gate
908// =============================================================================
909
/// Compute supply chain gate.
/// Checks if Cargo.lock changed and runs cargo-audit if available.
///
/// Returns `Ok(None)` when `Cargo.lock` is unchanged. If cargo-audit is not
/// installed or cannot be spawned, a `Pending` gate is returned rather than
/// an error. Severity mapping: critical/high => `Fail`, medium => `Warn`,
/// otherwise `Pass`; unparseable audit output => `Pending`.
#[cfg(feature = "git")]
fn compute_supply_chain_gate(
    repo_root: &Path,
    changed_files: &[FileStat],
) -> Result<Option<SupplyChainGate>> {
    /// Gate reported when cargo-audit evidence could not be collected.
    /// (Extracted to avoid duplicating this literal on both pending paths.)
    fn pending_gate() -> SupplyChainGate {
        SupplyChainGate {
            meta: GateMeta {
                status: GateStatus::Pending,
                source: EvidenceSource::RanLocal,
                commit_match: CommitMatch::Unknown,
                scope: ScopeCoverage {
                    relevant: vec!["Cargo.lock".to_string()],
                    tested: Vec::new(),
                    ratio: 0.0,
                    lines_relevant: None,
                    lines_tested: None,
                },
                evidence_commit: None,
                evidence_generated_at_ms: None,
            },
            vulnerabilities: Vec::new(),
            denied: Vec::new(),
            advisory_db_version: None,
        }
    }

    // Only compute if Cargo.lock changed
    let lock_changed = changed_files.iter().any(|f| f.path.ends_with("Cargo.lock"));
    if !lock_changed {
        return Ok(None);
    }

    // Check if cargo-audit is available
    let check = Command::new("cargo").arg("audit").arg("--version").output();
    let audit_available = check.as_ref().map(|o| o.status.success()).unwrap_or(false);
    if !audit_available {
        // cargo-audit not installed: evidence is pending, not failed.
        return Ok(Some(pending_gate()));
    }

    // Run cargo audit with JSON output. The exit status is not consulted:
    // stdout is parsed below, and unparseable output maps to Pending.
    let output = match Command::new("cargo")
        .args(["audit", "--json"])
        .current_dir(repo_root)
        .output()
    {
        Ok(o) => o,
        // Failed to spawn cargo-audit at all: report Pending.
        Err(_) => return Ok(Some(pending_gate())),
    };

    // Parse JSON output
    let stdout = String::from_utf8_lossy(&output.stdout);

    // Intermediate structs for parsing cargo-audit JSON output
    #[derive(Deserialize)]
    struct AuditOutput {
        database: Option<AuditDatabase>,
        vulnerabilities: Option<AuditVulnerabilities>,
    }

    #[derive(Deserialize)]
    #[allow(dead_code)]
    struct AuditDatabase {
        #[serde(rename = "advisory-count")]
        advisory_count: Option<u32>,
        version: Option<String>,
    }

    #[derive(Deserialize)]
    #[allow(dead_code)]
    struct AuditVulnerabilities {
        found: Option<bool>,
        count: Option<u32>,
        list: Option<Vec<AuditVulnEntry>>,
    }

    #[derive(Deserialize)]
    struct AuditVulnEntry {
        advisory: Option<AuditAdvisory>,
        package: Option<AuditPackage>,
    }

    #[derive(Deserialize)]
    struct AuditAdvisory {
        id: Option<String>,
        severity: Option<String>,
        title: Option<String>,
    }

    #[derive(Deserialize)]
    struct AuditPackage {
        name: Option<String>,
    }

    let parsed: Result<AuditOutput, _> = serde_json::from_str(&stdout);

    let (vulnerabilities, advisory_db_version, status) = match parsed {
        Ok(audit) => {
            let db_version = audit.database.and_then(|db| db.version);

            let vulns: Vec<Vulnerability> = audit
                .vulnerabilities
                .and_then(|v| v.list)
                .unwrap_or_default()
                .into_iter()
                .filter_map(|entry| {
                    // Entries without an advisory carry no usable information.
                    let advisory = entry.advisory?;
                    Some(Vulnerability {
                        id: advisory.id.unwrap_or_default(),
                        package: entry.package.and_then(|p| p.name).unwrap_or_default(),
                        severity: advisory
                            .severity
                            .clone()
                            .unwrap_or_else(|| "unknown".to_string()),
                        title: advisory.title.unwrap_or_default(),
                    })
                })
                .collect();

            // Determine status based on vulnerability severities
            let has_critical_or_high = vulns.iter().any(|v| {
                let sev = v.severity.to_lowercase();
                sev == "critical" || sev == "high"
            });
            let has_medium = vulns.iter().any(|v| v.severity.to_lowercase() == "medium");

            let status = if has_critical_or_high {
                GateStatus::Fail
            } else if has_medium {
                GateStatus::Warn
            } else {
                GateStatus::Pass
            };

            (vulns, db_version, status)
        }
        Err(_) => {
            // Failed to parse JSON, return Pending
            (Vec::new(), None, GateStatus::Pending)
        }
    };

    Ok(Some(SupplyChainGate {
        meta: GateMeta {
            status,
            source: EvidenceSource::RanLocal,
            commit_match: CommitMatch::Unknown,
            scope: ScopeCoverage {
                relevant: vec!["Cargo.lock".to_string()],
                tested: vec!["Cargo.lock".to_string()],
                ratio: 1.0,
                lines_relevant: None,
                lines_tested: None,
            },
            evidence_commit: None,
            evidence_generated_at_ms: None,
        },
        vulnerabilities,
        denied: Vec::new(),
        advisory_db_version,
    }))
}
1095
1096// =============================================================================
1097// Determinism gate
1098// =============================================================================
1099
/// Compute determinism gate.
/// Compares expected source hash (from baseline) with a fresh hash of the repo.
///
/// Returns `Ok(None)` when: no baseline file exists, the baseline is a
/// cockpit receipt (`"mode": "cockpit"`, which carries no determinism data),
/// or the baseline has no `determinism` section.
///
/// # Errors
/// Fails if the baseline cannot be read, is not valid JSON, or is neither a
/// `ComplexityBaseline` nor a cockpit receipt; also propagates hashing errors.
#[cfg(feature = "git")]
pub fn compute_determinism_gate(
    repo_root: &Path,
    baseline_path: Option<&Path>,
) -> Result<Option<DeterminismGate>> {
    use tokmd_analysis_types::ComplexityBaseline;

    // Abbreviate a hash to 16 chars for human-readable mismatch messages.
    fn short16(s: &str) -> &str {
        s.get(..16).unwrap_or(s)
    }

    // Resolve baseline: explicit path or default location
    let resolved_path = match baseline_path {
        Some(p) => p.to_path_buf(),
        None => repo_root.join(".tokmd/baseline.json"),
    };

    // If no baseline file exists, skip the gate
    if !resolved_path.exists() {
        return Ok(None);
    }

    // Parse baseline
    let content = std::fs::read_to_string(&resolved_path)
        .with_context(|| format!("failed to read baseline at {}", resolved_path.display()))?;
    let json: serde_json::Value = serde_json::from_str(&content).with_context(|| {
        format!(
            "failed to parse baseline JSON at {}",
            resolved_path.display()
        )
    })?;
    let baseline: ComplexityBaseline = match serde_json::from_value(json.clone()) {
        Ok(parsed) => parsed,
        Err(_) => {
            // Allow cockpit receipts for trend comparison; determinism data is unavailable there.
            let mode = json
                .get("mode")
                .and_then(|v| v.as_str())
                .unwrap_or_default();
            if mode == "cockpit" {
                return Ok(None);
            }
            bail!(
                "baseline JSON at {} is not a ComplexityBaseline (and not a cockpit receipt)",
                resolved_path.display()
            );
        }
    };

    // If baseline has no determinism section, skip the gate
    let det = match &baseline.determinism {
        Some(d) => d,
        None => return Ok(None),
    };

    // Recompute current source hash by walking the repo, excluding the baseline file itself
    // (so the baseline's own bytes do not perturb the recomputed hash).
    // Backslashes are normalized so Windows-style paths match the walk's keys.
    let baseline_rel = resolved_path
        .strip_prefix(repo_root)
        .ok()
        .map(|p| p.to_string_lossy().replace('\\', "/"));
    let exclude: Vec<&str> = baseline_rel.as_deref().into_iter().collect();
    let actual_hash = determinism::hash_files_from_walk(repo_root, &exclude)?;
    let expected_hash = &det.source_hash;

    let mut differences = Vec::new();

    if actual_hash != *expected_hash {
        differences.push(format!(
            "source hash mismatch: expected {}, got {}",
            short16(expected_hash),
            short16(&actual_hash),
        ));
    }

    // Check Cargo.lock hash if baseline had one
    if let Some(expected_lock) = &det.cargo_lock_hash {
        let actual_lock = determinism::hash_cargo_lock(repo_root)?;
        match actual_lock {
            Some(ref actual) if actual != expected_lock => {
                differences.push(format!(
                    "Cargo.lock hash mismatch: expected {}, got {}",
                    short16(expected_lock),
                    short16(actual),
                ));
            }
            None => {
                differences.push("Cargo.lock missing (was present in baseline)".to_string());
            }
            // Some(actual) equal to expected: nothing to report.
            _ => {}
        }
    }

    // Any drift yields Warn rather than Fail: mismatches are surfaced, not blocking.
    let status = if differences.is_empty() {
        GateStatus::Pass
    } else {
        GateStatus::Warn
    };

    Ok(Some(DeterminismGate {
        meta: GateMeta {
            status,
            source: EvidenceSource::RanLocal,
            commit_match: CommitMatch::Unknown,
            scope: ScopeCoverage {
                relevant: vec!["source files".to_string()],
                tested: vec!["source files".to_string()],
                ratio: 1.0,
                lines_relevant: None,
                lines_tested: None,
            },
            evidence_commit: None,
            evidence_generated_at_ms: None,
        },
        expected_hash: Some(expected_hash.clone()),
        actual_hash: Some(actual_hash),
        // NOTE(review): assumes the determinism helpers hash with BLAKE3 —
        // confirm this label matches `determinism::hash_files_from_walk`.
        algo: "blake3".to_string(),
        differences,
    }))
}
1221
1222// =============================================================================
1223// Complexity gate
1224// =============================================================================
1225
/// Compute complexity gate.
/// Analyzes cyclomatic complexity of changed Rust source files.
///
/// Returns `Ok(None)` when no relevant Rust sources changed. Files missing on
/// disk (e.g. deletions) or that fail to read are silently skipped.
#[cfg(feature = "git")]
fn compute_complexity_gate(
    repo_root: &Path,
    changed_files: &[FileStat],
) -> Result<Option<ComplexityGate>> {
    // Filter to relevant Rust source files (tests and fuzz targets excluded)
    let relevant_files: Vec<String> = changed_files
        .iter()
        .filter(|f| is_relevant_rust_source(&f.path))
        .map(|f| f.path.clone())
        .collect();

    // If no relevant files, skip
    if relevant_files.is_empty() {
        return Ok(None);
    }

    let mut high_complexity_files = Vec::new();
    let mut total_complexity: u64 = 0;
    let mut max_cyclomatic: u32 = 0;
    let mut files_analyzed: usize = 0;

    for file_path in &relevant_files {
        let full_path = repo_root.join(file_path);
        if !full_path.exists() {
            // Deleted or renamed away; nothing left to analyze.
            continue;
        }

        if let Ok(content) = std::fs::read_to_string(&full_path) {
            let analysis = analyze_rust_complexity(&content);
            files_analyzed += 1;
            // Per-file total feeds the average; per-file max feeds the gate.
            total_complexity += analysis.total_complexity as u64;
            max_cyclomatic = max_cyclomatic.max(analysis.max_complexity);

            if analysis.max_complexity > COMPLEXITY_THRESHOLD {
                high_complexity_files.push(HighComplexityFile {
                    path: file_path.clone(),
                    cyclomatic: analysis.max_complexity,
                    function_count: analysis.function_count,
                    max_function_length: analysis.max_function_length,
                });
            }
        }
    }

    // Sort high complexity files by cyclomatic complexity (descending), then path for determinism
    high_complexity_files.sort_by(|a, b| {
        b.cyclomatic
            .cmp(&a.cyclomatic)
            .then_with(|| a.path.cmp(&b.path))
    });

    // Average of per-file *total* complexity (not per-function), rounded.
    let avg_cyclomatic = if files_analyzed > 0 {
        round_pct(total_complexity as f64 / files_analyzed as f64)
    } else {
        0.0
    };

    // Determine gate status:
    // - Pass: no high complexity files
    // - Warn: 1-3 high complexity files
    // - Fail: >3 high complexity files
    let high_count = high_complexity_files.len();
    let (status, threshold_exceeded) = match high_count {
        0 => (GateStatus::Pass, false),
        1..=3 => (GateStatus::Warn, true),
        _ => (GateStatus::Fail, true),
    };

    Ok(Some(ComplexityGate {
        meta: GateMeta {
            status,
            source: EvidenceSource::RanLocal,
            commit_match: CommitMatch::Exact,
            scope: ScopeCoverage {
                relevant: relevant_files.clone(),
                tested: relevant_files,
                ratio: 1.0,
                lines_relevant: None,
                lines_tested: None,
            },
            evidence_commit: None,
            // NOTE(review): wall-clock timestamp makes this gate's output
            // nondeterministic across runs, unlike the other gates here
            // (which leave this None) — confirm that is intended.
            evidence_generated_at_ms: Some(
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_millis() as u64,
            ),
        },
        files_analyzed,
        high_complexity_files,
        avg_cyclomatic,
        max_cyclomatic,
        threshold_exceeded,
    }))
}
1324
/// Results from analyzing a Rust file's complexity.
///
/// Produced by [`analyze_rust_complexity`]; all figures are heuristic
/// approximations, not exact cyclomatic complexity.
#[cfg(feature = "git")]
struct ComplexityAnalysis {
    /// Total cyclomatic complexity across all functions.
    total_complexity: u32,
    /// Maximum complexity of any single function.
    max_complexity: u32,
    /// Number of functions found.
    function_count: usize,
    /// Maximum function length in lines.
    max_function_length: usize,
}
1337
/// Analyze the cyclomatic complexity of Rust source code.
/// Uses a simple heuristic approach counting decision points.
///
/// Functions are detected by `fn`-prefix patterns and delimited by brace
/// depth; each function starts at complexity 1 and gains 1 per decision
/// point found on its lines.
///
/// NOTE(review): known heuristic limitations, acceptable here but worth
/// confirming before treating the numbers as exact:
/// - the keyword scan runs on the raw trimmed line without string/char
///   awareness, so decision tokens inside string literals still count;
/// - `"if "` also matches inside `"else if "`, so `else if` counts twice,
///   and a `match` line counts once for the keyword plus once per `=>` arm.
#[cfg(feature = "git")]
fn analyze_rust_complexity(content: &str) -> ComplexityAnalysis {
    let mut total_complexity: u32 = 0;
    let mut max_complexity: u32 = 0;
    let mut function_count: usize = 0;
    let mut max_function_length: usize = 0;

    let mut in_function = false;
    let mut brace_depth: i32 = 0;
    let mut function_brace_depth: i32 = 0; // Depth when function started
    let mut function_start_line: usize = 0;
    let mut current_complexity: u32 = 1; // Start at 1 for the function itself
    // String/char/comment state persists across lines (multi-line literals).
    let mut in_string = false;
    let mut in_char = false;
    let mut in_block_comment = false;

    for (line_idx, line) in content.lines().enumerate() {
        let trimmed = line.trim();

        // Skip empty lines
        if trimmed.is_empty() {
            continue;
        }

        // Check for function start BEFORE processing braces
        // (so we can track the starting brace depth correctly)
        let is_fn_start = !in_function
            && !in_block_comment
            && (trimmed.starts_with("fn ")
                || trimmed.starts_with("pub fn ")
                || trimmed.starts_with("pub(crate) fn ")
                || trimmed.starts_with("pub(super) fn ")
                || trimmed.starts_with("async fn ")
                || trimmed.starts_with("pub async fn ")
                || trimmed.starts_with("const fn ")
                || trimmed.starts_with("pub const fn ")
                || trimmed.starts_with("unsafe fn ")
                || trimmed.starts_with("pub unsafe fn "));

        if is_fn_start {
            in_function = true;
            function_start_line = line_idx;
            function_brace_depth = brace_depth;
            current_complexity = 1;
        }

        let mut in_line_comment = false;

        // Simple state machine for parsing: walks the line char-by-char to
        // track comments, string/char literals, and brace depth.
        let chars: Vec<char> = line.chars().collect();
        let mut i = 0;
        while i < chars.len() {
            let c = chars[i];
            let next = chars.get(i + 1).copied();

            // Handle block comments
            if in_block_comment {
                if c == '*' && next == Some('/') {
                    in_block_comment = false;
                    i += 2;
                    continue;
                }
                i += 1;
                continue;
            }

            // Handle line comments
            if c == '/' && next == Some('/') {
                in_line_comment = true;
                break;
            }

            // Handle block comment start
            if c == '/' && next == Some('*') {
                in_block_comment = true;
                i += 2;
                continue;
            }

            // Handle strings (toggle on unescaped double quote)
            if !in_char && c == '"' && (i == 0 || chars[i - 1] != '\\') {
                in_string = !in_string;
                i += 1;
                continue;
            }

            // Handle chars (NOTE(review): a lifetime tick like `'a` also
            // toggles this flag — confirm acceptable for the heuristic)
            if !in_string && c == '\'' && (i == 0 || chars[i - 1] != '\\') {
                in_char = !in_char;
                i += 1;
                continue;
            }

            // Skip if in string or char
            if in_string || in_char {
                i += 1;
                continue;
            }

            // Track brace depth
            if c == '{' {
                brace_depth += 1;
            } else if c == '}' {
                brace_depth -= 1;
                if in_function && brace_depth == function_brace_depth {
                    // End of function: fold its stats into the totals.
                    let function_length = line_idx - function_start_line + 1;
                    max_function_length = max_function_length.max(function_length);
                    total_complexity += current_complexity;
                    max_complexity = max_complexity.max(current_complexity);
                    function_count += 1;
                    in_function = false;
                    current_complexity = 1;
                }
            }

            i += 1;
        }

        // Skip complexity counting if in comment
        if in_line_comment || in_block_comment {
            continue;
        }

        // Count decision points for complexity (only inside functions)
        if in_function {
            // Count control flow keywords (scan is string-unaware; see
            // the limitations note in the doc comment above)
            let keywords = [
                "if ", "else if ", "while ", "for ", "loop ", "match ", "&&", "||", "?",
            ];
            for kw in &keywords {
                // Count occurrences of each keyword
                let mut search_line = trimmed;
                while let Some(pos) = search_line.find(kw) {
                    current_complexity += 1;
                    search_line = &search_line[pos + kw.len()..];
                }
            }

            // Count match arms (each => in a match adds complexity)
            if trimmed.contains("=>") && !trimmed.starts_with("//") {
                // Count number of => in the line
                let arrow_count = trimmed.matches("=>").count();
                current_complexity += arrow_count as u32;
            }
        }
    }

    // Handle case where file ends without closing brace
    // NOTE(review): max_function_length is not updated on this path.
    if in_function {
        function_count += 1;
        total_complexity += current_complexity;
        max_complexity = max_complexity.max(current_complexity);
    }

    ComplexityAnalysis {
        total_complexity,
        max_complexity,
        function_count,
        max_function_length,
    }
}
1502
/// Check if a file is a relevant Rust source file for mutation testing.
/// Excludes test files, fuzz targets, etc.
#[cfg(feature = "git")]
fn is_relevant_rust_source(path: &str) -> bool {
    // Normalize case once so all checks are case-insensitive.
    let p = path.to_lowercase();

    // A path is relevant iff it is a .rs file that is neither a test
    // artifact nor a fuzz target.
    let is_rust = p.ends_with(".rs");
    let in_tests_dir = p.contains("/tests/") || p.starts_with("tests/");
    let is_test_file = p.ends_with("_test.rs") || p.ends_with("_tests.rs");
    let in_fuzz_dir = p.contains("/fuzz/") || p.starts_with("fuzz/");

    is_rust && !in_tests_dir && !is_test_file && !in_fuzz_dir
}
1531
1532// =============================================================================
1533// Mutation gate
1534// =============================================================================
1535
/// Get the current HEAD commit hash.
///
/// Runs `git -C <repo_root> rev-parse HEAD` and returns the trimmed hash.
/// Takes `&Path` (idiomatic over `&PathBuf`); existing `&PathBuf` call sites
/// deref-coerce, so this is backward compatible.
///
/// # Errors
/// Fails if git cannot be spawned or exits with a non-zero status.
#[cfg(feature = "git")]
fn get_head_commit(repo_root: &Path) -> Result<String> {
    let output = Command::new("git")
        .arg("-C")
        .arg(repo_root)
        .arg("rev-parse")
        .arg("HEAD")
        .output()
        .context("Failed to run git rev-parse HEAD")?;

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        bail!("git rev-parse HEAD failed: {}", stderr.trim());
    }

    Ok(String::from_utf8_lossy(&output.stdout).trim().to_string())
}
1554
/// CI workflow summary format (mutants-summary.json).
#[derive(Debug, Clone, Deserialize)]
#[cfg(feature = "git")]
struct CiMutantsSummary {
    /// Commit the summary was generated for; compared by prefix against
    /// HEAD, so abbreviated hashes are accepted.
    commit: String,
    /// Overall result: "pass", "fail", or "skipped"; anything else is
    /// treated as pending.
    status: String,
    /// Files the mutation run actually covered.
    scope: Vec<String>,
    /// Mutants that survived the test suite.
    survivors: Vec<CiSurvivor>,
    /// Count of killed mutants.
    killed: usize,
    /// Count of timed-out mutants.
    timeout: usize,
    /// Count of unviable mutants.
    unviable: usize,
}
1567
/// A single surviving mutant from the CI summary.
#[derive(Debug, Clone, Deserialize)]
#[cfg(feature = "git")]
struct CiSurvivor {
    /// Path of the mutated file.
    file: String,
    /// Line number of the mutation site.
    line: usize,
    /// Description of the mutation that survived.
    mutation: String,
}
1575
/// Compute the mutation gate status.
///
/// Evidence resolution order: CI artifact, then local cache, then a local
/// run. With no relevant Rust sources in the diff, reports a skipped gate.
#[cfg(feature = "git")]
fn compute_mutation_gate(
    repo_root: &PathBuf,
    _base: &str,
    _head: &str,
    changed_files: &[FileStat],
    _range_mode: tokmd_git::GitRangeMode,
) -> Result<MutationGate> {
    // Restrict the diff to Rust sources worth mutating.
    let mut scope_files: Vec<String> = Vec::new();
    for f in changed_files {
        if is_relevant_rust_source(&f.path) {
            scope_files.push(f.path.clone());
        }
    }

    // Nothing to mutate: report a skipped gate with an empty scope.
    if scope_files.is_empty() {
        return Ok(MutationGate {
            meta: GateMeta {
                status: GateStatus::Skipped,
                source: EvidenceSource::RanLocal,
                commit_match: CommitMatch::Unknown,
                scope: ScopeCoverage {
                    relevant: Vec::new(),
                    tested: Vec::new(),
                    ratio: 1.0,
                    lines_relevant: None,
                    lines_tested: None,
                },
                evidence_commit: None,
                evidence_generated_at_ms: None,
            },
            survivors: Vec::new(),
            killed: 0,
            timeout: 0,
            unviable: 0,
        });
    }

    let head_commit = get_head_commit(repo_root)?;

    // Prefer existing evidence over running anything ourselves.
    if let Some(gate) = try_load_ci_artifact(repo_root, &head_commit, &scope_files)? {
        return Ok(gate);
    }
    if let Some(gate) = try_load_cached(repo_root, &head_commit, &scope_files)? {
        return Ok(gate);
    }

    run_mutations(repo_root, &scope_files)
}
1630
/// Try to load mutation results from CI artifact.
/// Checks for mutants-summary.json (our format) first, then falls back to mutants.out/outcomes.json.
#[cfg(feature = "git")]
fn try_load_ci_artifact(
    repo_root: &Path,
    head_commit: &str,
    relevant_files: &[String],
) -> Result<Option<MutationGate>> {
    // A missing, unreadable, or unparseable summary all mean "no CI evidence".
    let summary_path = repo_root.join("mutants-summary.json");
    if !summary_path.exists() {
        return Ok(None);
    }
    let content = match std::fs::read_to_string(&summary_path) {
        Ok(c) => c,
        Err(_) => return Ok(None),
    };
    let summary = match serde_json::from_str::<CiMutantsSummary>(&content) {
        Ok(s) => s,
        Err(_) => return Ok(None),
    };

    // Prefix comparison in either direction tolerates abbreviated hashes.
    let same_commit = summary.commit.starts_with(head_commit)
        || head_commit.starts_with(&summary.commit);
    if !same_commit {
        // Stale artifact from another commit: ignore it entirely.
        return Ok(None);
    }
    let commit_match = CommitMatch::Exact;

    let status = match summary.status.as_str() {
        "pass" => GateStatus::Pass,
        "fail" => GateStatus::Fail,
        "skipped" => GateStatus::Skipped,
        _ => GateStatus::Pending,
    };

    let survivors: Vec<MutationSurvivor> = summary
        .survivors
        .into_iter()
        .map(|s| MutationSurvivor {
            file: s.file,
            line: s.line,
            mutation: s.mutation,
        })
        .collect();

    // Ratio of the relevant scope that the artifact actually covered.
    let tested = summary.scope.clone();
    let scope_ratio = if relevant_files.is_empty() {
        1.0
    } else {
        tested.len() as f64 / relevant_files.len() as f64
    };

    Ok(Some(MutationGate {
        meta: GateMeta {
            status,
            source: EvidenceSource::CiArtifact,
            commit_match,
            scope: ScopeCoverage {
                relevant: relevant_files.to_vec(),
                tested,
                ratio: scope_ratio.min(1.0),
                lines_relevant: None,
                lines_tested: None,
            },
            evidence_commit: Some(summary.commit),
            evidence_generated_at_ms: None,
        },
        survivors,
        killed: summary.killed,
        timeout: summary.timeout,
        unviable: summary.unviable,
    }))
}
1709
/// Try to load cached mutation results.
///
/// Best-effort: returns `Ok(None)` when the cache file is missing, when it
/// cannot be read or parsed (a corrupt cache entry should not abort the
/// cockpit — previously a parse error propagated as a hard error), or when
/// the cached scope does not cover every currently-relevant file.
#[cfg(feature = "git")]
fn try_load_cached(
    repo_root: &Path,
    head_commit: &str,
    relevant_files: &[String],
) -> Result<Option<MutationGate>> {
    // Joining directly also covers the "cache dir missing" case.
    let cache_file = repo_root
        .join(".tokmd/cache/mutants")
        .join(format!("{}.json", head_commit));
    if !cache_file.exists() {
        return Ok(None);
    }

    // Treat unreadable or corrupt cache entries as a cache miss; the caller
    // falls back to running mutations.
    let Ok(content) = std::fs::read_to_string(&cache_file) else {
        return Ok(None);
    };
    let Ok(gate) = serde_json::from_str::<MutationGate>(&content) else {
        return Ok(None);
    };

    // Reject partial caches: every relevant file must have been tested,
    // otherwise survivors could be under-reported.
    let tested = &gate.meta.scope.tested;
    if relevant_files.iter().any(|f| !tested.contains(f)) {
        return Ok(None);
    }

    Ok(Some(gate))
}
1744
/// Run mutations locally.
///
/// Currently a stub: mutation testing is expensive, so nothing is executed.
/// Returns a `Pending` gate whose scope lists `relevant_files` with nothing
/// tested (ratio 0.0).
#[cfg(feature = "git")]
fn run_mutations(_repo_root: &Path, relevant_files: &[String]) -> Result<MutationGate> {
    // This is expensive, so we only do it if explicitly asked or no other choice
    // For now, return Pending
    Ok(MutationGate {
        meta: GateMeta {
            status: GateStatus::Pending,
            source: EvidenceSource::RanLocal,
            commit_match: CommitMatch::Exact,
            scope: ScopeCoverage {
                relevant: relevant_files.to_vec(),
                tested: Vec::new(),
                ratio: 0.0,
                lines_relevant: None,
                lines_tested: None,
            },
            evidence_commit: None,
            evidence_generated_at_ms: None,
        },
        survivors: Vec::new(),
        killed: 0,
        timeout: 0,
        unviable: 0,
    })
}
1771
1772// =============================================================================
1773// File stats and change surface
1774// =============================================================================
1775
/// Get file stats for changed files.
///
/// Runs `git diff --numstat` over the given range and parses each
/// `insertions<TAB>deletions<TAB>path` line. Counts that fail to parse
/// (e.g. the `-` git emits for binary files) become 0.
///
/// # Errors
/// Fails if git cannot be spawned or the diff exits non-zero.
#[cfg(feature = "git")]
pub fn get_file_stats(
    repo_root: &Path,
    base: &str,
    head: &str,
    range_mode: tokmd_git::GitRangeMode,
) -> Result<Vec<FileStat>> {
    let range = range_mode.format(base, head);
    let output = Command::new("git")
        .arg("-C")
        .arg(repo_root)
        .args(["diff", "--numstat", &range])
        .output()
        .context("Failed to run git diff --numstat")?;

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        bail!("git diff --numstat failed: {}", stderr.trim());
    }

    let stdout = String::from_utf8_lossy(&output.stdout);
    let mut stats = Vec::new();

    for line in stdout.lines() {
        // A numstat line has exactly three tab-separated fields; skip others.
        let fields: Vec<&str> = line.split('\t').collect();
        if let [added, removed, file] = fields.as_slice() {
            stats.push(FileStat {
                path: (*file).to_string(),
                insertions: added.parse().unwrap_or(0),
                deletions: removed.parse().unwrap_or(0),
            });
        }
    }

    Ok(stats)
}
1816
/// Compute change surface metrics.
///
/// Aggregates the commit count over the range, total line churn, churn per
/// commit, and how concentrated changes are (share of churn carried by the
/// top 20% of files).
#[cfg(feature = "git")]
fn compute_change_surface(
    repo_root: &Path,
    base: &str,
    head: &str,
    file_stats: &[FileStat],
    range_mode: tokmd_git::GitRangeMode,
) -> Result<ChangeSurface> {
    let range = range_mode.format(base, head);
    let output = Command::new("git")
        .arg("-C")
        .arg(repo_root)
        .args(["rev-list", "--count", &range])
        .output()
        .context("Failed to run git rev-list --count")?;

    // NOTE(review): unlike get_file_stats, the exit status is not checked
    // here — a failed rev-list silently yields 0 commits. Confirm this
    // best-effort behavior is intended.
    let commits = String::from_utf8_lossy(&output.stdout)
        .trim()
        .parse()
        .unwrap_or(0);

    // One pass over the stats for both line counters.
    let mut insertions = 0;
    let mut deletions = 0;
    for stat in file_stats {
        insertions += stat.insertions;
        deletions += stat.deletions;
    }
    let files_changed = file_stats.len();
    let net_lines = (insertions as i64) - (deletions as i64);

    // Average churn per commit; zero when the range has no commits.
    let churn_velocity = if commits > 0 {
        (insertions + deletions) as f64 / commits as f64
    } else {
        0.0
    };

    // Concentration: share of total churn landing in the top 20% of files.
    let mut per_file_churn: Vec<usize> = file_stats
        .iter()
        .map(|s| s.insertions + s.deletions)
        .collect();
    per_file_churn.sort_unstable();
    per_file_churn.reverse();

    let top_count = (files_changed as f64 * 0.2).ceil() as usize;
    let total_churn: usize = per_file_churn.iter().sum();
    let top_churn: usize = per_file_churn.iter().take(top_count).sum();

    let change_concentration = if total_churn > 0 {
        top_churn as f64 / total_churn as f64
    } else {
        0.0
    };

    Ok(ChangeSurface {
        commits,
        files_changed,
        insertions,
        deletions,
        net_lines,
        churn_velocity,
        change_concentration,
    })
}
1877
1878// =============================================================================
1879// Composition, contracts, health, risk, review plan
1880// =============================================================================
1881
1882/// Compute composition metrics.
1883pub fn compute_composition<S: AsRef<str>>(files: &[S]) -> Composition {
1884    let mut code = 0;
1885    let mut test = 0;
1886    let mut docs = 0;
1887    let mut config = 0;
1888
1889    for file in files.iter() {
1890        let path = file.as_ref().to_lowercase();
1891        if path.ends_with(".rs")
1892            || path.ends_with(".js")
1893            || path.ends_with(".ts")
1894            || path.ends_with(".py")
1895        {
1896            if path.contains("test") || path.contains("_spec") {
1897                test += 1;
1898            } else {
1899                code += 1;
1900            }
1901        } else if path.ends_with(".md") || path.contains("/docs/") {
1902            docs += 1;
1903        } else if path.ends_with(".toml")
1904            || path.ends_with(".json")
1905            || path.ends_with(".yml")
1906            || path.ends_with(".yaml")
1907        {
1908            config += 1;
1909        }
1910    }
1911
1912    let total = (code + test + docs + config) as f64;
1913    let (code_pct, test_pct, docs_pct, config_pct) = if total > 0.0 {
1914        (
1915            code as f64 / total,
1916            test as f64 / total,
1917            docs as f64 / total,
1918            config as f64 / total,
1919        )
1920    } else {
1921        (0.0, 0.0, 0.0, 0.0)
1922    };
1923
1924    let test_ratio = if code > 0 {
1925        test as f64 / code as f64
1926    } else if test > 0 {
1927        1.0
1928    } else {
1929        0.0
1930    };
1931
1932    Composition {
1933        code_pct,
1934        test_pct,
1935        docs_pct,
1936        config_pct,
1937        test_ratio,
1938    }
1939}
1940
1941/// Detect contract changes.
1942pub fn detect_contracts<S: AsRef<str>>(files: &[S]) -> Contracts {
1943    let mut api_changed = false;
1944    let mut cli_changed = false;
1945    let mut schema_changed = false;
1946    let mut breaking_indicators = 0;
1947
1948    for file in files.iter() {
1949        if file.as_ref().ends_with("lib.rs") || file.as_ref().ends_with("mod.rs") {
1950            api_changed = true;
1951        }
1952        if file.as_ref().contains("crates/tokmd/src/commands/")
1953            || file.as_ref().contains("crates/tokmd-config/")
1954        {
1955            cli_changed = true;
1956        }
1957        if file.as_ref() == "docs/schema.json" || file.as_ref() == "docs/SCHEMA.md" {
1958            schema_changed = true;
1959        }
1960    }
1961
1962    if api_changed {
1963        breaking_indicators += 1;
1964    }
1965    if schema_changed {
1966        breaking_indicators += 1;
1967    }
1968
1969    Contracts {
1970        api_changed,
1971        cli_changed,
1972        schema_changed,
1973        breaking_indicators,
1974    }
1975}
1976
1977/// Compute code health metrics.
1978pub fn compute_code_health(file_stats: &[FileStat], contracts: &Contracts) -> CodeHealth {
1979    let mut large_files_touched = 0;
1980    let mut total_lines = 0;
1981
1982    for stat in file_stats {
1983        let lines = stat.insertions + stat.deletions;
1984        if lines > 500 {
1985            large_files_touched += 1;
1986        }
1987        total_lines += lines;
1988    }
1989
1990    let avg_file_size = if !file_stats.is_empty() {
1991        total_lines / file_stats.len()
1992    } else {
1993        0
1994    };
1995
1996    let complexity_indicator = if large_files_touched > 5 {
1997        ComplexityIndicator::Critical
1998    } else if large_files_touched > 2 {
1999        ComplexityIndicator::High
2000    } else if large_files_touched > 0 {
2001        ComplexityIndicator::Medium
2002    } else {
2003        ComplexityIndicator::Low
2004    };
2005
2006    let mut warnings = Vec::new();
2007    for stat in file_stats {
2008        if stat.insertions + stat.deletions > 500 {
2009            warnings.push(HealthWarning {
2010                path: stat.path.clone(),
2011                warning_type: WarningType::LargeFile,
2012                message: "Large file touched".to_string(),
2013            });
2014        }
2015    }
2016
2017    let mut score: u32 = 100;
2018    score = score.saturating_sub((large_files_touched * 10) as u32);
2019    if contracts.breaking_indicators > 0 {
2020        score = score.saturating_sub(20);
2021    }
2022
2023    let grade = match score {
2024        90..=100 => "A",
2025        80..=89 => "B",
2026        70..=79 => "C",
2027        60..=69 => "D",
2028        _ => "F",
2029    }
2030    .to_string();
2031
2032    CodeHealth {
2033        score,
2034        grade,
2035        large_files_touched,
2036        avg_file_size,
2037        complexity_indicator,
2038        warnings,
2039    }
2040}
2041
2042fn compute_risk_from_iter<I>(_contracts: &Contracts, health: &CodeHealth, file_stats: I) -> Risk
2043where
2044    I: IntoIterator<Item = String>,
2045{
2046    let mut hotspots_touched = Vec::new();
2047    let bus_factor_warnings = Vec::new();
2048
2049    for path in file_stats {
2050        hotspots_touched.push(path);
2051    }
2052
2053    let score = (hotspots_touched.len() * 15 + (100 - health.score) as usize).min(100) as u32;
2054
2055    let level = match score {
2056        0..=20 => RiskLevel::Low,
2057        21..=50 => RiskLevel::Medium,
2058        51..=80 => RiskLevel::High,
2059        _ => RiskLevel::Critical,
2060    };
2061
2062    Risk {
2063        hotspots_touched,
2064        bus_factor_warnings,
2065        level,
2066        score,
2067    }
2068}
2069
2070/// Compute risk metrics for borrowed file stats.
2071pub fn compute_risk(file_stats: &[FileStat], contracts: &Contracts, health: &CodeHealth) -> Risk {
2072    compute_risk_from_iter(
2073        contracts,
2074        health,
2075        file_stats
2076            .iter()
2077            .filter(|stat| stat.insertions + stat.deletions > 300)
2078            .map(|stat| stat.path.clone()),
2079    )
2080}
2081
/// Internal fast path used by cockpit assembly when it already owns the stats.
///
/// Same hotspot rule as [`compute_risk`] (more than 300 changed lines),
/// but moves the paths out of the owned stats instead of cloning them.
#[cfg(feature = "git")]
fn compute_risk_owned(
    file_stats: Vec<FileStat>,
    contracts: &Contracts,
    health: &CodeHealth,
) -> Risk {
    let hotspots = file_stats
        .into_iter()
        .filter_map(|stat| (stat.insertions + stat.deletions > 300).then_some(stat.path));
    compute_risk_from_iter(contracts, health, hotspots)
}
2098
2099/// Generate review plan.
2100pub fn generate_review_plan(file_stats: &[FileStat], _contracts: &Contracts) -> Vec<ReviewItem> {
2101    let mut items = Vec::new();
2102
2103    for stat in file_stats {
2104        let lines = stat.insertions + stat.deletions;
2105        let priority = if lines > 200 {
2106            1
2107        } else if lines > 50 {
2108            2
2109        } else {
2110            3
2111        };
2112        let complexity = if lines > 300 {
2113            5
2114        } else if lines > 100 {
2115            3
2116        } else {
2117            1
2118        };
2119
2120        items.push(ReviewItem {
2121            path: stat.path.clone(),
2122            reason: format!("{} lines changed", lines),
2123            priority,
2124            complexity: Some(complexity),
2125            lines_changed: Some(lines),
2126        });
2127    }
2128
2129    items.sort_by(|a, b| {
2130        a.priority
2131            .cmp(&b.priority)
2132            .then_with(|| a.path.cmp(&b.path))
2133    });
2134    items
2135}
2136
2137// =============================================================================
2138// Utility helpers
2139// =============================================================================
2140
/// Format a float with a sign prefix.
///
/// Positive values gain a leading `+`; zero and negatives use the default
/// rendering (negatives already carry `-`). Always two decimal places.
pub fn format_signed_f64(value: f64) -> String {
    let prefix = if value > 0.0 { "+" } else { "" };
    format!("{prefix}{value:.2}")
}
2149
2150/// Human-readable label for a trend direction.
2151pub fn trend_direction_label(direction: TrendDirection) -> &'static str {
2152    match direction {
2153        TrendDirection::Improving => "improving",
2154        TrendDirection::Stable => "stable",
2155        TrendDirection::Degrading => "degrading",
2156    }
2157}
2158
/// Render a sparkline string from a slice of values.
///
/// Each value is scaled into one of eight Unicode block characters.
/// Empty input, or input whose extremes are not finite (e.g. containing
/// infinities or only NaNs), yields an empty string; a flat series
/// renders as a row of middle bars.
pub fn sparkline(values: &[f64]) -> String {
    const BARS: [char; 8] = [
        '\u{2581}', '\u{2582}', '\u{2583}', '\u{2584}', '\u{2585}', '\u{2586}', '\u{2587}',
        '\u{2588}',
    ];

    if values.is_empty() {
        return String::new();
    }

    // Track the extremes in one pass; f64::min/max ignore NaN operands.
    let mut min = f64::INFINITY;
    let mut max = f64::NEG_INFINITY;
    for &v in values {
        min = min.min(v);
        max = max.max(v);
    }

    if !min.is_finite() || !max.is_finite() {
        return String::new();
    }

    let span = max - min;
    if span.abs() < f64::EPSILON {
        // Flat series: every point becomes the middle bar.
        return BARS[3].to_string().repeat(values.len());
    }

    values
        .iter()
        .map(|v| {
            let norm = ((v - min) / span).clamp(0.0, 1.0);
            // Scale onto the 8-bar palette (indices 0..=7).
            BARS[(norm * 7.0).round() as usize]
        })
        .collect()
}
2196
2197/// Return the current time as an ISO 8601 string.
2198pub fn now_iso8601() -> String {
2199    let now = time::OffsetDateTime::now_utc();
2200    format!(
2201        "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z",
2202        now.year(),
2203        now.month() as u8,
2204        now.day(),
2205        now.hour(),
2206        now.minute(),
2207        now.second(),
2208    )
2209}
2210
/// Round a float to two decimal places.
///
/// Uses `f64::round`, which rounds halfway cases away from zero.
pub fn round_pct(val: f64) -> f64 {
    let scaled = (val * 100.0).round();
    scaled / 100.0
}
2215
// Unit tests for the pure cockpit helpers. Several tests exercise functions
// defined elsewhere in this file (compute_metric_trend, flush_uncovered_hunks,
// compute_diff_coverage_gate); their expected values are part of the public
// contract these helpers must keep.
#[cfg(test)]
mod tests {
    use super::*;

    // ---- round_pct ----

    #[test]
    fn test_round_pct_basic() {
        assert_eq!(round_pct(0.123456), 0.12);
        assert_eq!(round_pct(0.999), 1.0);
        assert_eq!(round_pct(0.0), 0.0);
    }

    #[test]
    fn test_round_pct_rounding_up() {
        // f64::round rounds halfway cases away from zero: 12.5 -> 13.
        assert_eq!(round_pct(0.125), 0.13);
    }

    #[test]
    fn test_round_pct_negative() {
        assert_eq!(round_pct(-0.567), -0.57);
    }

    // ---- format_signed_f64 ----

    #[test]
    fn test_format_signed_positive() {
        assert_eq!(format_signed_f64(5.0), "+5.00");
        assert_eq!(format_signed_f64(0.5), "+0.50");
    }

    #[test]
    fn test_format_signed_negative() {
        assert_eq!(format_signed_f64(-2.50), "-2.50");
    }

    #[test]
    fn test_format_signed_zero() {
        // Zero gets no "+" prefix.
        assert_eq!(format_signed_f64(0.0), "0.00");
    }

    // ---- trend_direction_label ----

    #[test]
    fn test_trend_direction_labels() {
        assert_eq!(
            trend_direction_label(TrendDirection::Improving),
            "improving"
        );
        assert_eq!(trend_direction_label(TrendDirection::Stable), "stable");
        assert_eq!(
            trend_direction_label(TrendDirection::Degrading),
            "degrading"
        );
    }

    // ---- sparkline ----

    #[test]
    fn test_sparkline_empty() {
        assert_eq!(sparkline(&[]), "");
    }

    #[test]
    fn test_sparkline_single_value() {
        // A single value has zero span and renders as one middle bar.
        let result = sparkline(&[5.0]);
        assert_eq!(result.chars().count(), 1);
    }

    #[test]
    fn test_sparkline_ascending() {
        let result = sparkline(&[0.0, 25.0, 50.0, 75.0, 100.0]);
        assert_eq!(result.chars().count(), 5);
        let chars: Vec<char> = result.chars().collect();
        // First should be lowest bar, last should be highest
        assert_eq!(chars[0], '\u{2581}');
        assert_eq!(chars[4], '\u{2588}');
    }

    #[test]
    fn test_sparkline_constant_values() {
        let result = sparkline(&[42.0, 42.0, 42.0]);
        assert_eq!(result.chars().count(), 3);
        let chars: Vec<char> = result.chars().collect();
        // All should be same middle bar
        assert_eq!(chars[0], chars[1]);
        assert_eq!(chars[1], chars[2]);
    }

    // ---- compute_metric_trend ----

    #[test]
    fn test_metric_trend_improving_higher_is_better() {
        let trend = compute_metric_trend(90.0, 80.0, true);
        assert_eq!(trend.direction, TrendDirection::Improving);
        assert_eq!(trend.delta, 10.0);
        assert!(trend.delta_pct > 0.0);
    }

    #[test]
    fn test_metric_trend_degrading_higher_is_better() {
        let trend = compute_metric_trend(70.0, 80.0, true);
        assert_eq!(trend.direction, TrendDirection::Degrading);
        assert_eq!(trend.delta, -10.0);
    }

    #[test]
    fn test_metric_trend_stable() {
        let trend = compute_metric_trend(80.0, 80.0, true);
        assert_eq!(trend.direction, TrendDirection::Stable);
    }

    #[test]
    fn test_metric_trend_improving_lower_is_better() {
        // Risk: lower is better
        let trend = compute_metric_trend(30.0, 50.0, false);
        assert_eq!(trend.direction, TrendDirection::Improving);
    }

    #[test]
    fn test_metric_trend_degrading_lower_is_better() {
        let trend = compute_metric_trend(50.0, 30.0, false);
        assert_eq!(trend.direction, TrendDirection::Degrading);
    }

    #[test]
    fn test_metric_trend_from_zero() {
        // A zero baseline is reported as a 100% change.
        let trend = compute_metric_trend(10.0, 0.0, true);
        assert_eq!(trend.delta_pct, 100.0);
    }

    #[test]
    fn test_metric_trend_both_zero() {
        let trend = compute_metric_trend(0.0, 0.0, true);
        assert_eq!(trend.delta_pct, 0.0);
        assert_eq!(trend.direction, TrendDirection::Stable);
    }

    // ---- compute_composition ----

    #[test]
    fn test_composition_mixed_files() {
        let files = vec![
            "src/main.rs",
            "src/lib.rs",
            "tests/test_main.rs",
            "README.md",
            "Cargo.toml",
        ];
        let comp = compute_composition(&files);
        assert!(comp.code_pct > 0.0);
        assert!(comp.test_pct > 0.0);
        assert!(comp.docs_pct > 0.0);
        assert!(comp.config_pct > 0.0);
    }

    #[test]
    fn test_composition_empty_input() {
        let files: Vec<&str> = vec![];
        let comp = compute_composition(&files);
        assert_eq!(comp.code_pct, 0.0);
        assert_eq!(comp.test_pct, 0.0);
        assert_eq!(comp.test_ratio, 0.0);
    }

    #[test]
    fn test_composition_only_code() {
        let files = vec!["src/main.rs", "src/lib.rs"];
        let comp = compute_composition(&files);
        assert_eq!(comp.code_pct, 1.0);
        assert_eq!(comp.test_pct, 0.0);
        assert_eq!(comp.test_ratio, 0.0);
    }

    #[test]
    fn test_composition_test_ratio() {
        let files = vec![
            "src/main.rs",
            "src/lib.rs",
            "tests/test_main.rs",
            "tests/test_lib.rs",
        ];
        let comp = compute_composition(&files);
        // 2 code files, 2 test files → ratio = 1.0
        assert_eq!(comp.test_ratio, 1.0);
    }

    #[test]
    fn test_composition_only_tests() {
        let files = vec!["tests/test_main.rs", "tests/test_lib.rs"];
        let comp = compute_composition(&files);
        assert_eq!(comp.code_pct, 0.0);
        assert_eq!(comp.test_pct, 1.0);
        // No code files, but tests exist → test_ratio = 1.0
        assert_eq!(comp.test_ratio, 1.0);
    }

    // ---- detect_contracts ----

    #[test]
    fn test_detect_contracts_api() {
        let files = vec!["crates/tokmd-types/src/lib.rs"];
        let contracts = detect_contracts(&files);
        assert!(contracts.api_changed);
        assert!(!contracts.cli_changed);
        assert!(!contracts.schema_changed);
        assert_eq!(contracts.breaking_indicators, 1);
    }

    #[test]
    fn test_detect_contracts_cli() {
        let files = vec!["crates/tokmd/src/commands/lang.rs"];
        let contracts = detect_contracts(&files);
        assert!(!contracts.api_changed);
        assert!(contracts.cli_changed);
    }

    #[test]
    fn test_detect_contracts_schema() {
        let files = vec!["docs/schema.json"];
        let contracts = detect_contracts(&files);
        assert!(contracts.schema_changed);
        assert_eq!(contracts.breaking_indicators, 1);
    }

    #[test]
    fn test_detect_contracts_none() {
        let files = vec!["README.md", "src/utils.rs"];
        let contracts = detect_contracts(&files);
        assert!(!contracts.api_changed);
        assert!(!contracts.cli_changed);
        assert!(!contracts.schema_changed);
        assert_eq!(contracts.breaking_indicators, 0);
    }

    #[test]
    fn test_detect_contracts_all() {
        let files = vec![
            "crates/tokmd-types/src/lib.rs",
            "crates/tokmd/src/commands/lang.rs",
            "docs/schema.json",
        ];
        let contracts = detect_contracts(&files);
        assert!(contracts.api_changed);
        assert!(contracts.cli_changed);
        assert!(contracts.schema_changed);
        assert_eq!(contracts.breaking_indicators, 2); // api + schema
    }

    // ---- compute_code_health ----

    /// Convenience constructor for a `FileStat` test fixture.
    fn make_stat(path: &str, insertions: usize, deletions: usize) -> FileStat {
        FileStat {
            path: path.to_string(),
            insertions,
            deletions,
        }
    }

    #[test]
    fn test_code_health_perfect_score() {
        let stats = vec![make_stat("src/main.rs", 10, 5)];
        let contracts = Contracts {
            api_changed: false,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 0,
        };
        let health = compute_code_health(&stats, &contracts);
        assert_eq!(health.score, 100);
        assert_eq!(health.grade, "A");
        assert_eq!(health.large_files_touched, 0);
    }

    #[test]
    fn test_code_health_large_file_penalty() {
        let stats = vec![make_stat("src/huge.rs", 400, 200)]; // >500 lines
        let contracts = Contracts {
            api_changed: false,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 0,
        };
        let health = compute_code_health(&stats, &contracts);
        assert!(health.score < 100);
        assert_eq!(health.large_files_touched, 1);
        assert!(!health.warnings.is_empty());
    }

    #[test]
    fn test_code_health_breaking_changes_penalty() {
        let stats = vec![make_stat("src/lib.rs", 10, 5)];
        let contracts = Contracts {
            api_changed: true,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 1,
        };
        let health = compute_code_health(&stats, &contracts);
        assert_eq!(health.score, 80); // 100 - 20 for breaking
    }

    #[test]
    fn test_code_health_empty_stats() {
        let contracts = Contracts {
            api_changed: false,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 0,
        };
        let health = compute_code_health(&[], &contracts);
        assert_eq!(health.score, 100);
        assert_eq!(health.avg_file_size, 0);
    }

    #[test]
    fn test_code_health_complexity_indicators() {
        // 0 large files = Low
        let contracts = Contracts {
            api_changed: false,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 0,
        };
        let health = compute_code_health(&[], &contracts);
        assert_eq!(health.complexity_indicator, ComplexityIndicator::Low);

        // 1 large file = Medium
        let stats = vec![make_stat("big.rs", 300, 300)];
        let health = compute_code_health(&stats, &contracts);
        assert_eq!(health.complexity_indicator, ComplexityIndicator::Medium);
    }

    // ---- compute_risk ----

    #[test]
    fn test_risk_no_hotspots() {
        let stats = vec![make_stat("src/main.rs", 10, 5)];
        let contracts = Contracts {
            api_changed: false,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 0,
        };
        let health = compute_code_health(&stats, &contracts);
        let risk = compute_risk(&stats, &contracts, &health);
        assert_eq!(risk.level, RiskLevel::Low);
        assert!(risk.hotspots_touched.is_empty());
    }

    #[test]
    fn test_risk_with_hotspots() {
        let stats = vec![
            make_stat("src/huge.rs", 200, 200), // >300 lines total
            make_stat("src/big.rs", 200, 200),  // >300 lines total
        ];
        let contracts = Contracts {
            api_changed: false,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 0,
        };
        let health = compute_code_health(&stats, &contracts);
        let risk = compute_risk(&stats, &contracts, &health);
        assert!(!risk.hotspots_touched.is_empty());
        assert!(risk.score > 0);
    }

    // ---- generate_review_plan ----

    #[test]
    fn test_review_plan_sorted_by_priority() {
        let stats = vec![
            make_stat("small.rs", 10, 5),    // priority 3
            make_stat("medium.rs", 40, 30),  // priority 2
            make_stat("large.rs", 150, 100), // priority 1
        ];
        let contracts = Contracts {
            api_changed: false,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 0,
        };
        let plan = generate_review_plan(&stats, &contracts);
        assert_eq!(plan.len(), 3);
        assert_eq!(plan[0].priority, 1);
        assert_eq!(plan[1].priority, 2);
        assert_eq!(plan[2].priority, 3);
    }

    #[test]
    fn test_review_plan_tiebreaks_by_path_within_priority() {
        let stats = vec![
            make_stat("zeta.rs", 120, 20),
            make_stat("alpha.rs", 110, 10),
            make_stat("middle.rs", 60, 0),
        ];
        let contracts = Contracts {
            api_changed: false,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 0,
        };
        let plan = generate_review_plan(&stats, &contracts);
        assert_eq!(plan[0].path, "alpha.rs");
        assert_eq!(plan[1].path, "middle.rs");
        assert_eq!(plan[2].path, "zeta.rs");
    }

    #[test]
    fn test_review_plan_empty() {
        let contracts = Contracts {
            api_changed: false,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 0,
        };
        let plan = generate_review_plan(&[], &contracts);
        assert!(plan.is_empty());
    }

    #[test]
    fn test_review_plan_complexity_scores() {
        let stats = vec![
            make_stat("huge.rs", 200, 200), // >300 lines: complexity 5
            make_stat("med.rs", 60, 60),    // >100 lines: complexity 3
            make_stat("small.rs", 5, 5),    // <=100 lines: complexity 1
        ];
        let contracts = Contracts {
            api_changed: false,
            cli_changed: false,
            schema_changed: false,
            breaking_indicators: 0,
        };
        let plan = generate_review_plan(&stats, &contracts);
        // Find each item by path
        let huge = plan.iter().find(|i| i.path == "huge.rs").unwrap();
        let med = plan.iter().find(|i| i.path == "med.rs").unwrap();
        let small = plan.iter().find(|i| i.path == "small.rs").unwrap();
        assert_eq!(huge.complexity, Some(5));
        assert_eq!(med.complexity, Some(3));
        assert_eq!(small.complexity, Some(1));
    }

    // ---- flush_uncovered_hunks ----

    #[test]
    #[cfg(feature = "git")]
    fn test_flush_uncovered_hunks_consecutive() {
        // Consecutive line numbers collapse into a single hunk.
        let mut hunks = Vec::new();
        flush_uncovered_hunks("test.rs", &[1, 2, 3, 5, 6, 10], &mut hunks);
        assert_eq!(hunks.len(), 3);
        assert_eq!(hunks[0].start_line, 1);
        assert_eq!(hunks[0].end_line, 3);
        assert_eq!(hunks[1].start_line, 5);
        assert_eq!(hunks[1].end_line, 6);
        assert_eq!(hunks[2].start_line, 10);
        assert_eq!(hunks[2].end_line, 10);
    }

    #[test]
    #[cfg(feature = "git")]
    fn test_flush_uncovered_hunks_empty() {
        let mut hunks = Vec::new();
        flush_uncovered_hunks("test.rs", &[], &mut hunks);
        assert!(hunks.is_empty());
    }

    #[test]
    #[cfg(feature = "git")]
    fn test_flush_uncovered_hunks_empty_file() {
        // An empty file name yields no hunks even when lines are given.
        let mut hunks = Vec::new();
        flush_uncovered_hunks("", &[1, 2], &mut hunks);
        assert!(hunks.is_empty());
    }

    #[test]
    #[cfg(feature = "git")]
    fn test_flush_uncovered_hunks_single_line() {
        let mut hunks = Vec::new();
        flush_uncovered_hunks("test.rs", &[42], &mut hunks);
        assert_eq!(hunks.len(), 1);
        assert_eq!(hunks[0].start_line, 42);
        assert_eq!(hunks[0].end_line, 42);
    }

    #[test]
    #[cfg(feature = "git")]
    fn test_diff_coverage_gate_flushes_unterminated_final_lcov_record() {
        // Builds a throwaway git repo with one base and one head commit,
        // then feeds an LCOV file whose final record has no terminator.
        let dir = tempfile::tempdir().unwrap();
        std::fs::create_dir_all(dir.path().join("src")).unwrap();
        std::fs::write(dir.path().join("src/lib.rs"), "fn a() {}\n").unwrap();

        let git = |args: &[&str]| {
            let status = Command::new("git")
                .args(args)
                .current_dir(dir.path())
                .status()
                .unwrap();
            assert!(status.success(), "git {:?} failed", args);
        };

        git(&["init", "-b", "main"]);
        git(&["config", "user.email", "tokmd@example.com"]);
        git(&["config", "user.name", "tokmd"]);
        git(&["add", "."]);
        git(&["commit", "-m", "base"]);

        std::fs::write(dir.path().join("src/lib.rs"), "fn a() {}\nfn b() {}\n").unwrap();
        git(&["add", "."]);
        git(&["commit", "-m", "head"]);

        // Note: no trailing "end_of_record" line — the parser must still
        // flush this final record.
        std::fs::write(dir.path().join("lcov.info"), "SF:src/lib.rs\nDA:2,1\n").unwrap();

        let gate = compute_diff_coverage_gate(
            dir.path(),
            "HEAD~1",
            "HEAD",
            tokmd_git::GitRangeMode::TwoDot,
        )
        .unwrap()
        .expect("diff coverage gate should exist");

        assert_eq!(gate.coverage_pct, 1.0);
        assert_eq!(gate.meta.scope.lines_relevant, Some(1));
        assert_eq!(gate.meta.scope.lines_tested, Some(1));
    }

    // ---- now_iso8601 ----

    #[test]
    fn test_now_iso8601_format() {
        // "YYYY-MM-DDThh:mm:ssZ" is exactly 20 characters.
        let ts = now_iso8601();
        assert!(ts.ends_with('Z'));
        assert!(ts.contains('T'));
        assert_eq!(ts.len(), 20);
    }

    // ---- FileStat AsRef ----

    #[test]
    fn test_filestat_as_ref() {
        let stat = FileStat {
            path: "src/main.rs".to_string(),
            insertions: 10,
            deletions: 5,
        };
        let s: &str = stat.as_ref();
        assert_eq!(s, "src/main.rs");
    }
}