// fallow_cli/explain.rs
1//! Metric and rule definitions for explainable CLI output.
2//!
3//! Provides structured metadata that describes what each metric, threshold,
4//! and rule means — consumed by the `_meta` object in JSON output and by
5//! SARIF `fullDescription` / `helpUri` fields.
6
7use serde_json::{Value, json};
8
// ── Docs base URL ────────────────────────────────────────────────

/// Root of the hosted documentation site. `rule_docs_url` joins this
/// with each rule's `docs_path` to build per-rule help links.
const DOCS_BASE: &str = "https://docs.fallow.tools";

/// Docs URL for the dead-code (check) command.
///
/// NOTE: the `https://docs.fallow.tools` prefix is repeated literally in the
/// three command URLs below (`concat!` cannot take a `const`) — keep them in
/// sync with `DOCS_BASE` when the docs host changes.
pub const CHECK_DOCS: &str = "https://docs.fallow.tools/cli/dead-code";

/// Docs URL for the health command.
pub const HEALTH_DOCS: &str = "https://docs.fallow.tools/cli/health";

/// Docs URL for the dupes command.
pub const DUPES_DOCS: &str = "https://docs.fallow.tools/cli/dupes";
21
22// ── Check rules ─────────────────────────────────────────────────
23
/// Rule definition for SARIF `fullDescription` and JSON `_meta`.
pub struct RuleDef {
    /// Stable rule identifier, namespaced with `fallow/` (e.g. `fallow/unused-file`).
    pub id: &'static str,
    /// Human-readable rule name (SARIF rule name / `_meta` `name` field).
    pub name: &'static str,
    /// One-line summary, used for SARIF `shortDescription`.
    pub short: &'static str,
    /// Full explanation, used for SARIF `fullDescription` and the JSON
    /// `_meta` `description` field.
    pub full: &'static str,
    /// Path under `DOCS_BASE` (no leading slash); see `rule_docs_url`.
    pub docs_path: &'static str,
}
32
/// Rule definitions for every `fallow dead-code` finding category.
///
/// NOTE: the `check_rules_count` test pins this list at 13 entries — update
/// it (and `check_meta_has_docs_and_rules`) when adding a rule.
pub const CHECK_RULES: &[RuleDef] = &[
    RuleDef {
        id: "fallow/unused-file",
        name: "Unused Files",
        short: "File is not reachable from any entry point",
        full: "Source files that are not imported by any other module and are not entry points (scripts, tests, configs). These files can safely be deleted. Detection uses graph reachability from configured entry points.",
        docs_path: "explanations/dead-code#unused-files",
    },
    RuleDef {
        id: "fallow/unused-export",
        name: "Unused Exports",
        short: "Export is never imported",
        full: "Named exports that are never imported by any other module in the project. Includes both direct exports and re-exports through barrel files. The export may still be used locally within the same file.",
        docs_path: "explanations/dead-code#unused-exports",
    },
    RuleDef {
        id: "fallow/unused-type",
        name: "Unused Type Exports",
        short: "Type export is never imported",
        full: "Type-only exports (interfaces, type aliases, enums used only as types) that are never imported. These do not generate runtime code but add maintenance burden.",
        docs_path: "explanations/dead-code#unused-types",
    },
    RuleDef {
        id: "fallow/unused-dependency",
        name: "Unused Dependencies",
        short: "Dependency listed but never imported",
        full: "Packages listed in dependencies that are never imported or required by any source file. Framework plugins and CLI tools may be false positives — use the ignore_dependencies config to suppress.",
        docs_path: "explanations/dead-code#unused-dependencies",
    },
    RuleDef {
        id: "fallow/unused-dev-dependency",
        name: "Unused Dev Dependencies",
        short: "Dev dependency listed but never imported",
        full: "Packages listed in devDependencies that are never imported by test files, config files, or scripts. Build tools and jest presets that are referenced only in config may appear as false positives.",
        docs_path: "explanations/dead-code#unused-devdependencies",
    },
    RuleDef {
        id: "fallow/unused-optional-dependency",
        name: "Unused Optional Dependencies",
        short: "Optional dependency listed but never imported",
        full: "Packages listed in optionalDependencies that are never imported. Optional dependencies are typically platform-specific — verify they are not needed on any supported platform before removing.",
        docs_path: "explanations/dead-code#unused-optionaldependencies",
    },
    RuleDef {
        id: "fallow/type-only-dependency",
        name: "Type-only Dependencies",
        short: "Production dependency only used via type-only imports",
        full: "Production dependencies that are only imported via `import type` statements. These can be moved to devDependencies since they generate no runtime code and are stripped during compilation.",
        docs_path: "explanations/dead-code#type-only-dependencies",
    },
    RuleDef {
        id: "fallow/unused-enum-member",
        name: "Unused Enum Members",
        short: "Enum member is never referenced",
        full: "Enum members that are never referenced in the codebase. Uses scope-aware binding analysis to track all references including computed access patterns.",
        docs_path: "explanations/dead-code#unused-enum-members",
    },
    RuleDef {
        id: "fallow/unused-class-member",
        name: "Unused Class Members",
        short: "Class member is never referenced",
        full: "Class methods and properties that are never referenced outside the class. Private members are checked within the class scope; public members are checked project-wide.",
        docs_path: "explanations/dead-code#unused-class-members",
    },
    RuleDef {
        id: "fallow/unresolved-import",
        name: "Unresolved Imports",
        short: "Import could not be resolved",
        full: "Import specifiers that could not be resolved to a file on disk. Common causes: deleted files, typos in paths, missing path aliases in tsconfig, or uninstalled packages.",
        docs_path: "explanations/dead-code#unresolved-imports",
    },
    RuleDef {
        id: "fallow/unlisted-dependency",
        name: "Unlisted Dependencies",
        short: "Dependency used but not in package.json",
        full: "Packages that are imported in source code but not listed in package.json. These work by accident (hoisted from another workspace package or transitive dep) and will break in strict package managers.",
        docs_path: "explanations/dead-code#unlisted-dependencies",
    },
    RuleDef {
        id: "fallow/duplicate-export",
        name: "Duplicate Exports",
        short: "Export name appears in multiple modules",
        full: "The same export name is defined in multiple modules. Consumers may import from the wrong module, leading to subtle bugs. Consider renaming or consolidating.",
        docs_path: "explanations/dead-code#duplicate-exports",
    },
    RuleDef {
        id: "fallow/circular-dependency",
        name: "Circular Dependencies",
        short: "Circular dependency chain detected",
        full: "A cycle in the module import graph. Circular dependencies cause undefined behavior with CommonJS (partial modules) and initialization ordering issues with ESM. Break cycles by extracting shared code.",
        docs_path: "explanations/dead-code#circular-dependencies",
    },
];
126
127/// Look up a rule definition by its SARIF rule ID across all rule sets.
128pub fn rule_by_id(id: &str) -> Option<&'static RuleDef> {
129    CHECK_RULES
130        .iter()
131        .chain(HEALTH_RULES.iter())
132        .chain(DUPES_RULES.iter())
133        .find(|r| r.id == id)
134}
135
136/// Build the docs URL for a rule.
137pub fn rule_docs_url(rule: &RuleDef) -> String {
138    format!("{DOCS_BASE}/{}", rule.docs_path)
139}
140
141// ── Health SARIF rules ──────────────────────────────────────────
142
/// Rule definitions for `fallow health` SARIF findings.
///
/// NOTE: the `health_rules_count` test pins this list at 4 entries.
pub const HEALTH_RULES: &[RuleDef] = &[
    RuleDef {
        id: "fallow/high-cyclomatic-complexity",
        name: "High Cyclomatic Complexity",
        short: "Function has high cyclomatic complexity",
        full: "McCabe cyclomatic complexity exceeds the configured threshold. Cyclomatic complexity counts the number of independent paths through a function (1 + decision points: if/else, switch cases, loops, ternary, logical operators). High values indicate functions that are hard to test exhaustively.",
        docs_path: "explanations/health#cyclomatic-complexity",
    },
    RuleDef {
        id: "fallow/high-cognitive-complexity",
        name: "High Cognitive Complexity",
        short: "Function has high cognitive complexity",
        full: "SonarSource cognitive complexity exceeds the configured threshold. Unlike cyclomatic complexity, cognitive complexity penalizes nesting depth and non-linear control flow (breaks, continues, early returns). It measures how hard a function is to understand when reading sequentially.",
        docs_path: "explanations/health#cognitive-complexity",
    },
    RuleDef {
        id: "fallow/high-complexity",
        name: "High Complexity (Both)",
        short: "Function exceeds both complexity thresholds",
        full: "Function exceeds both cyclomatic and cognitive complexity thresholds. This is the strongest signal that a function needs refactoring — it has many paths AND is hard to understand.",
        docs_path: "explanations/health#complexity-metrics",
    },
    RuleDef {
        id: "fallow/refactoring-target",
        name: "Refactoring Target",
        short: "File identified as a high-priority refactoring candidate",
        full: "File identified as a refactoring candidate based on a weighted combination of complexity density, churn velocity, dead code ratio, fan-in (blast radius), and fan-out (coupling). Categories: urgent churn+complexity, break circular dependency, split high-impact file, remove dead code, extract complex functions, reduce coupling.",
        docs_path: "explanations/health#refactoring-targets",
    },
];
173
/// Rule definition for `fallow dupes` SARIF findings (single rule: all
/// duplication findings share the `fallow/code-duplication` ID).
pub const DUPES_RULES: &[RuleDef] = &[RuleDef {
    id: "fallow/code-duplication",
    name: "Code Duplication",
    short: "Duplicated code block",
    full: "A block of code that appears in multiple locations with identical or near-identical token sequences. Clone detection uses normalized token comparison — identifier names and literals are abstracted away in non-strict modes.",
    docs_path: "explanations/duplication#clone-groups",
}];
181
182// ── JSON _meta builders ─────────────────────────────────────────
183
184/// Build the `_meta` object for `fallow dead-code --format json --explain`.
185pub fn check_meta() -> Value {
186    let rules: Value = CHECK_RULES
187        .iter()
188        .map(|r| {
189            (
190                r.id.replace("fallow/", ""),
191                json!({
192                    "name": r.name,
193                    "description": r.full,
194                    "docs": rule_docs_url(r)
195                }),
196            )
197        })
198        .collect::<serde_json::Map<String, Value>>()
199        .into();
200
201    json!({
202        "docs": CHECK_DOCS,
203        "rules": rules
204    })
205}
206
/// Build the `_meta` object for `fallow health --format json --explain`.
///
/// Returns `{ "docs": <health docs URL>, "metrics": { ... } }`; every metric
/// entry carries a `name`, a `description`, an `interpretation` hint, and
/// either a numeric `range` string or an enumerated `values` list.
pub fn health_meta() -> Value {
    json!({
        "docs": HEALTH_DOCS,
        "metrics": {
            // Function-level complexity metrics.
            "cyclomatic": {
                "name": "Cyclomatic Complexity",
                "description": "McCabe cyclomatic complexity: 1 + number of decision points (if/else, switch cases, loops, ternary, logical operators). Measures the number of independent paths through a function.",
                "range": "[1, \u{221e})",
                "interpretation": "lower is better; default threshold: 20"
            },
            "cognitive": {
                "name": "Cognitive Complexity",
                "description": "SonarSource cognitive complexity: penalizes nesting depth and non-linear control flow (breaks, continues, early returns). Measures how hard a function is to understand when reading top-to-bottom.",
                "range": "[0, \u{221e})",
                "interpretation": "lower is better; default threshold: 15"
            },
            "line_count": {
                "name": "Line Count",
                "description": "Number of lines in the function body.",
                "range": "[1, \u{221e})",
                "interpretation": "context-dependent; long functions may need splitting"
            },
            // File-level structure metrics.
            "maintainability_index": {
                "name": "Maintainability Index",
                "description": "Composite score: 100 - (complexity_density \u{00d7} 30) - (dead_code_ratio \u{00d7} 20) - min(ln(fan_out+1) \u{00d7} 4, 15). Clamped to [0, 100]. Higher is better.",
                "range": "[0, 100]",
                "interpretation": "higher is better; <40 poor, 40\u{2013}70 moderate, >70 good"
            },
            "complexity_density": {
                "name": "Complexity Density",
                "description": "Total cyclomatic complexity divided by lines of code. Measures how densely complex the code is per line.",
                "range": "[0, \u{221e})",
                "interpretation": "lower is better; >1.0 indicates very dense complexity"
            },
            "dead_code_ratio": {
                "name": "Dead Code Ratio",
                "description": "Fraction of value exports (excluding type-only exports like interfaces and type aliases) with zero references across the project.",
                "range": "[0, 1]",
                "interpretation": "lower is better; 0 = all exports are used"
            },
            "fan_in": {
                "name": "Fan-in (Importers)",
                "description": "Number of files that import this file. High fan-in means high blast radius \u{2014} changes to this file affect many dependents.",
                "range": "[0, \u{221e})",
                "interpretation": "context-dependent; high fan-in files need careful review before changes"
            },
            "fan_out": {
                "name": "Fan-out (Imports)",
                "description": "Number of files this file directly imports. High fan-out indicates high coupling and change propagation risk.",
                "range": "[0, \u{221e})",
                "interpretation": "lower is better; MI penalty caps at ~40 imports"
            },
            // Churn / hotspot metrics (derived from git history).
            "score": {
                "name": "Hotspot Score",
                "description": "normalized_churn \u{00d7} normalized_complexity \u{00d7} 100, where normalization is against the project maximum. Identifies files that are both complex AND frequently changing.",
                "range": "[0, 100]",
                "interpretation": "higher = riskier; prioritize refactoring high-score files"
            },
            "weighted_commits": {
                "name": "Weighted Commits",
                "description": "Recency-weighted commit count using exponential decay with 90-day half-life. Recent commits contribute more than older ones.",
                "range": "[0, \u{221e})",
                "interpretation": "higher = more recent churn activity"
            },
            "trend": {
                "name": "Churn Trend",
                "description": "Compares recent vs older commit frequency within the analysis window. accelerating = recent > 1.5\u{00d7} older, cooling = recent < 0.67\u{00d7} older, stable = in between.",
                "values": ["accelerating", "stable", "cooling"],
                "interpretation": "accelerating files need attention; cooling files are stabilizing"
            },
            // Refactoring-target metrics.
            "priority": {
                "name": "Refactoring Priority",
                "description": "Weighted score: complexity density (30%), hotspot boost (25%), dead code ratio (20%), fan-in (15%), fan-out (10%). Fan-in and fan-out normalization uses adaptive percentile-based thresholds (p95 of the project distribution). Does not use the maintainability index to avoid double-counting.",
                "range": "[0, 100]",
                "interpretation": "higher = more urgent to refactor"
            },
            "efficiency": {
                "name": "Efficiency Score",
                "description": "priority / effort_numeric (Low=1, Medium=2, High=3). Surfaces quick wins: high-priority, low-effort targets rank first. Default sort order.",
                "range": "[0, 100] \u{2014} effective max depends on effort: Low=100, Medium=50, High\u{2248}33",
                "interpretation": "higher = better quick-win value; targets are sorted by efficiency descending"
            },
            "effort": {
                "name": "Effort Estimate",
                "description": "Heuristic effort estimate based on file size, function count, and fan-in. Thresholds adapt to the project\u{2019}s distribution (percentile-based). Low: small file, few functions, low fan-in. High: large file, high fan-in, or many functions with high density. Medium: everything else.",
                "values": ["low", "medium", "high"],
                "interpretation": "low = quick win, high = needs planning and coordination"
            },
            "confidence": {
                "name": "Confidence Level",
                "description": "Reliability of the recommendation based on data source. High: deterministic graph/AST analysis (dead code, circular deps, complexity). Medium: heuristic thresholds (fan-in/fan-out coupling). Low: depends on git history quality (churn-based recommendations).",
                "values": ["high", "medium", "low"],
                "interpretation": "high = act on it, medium = verify context, low = treat as a signal, not a directive"
            },
            // Project-level aggregate.
            "health_score": {
                "name": "Health Score",
                "description": "Project-level aggregate score computed from vital signs: dead code, complexity, maintainability, hotspots, unused deps, and circular deps. Penalties subtracted from 100. Missing metrics (from pipelines that didn't run) don't penalize. Use --score to force full pipeline for maximum accuracy.",
                "range": "[0, 100]",
                "interpretation": "higher is better; A (85\u{2013}100), B (70\u{2013}84), C (55\u{2013}69), D (40\u{2013}54), F (0\u{2013}39)"
            }
        }
    })
}
311
/// Build the `_meta` object for `fallow dupes --format json --explain`.
///
/// Returns `{ "docs": <dupes docs URL>, "metrics": { ... } }`; entries carry
/// a `name`, `description`, `interpretation`, and (where numeric) a `range`.
pub fn dupes_meta() -> Value {
    json!({
        "docs": DUPES_DOCS,
        "metrics": {
            "duplication_percentage": {
                "name": "Duplication Percentage",
                "description": "Fraction of total source tokens that appear in at least one clone group. Computed over the full analyzed file set.",
                "range": "[0, 100]",
                "interpretation": "lower is better"
            },
            "token_count": {
                "name": "Token Count",
                "description": "Number of normalized source tokens in the clone group. Tokens are language-aware (keywords, identifiers, operators, punctuation). Higher token count = larger duplicate.",
                "range": "[1, \u{221e})",
                "interpretation": "larger clones have higher refactoring value"
            },
            "line_count": {
                "name": "Line Count",
                "description": "Number of source lines spanned by the clone instance. Approximation of clone size for human readability.",
                "range": "[1, \u{221e})",
                "interpretation": "larger clones are more impactful to deduplicate"
            },
            // The two entries below describe structural groupings rather than
            // scalar metrics, so they have no "range" field.
            "clone_groups": {
                "name": "Clone Groups",
                "description": "A set of code fragments with identical or near-identical normalized token sequences. Each group has 2+ instances across different locations.",
                "interpretation": "each group is a single refactoring opportunity"
            },
            "clone_families": {
                "name": "Clone Families",
                "description": "Groups of clone groups that share the same set of files. Indicates systematic duplication patterns (e.g., mirrored directory structures).",
                "interpretation": "families suggest extract-module refactoring opportunities"
            }
        }
    })
}
348
// Unit tests covering the rule tables (completeness, uniqueness, counts),
// the lookup/URL helpers, and the structure of each `_meta` builder output.
#[cfg(test)]
mod tests {
    use super::*;

    // ── rule_by_id ───────────────────────────────────────────────────

    #[test]
    fn rule_by_id_finds_check_rule() {
        let rule = rule_by_id("fallow/unused-file").unwrap();
        assert_eq!(rule.name, "Unused Files");
    }

    #[test]
    fn rule_by_id_finds_health_rule() {
        let rule = rule_by_id("fallow/high-cyclomatic-complexity").unwrap();
        assert_eq!(rule.name, "High Cyclomatic Complexity");
    }

    #[test]
    fn rule_by_id_finds_dupes_rule() {
        let rule = rule_by_id("fallow/code-duplication").unwrap();
        assert_eq!(rule.name, "Code Duplication");
    }

    #[test]
    fn rule_by_id_returns_none_for_unknown() {
        assert!(rule_by_id("fallow/nonexistent").is_none());
        assert!(rule_by_id("").is_none());
    }

    // ── rule_docs_url ────────────────────────────────────────────────

    #[test]
    fn rule_docs_url_format() {
        let rule = rule_by_id("fallow/unused-export").unwrap();
        let url = rule_docs_url(rule);
        assert!(url.starts_with("https://docs.fallow.tools/"));
        assert!(url.contains("unused-exports"));
    }

    // ── CHECK_RULES completeness ─────────────────────────────────────

    #[test]
    fn check_rules_all_have_fallow_prefix() {
        for rule in CHECK_RULES {
            assert!(
                rule.id.starts_with("fallow/"),
                "rule {} should start with fallow/",
                rule.id
            );
        }
    }

    #[test]
    fn check_rules_all_have_docs_path() {
        for rule in CHECK_RULES {
            assert!(
                !rule.docs_path.is_empty(),
                "rule {} should have a docs_path",
                rule.id
            );
        }
    }

    // Rule IDs must be unique ACROSS all three rule sets, since rule_by_id
    // searches them as one flat namespace.
    #[test]
    fn check_rules_no_duplicate_ids() {
        let mut seen = rustc_hash::FxHashSet::default();
        for rule in CHECK_RULES.iter().chain(HEALTH_RULES).chain(DUPES_RULES) {
            assert!(seen.insert(rule.id), "duplicate rule id: {}", rule.id);
        }
    }

    // ── check_meta ───────────────────────────────────────────────────

    #[test]
    fn check_meta_has_docs_and_rules() {
        let meta = check_meta();
        assert!(meta.get("docs").is_some());
        assert!(meta.get("rules").is_some());
        let rules = meta["rules"].as_object().unwrap();
        // Verify all 13 rule categories are present (stripped fallow/ prefix)
        assert_eq!(rules.len(), CHECK_RULES.len());
        assert!(rules.contains_key("unused-file"));
        assert!(rules.contains_key("unused-export"));
        assert!(rules.contains_key("unused-type"));
        assert!(rules.contains_key("unused-dependency"));
        assert!(rules.contains_key("unused-dev-dependency"));
        assert!(rules.contains_key("unused-optional-dependency"));
        assert!(rules.contains_key("unused-enum-member"));
        assert!(rules.contains_key("unused-class-member"));
        assert!(rules.contains_key("unresolved-import"));
        assert!(rules.contains_key("unlisted-dependency"));
        assert!(rules.contains_key("duplicate-export"));
        assert!(rules.contains_key("type-only-dependency"));
        assert!(rules.contains_key("circular-dependency"));
    }

    #[test]
    fn check_meta_rule_has_required_fields() {
        let meta = check_meta();
        let rules = meta["rules"].as_object().unwrap();
        for (key, value) in rules {
            assert!(value.get("name").is_some(), "rule {key} missing 'name'");
            assert!(
                value.get("description").is_some(),
                "rule {key} missing 'description'"
            );
            assert!(value.get("docs").is_some(), "rule {key} missing 'docs'");
        }
    }

    // ── health_meta ──────────────────────────────────────────────────

    #[test]
    fn health_meta_has_metrics() {
        let meta = health_meta();
        assert!(meta.get("docs").is_some());
        let metrics = meta["metrics"].as_object().unwrap();
        assert!(metrics.contains_key("cyclomatic"));
        assert!(metrics.contains_key("cognitive"));
        assert!(metrics.contains_key("maintainability_index"));
        assert!(metrics.contains_key("complexity_density"));
        assert!(metrics.contains_key("fan_in"));
        assert!(metrics.contains_key("fan_out"));
    }

    // ── dupes_meta ───────────────────────────────────────────────────

    #[test]
    fn dupes_meta_has_metrics() {
        let meta = dupes_meta();
        assert!(meta.get("docs").is_some());
        let metrics = meta["metrics"].as_object().unwrap();
        assert!(metrics.contains_key("duplication_percentage"));
        assert!(metrics.contains_key("token_count"));
        assert!(metrics.contains_key("clone_groups"));
        assert!(metrics.contains_key("clone_families"));
    }

    // ── HEALTH_RULES completeness ──────────────────────────────────

    #[test]
    fn health_rules_all_have_fallow_prefix() {
        for rule in HEALTH_RULES {
            assert!(
                rule.id.starts_with("fallow/"),
                "health rule {} should start with fallow/",
                rule.id
            );
        }
    }

    #[test]
    fn health_rules_all_have_docs_path() {
        for rule in HEALTH_RULES {
            assert!(
                !rule.docs_path.is_empty(),
                "health rule {} should have a docs_path",
                rule.id
            );
        }
    }

    #[test]
    fn health_rules_all_have_non_empty_fields() {
        for rule in HEALTH_RULES {
            assert!(
                !rule.name.is_empty(),
                "health rule {} missing name",
                rule.id
            );
            assert!(
                !rule.short.is_empty(),
                "health rule {} missing short description",
                rule.id
            );
            assert!(
                !rule.full.is_empty(),
                "health rule {} missing full description",
                rule.id
            );
        }
    }

    // ── DUPES_RULES completeness ───────────────────────────────────

    #[test]
    fn dupes_rules_all_have_fallow_prefix() {
        for rule in DUPES_RULES {
            assert!(
                rule.id.starts_with("fallow/"),
                "dupes rule {} should start with fallow/",
                rule.id
            );
        }
    }

    #[test]
    fn dupes_rules_all_have_docs_path() {
        for rule in DUPES_RULES {
            assert!(
                !rule.docs_path.is_empty(),
                "dupes rule {} should have a docs_path",
                rule.id
            );
        }
    }

    #[test]
    fn dupes_rules_all_have_non_empty_fields() {
        for rule in DUPES_RULES {
            assert!(!rule.name.is_empty(), "dupes rule {} missing name", rule.id);
            assert!(
                !rule.short.is_empty(),
                "dupes rule {} missing short description",
                rule.id
            );
            assert!(
                !rule.full.is_empty(),
                "dupes rule {} missing full description",
                rule.id
            );
        }
    }

    // ── CHECK_RULES field completeness ─────────────────────────────

    #[test]
    fn check_rules_all_have_non_empty_fields() {
        for rule in CHECK_RULES {
            assert!(!rule.name.is_empty(), "check rule {} missing name", rule.id);
            assert!(
                !rule.short.is_empty(),
                "check rule {} missing short description",
                rule.id
            );
            assert!(
                !rule.full.is_empty(),
                "check rule {} missing full description",
                rule.id
            );
        }
    }

    // ── rule_docs_url with health/dupes rules ──────────────────────

    #[test]
    fn rule_docs_url_health_rule() {
        let rule = rule_by_id("fallow/high-cyclomatic-complexity").unwrap();
        let url = rule_docs_url(rule);
        assert!(url.starts_with("https://docs.fallow.tools/"));
        assert!(url.contains("health"));
    }

    #[test]
    fn rule_docs_url_dupes_rule() {
        let rule = rule_by_id("fallow/code-duplication").unwrap();
        let url = rule_docs_url(rule);
        assert!(url.starts_with("https://docs.fallow.tools/"));
        assert!(url.contains("duplication"));
    }

    // ── health_meta metric structure ───────────────────────────────

    #[test]
    fn health_meta_all_metrics_have_name_and_description() {
        let meta = health_meta();
        let metrics = meta["metrics"].as_object().unwrap();
        for (key, value) in metrics {
            assert!(
                value.get("name").is_some(),
                "health metric {key} missing 'name'"
            );
            assert!(
                value.get("description").is_some(),
                "health metric {key} missing 'description'"
            );
            assert!(
                value.get("interpretation").is_some(),
                "health metric {key} missing 'interpretation'"
            );
        }
    }

    #[test]
    fn health_meta_has_all_expected_metrics() {
        let meta = health_meta();
        let metrics = meta["metrics"].as_object().unwrap();
        let expected = [
            "cyclomatic",
            "cognitive",
            "line_count",
            "maintainability_index",
            "complexity_density",
            "dead_code_ratio",
            "fan_in",
            "fan_out",
            "score",
            "weighted_commits",
            "trend",
            "priority",
            "efficiency",
            "effort",
            "confidence",
        ];
        for key in &expected {
            assert!(
                metrics.contains_key(*key),
                "health_meta missing expected metric: {key}"
            );
        }
    }

    // ── dupes_meta metric structure ────────────────────────────────

    #[test]
    fn dupes_meta_all_metrics_have_name_and_description() {
        let meta = dupes_meta();
        let metrics = meta["metrics"].as_object().unwrap();
        for (key, value) in metrics {
            assert!(
                value.get("name").is_some(),
                "dupes metric {key} missing 'name'"
            );
            assert!(
                value.get("description").is_some(),
                "dupes metric {key} missing 'description'"
            );
        }
    }

    #[test]
    fn dupes_meta_has_line_count() {
        let meta = dupes_meta();
        let metrics = meta["metrics"].as_object().unwrap();
        assert!(metrics.contains_key("line_count"));
    }

    // ── docs URLs ─────────────────────────────────────────────────

    #[test]
    fn check_docs_url_valid() {
        assert!(CHECK_DOCS.starts_with("https://"));
        assert!(CHECK_DOCS.contains("dead-code"));
    }

    #[test]
    fn health_docs_url_valid() {
        assert!(HEALTH_DOCS.starts_with("https://"));
        assert!(HEALTH_DOCS.contains("health"));
    }

    #[test]
    fn dupes_docs_url_valid() {
        assert!(DUPES_DOCS.starts_with("https://"));
        assert!(DUPES_DOCS.contains("dupes"));
    }

    // ── check_meta docs URL matches constant ──────────────────────

    #[test]
    fn check_meta_docs_url_matches_constant() {
        let meta = check_meta();
        assert_eq!(meta["docs"].as_str().unwrap(), CHECK_DOCS);
    }

    #[test]
    fn health_meta_docs_url_matches_constant() {
        let meta = health_meta();
        assert_eq!(meta["docs"].as_str().unwrap(), HEALTH_DOCS);
    }

    #[test]
    fn dupes_meta_docs_url_matches_constant() {
        let meta = dupes_meta();
        assert_eq!(meta["docs"].as_str().unwrap(), DUPES_DOCS);
    }

    // ── rule_by_id finds all check rules ──────────────────────────

    #[test]
    fn rule_by_id_finds_all_check_rules() {
        for rule in CHECK_RULES {
            assert!(
                rule_by_id(rule.id).is_some(),
                "rule_by_id should find check rule {}",
                rule.id
            );
        }
    }

    #[test]
    fn rule_by_id_finds_all_health_rules() {
        for rule in HEALTH_RULES {
            assert!(
                rule_by_id(rule.id).is_some(),
                "rule_by_id should find health rule {}",
                rule.id
            );
        }
    }

    #[test]
    fn rule_by_id_finds_all_dupes_rules() {
        for rule in DUPES_RULES {
            assert!(
                rule_by_id(rule.id).is_some(),
                "rule_by_id should find dupes rule {}",
                rule.id
            );
        }
    }

    // ── Rule count verification ───────────────────────────────────
    // These pin the table sizes so an accidental deletion fails loudly.

    #[test]
    fn check_rules_count() {
        assert_eq!(CHECK_RULES.len(), 13);
    }

    #[test]
    fn health_rules_count() {
        assert_eq!(HEALTH_RULES.len(), 4);
    }

    #[test]
    fn dupes_rules_count() {
        assert_eq!(DUPES_RULES.len(), 1);
    }
}