1use serde_json::{Value, json};
8
/// Base URL of the hosted documentation site. Joined with each rule's
/// `docs_path` by `rule_docs_url` (no trailing slash here; the joiner adds it).
const DOCS_BASE: &str = "https://docs.fallow.tools";

/// Landing page for the `check` (dead-code) command docs; emitted as the
/// top-level `docs` field of `check_meta()`.
pub const CHECK_DOCS: &str = "https://docs.fallow.tools/cli/dead-code";

/// Landing page for the `health` command docs; emitted by `health_meta()`.
pub const HEALTH_DOCS: &str = "https://docs.fallow.tools/cli/health";

/// Landing page for the `dupes` command docs; emitted by `dupes_meta()`.
pub const DUPES_DOCS: &str = "https://docs.fallow.tools/cli/dupes";
21
/// Static metadata describing a single analysis rule.
///
/// All fields are `&'static str` because rules are declared in compile-time
/// tables (`CHECK_RULES`, `HEALTH_RULES`, `DUPES_RULES`). The derives are
/// free for an all-`&'static str` struct and let callers debug-print,
/// copy, and compare rule definitions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RuleDef {
    /// Stable rule identifier; always carries the `fallow/` prefix.
    pub id: &'static str,
    /// Human-readable display name.
    pub name: &'static str,
    /// One-line summary for compact output.
    pub short: &'static str,
    /// Full explanation used as the `description` in metadata JSON.
    pub full: &'static str,
    /// Docs path relative to the docs base URL (no leading slash).
    pub docs_path: &'static str,
}
32
/// Rules reported by the `check` (dead-code) pipeline. Order is cosmetic;
/// lookups go through `rule_by_id`, which matches on `id`.
pub const CHECK_RULES: &[RuleDef] = &[
    RuleDef {
        id: "fallow/unused-file",
        name: "Unused Files",
        short: "File is not reachable from any entry point",
        full: "Source files that are not imported by any other module and are not entry points (scripts, tests, configs). These files can safely be deleted. Detection uses graph reachability from configured entry points.",
        docs_path: "explanations/dead-code#unused-files",
    },
    RuleDef {
        id: "fallow/unused-export",
        name: "Unused Exports",
        short: "Export is never imported",
        full: "Named exports that are never imported by any other module in the project. Includes both direct exports and re-exports through barrel files. The export may still be used locally within the same file.",
        docs_path: "explanations/dead-code#unused-exports",
    },
    RuleDef {
        id: "fallow/unused-type",
        name: "Unused Type Exports",
        short: "Type export is never imported",
        full: "Type-only exports (interfaces, type aliases, enums used only as types) that are never imported. These do not generate runtime code but add maintenance burden.",
        docs_path: "explanations/dead-code#unused-types",
    },
    RuleDef {
        id: "fallow/unused-dependency",
        name: "Unused Dependencies",
        short: "Dependency listed but never imported",
        full: "Packages listed in dependencies that are never imported or required by any source file. Framework plugins and CLI tools may be false positives — use the ignore_dependencies config to suppress.",
        docs_path: "explanations/dead-code#unused-dependencies",
    },
    RuleDef {
        id: "fallow/unused-dev-dependency",
        name: "Unused Dev Dependencies",
        short: "Dev dependency listed but never imported",
        full: "Packages listed in devDependencies that are never imported by test files, config files, or scripts. Build tools and jest presets that are referenced only in config may appear as false positives.",
        docs_path: "explanations/dead-code#unused-devdependencies",
    },
    RuleDef {
        id: "fallow/unused-optional-dependency",
        name: "Unused Optional Dependencies",
        short: "Optional dependency listed but never imported",
        full: "Packages listed in optionalDependencies that are never imported. Optional dependencies are typically platform-specific — verify they are not needed on any supported platform before removing.",
        docs_path: "explanations/dead-code#unused-optionaldependencies",
    },
    RuleDef {
        id: "fallow/type-only-dependency",
        name: "Type-only Dependencies",
        short: "Production dependency only used via type-only imports",
        full: "Production dependencies that are only imported via `import type` statements. These can be moved to devDependencies since they generate no runtime code and are stripped during compilation.",
        docs_path: "explanations/dead-code#type-only-dependencies",
    },
    RuleDef {
        id: "fallow/unused-enum-member",
        name: "Unused Enum Members",
        short: "Enum member is never referenced",
        full: "Enum members that are never referenced in the codebase. Uses scope-aware binding analysis to track all references including computed access patterns.",
        docs_path: "explanations/dead-code#unused-enum-members",
    },
    RuleDef {
        id: "fallow/unused-class-member",
        name: "Unused Class Members",
        short: "Class member is never referenced",
        full: "Class methods and properties that are never referenced outside the class. Private members are checked within the class scope; public members are checked project-wide.",
        docs_path: "explanations/dead-code#unused-class-members",
    },
    RuleDef {
        id: "fallow/unresolved-import",
        name: "Unresolved Imports",
        short: "Import could not be resolved",
        full: "Import specifiers that could not be resolved to a file on disk. Common causes: deleted files, typos in paths, missing path aliases in tsconfig, or uninstalled packages.",
        docs_path: "explanations/dead-code#unresolved-imports",
    },
    RuleDef {
        id: "fallow/unlisted-dependency",
        name: "Unlisted Dependencies",
        short: "Dependency used but not in package.json",
        full: "Packages that are imported in source code but not listed in package.json. These work by accident (hoisted from another workspace package or transitive dep) and will break in strict package managers.",
        docs_path: "explanations/dead-code#unlisted-dependencies",
    },
    RuleDef {
        id: "fallow/duplicate-export",
        name: "Duplicate Exports",
        short: "Export name appears in multiple modules",
        full: "The same export name is defined in multiple modules. Consumers may import from the wrong module, leading to subtle bugs. Consider renaming or consolidating.",
        docs_path: "explanations/dead-code#duplicate-exports",
    },
    RuleDef {
        id: "fallow/circular-dependency",
        name: "Circular Dependencies",
        short: "Circular dependency chain detected",
        full: "A cycle in the module import graph. Circular dependencies cause undefined behavior with CommonJS (partial modules) and initialization ordering issues with ESM. Break cycles by extracting shared code.",
        docs_path: "explanations/dead-code#circular-dependencies",
    },
];
126
127#[must_use]
129pub fn rule_by_id(id: &str) -> Option<&'static RuleDef> {
130 CHECK_RULES
131 .iter()
132 .chain(HEALTH_RULES.iter())
133 .chain(DUPES_RULES.iter())
134 .find(|r| r.id == id)
135}
136
137#[must_use]
139pub fn rule_docs_url(rule: &RuleDef) -> String {
140 format!("{DOCS_BASE}/{}", rule.docs_path)
141}
142
/// Rules reported by the `health` pipeline (complexity, refactoring
/// candidates, and static test-reachability gaps).
pub const HEALTH_RULES: &[RuleDef] = &[
    RuleDef {
        id: "fallow/high-cyclomatic-complexity",
        name: "High Cyclomatic Complexity",
        short: "Function has high cyclomatic complexity",
        full: "McCabe cyclomatic complexity exceeds the configured threshold. Cyclomatic complexity counts the number of independent paths through a function (1 + decision points: if/else, switch cases, loops, ternary, logical operators). High values indicate functions that are hard to test exhaustively.",
        docs_path: "explanations/health#cyclomatic-complexity",
    },
    RuleDef {
        id: "fallow/high-cognitive-complexity",
        name: "High Cognitive Complexity",
        short: "Function has high cognitive complexity",
        full: "SonarSource cognitive complexity exceeds the configured threshold. Unlike cyclomatic complexity, cognitive complexity penalizes nesting depth and non-linear control flow (breaks, continues, early returns). It measures how hard a function is to understand when reading sequentially.",
        docs_path: "explanations/health#cognitive-complexity",
    },
    RuleDef {
        id: "fallow/high-complexity",
        name: "High Complexity (Both)",
        short: "Function exceeds both complexity thresholds",
        full: "Function exceeds both cyclomatic and cognitive complexity thresholds. This is the strongest signal that a function needs refactoring — it has many paths AND is hard to understand.",
        docs_path: "explanations/health#complexity-metrics",
    },
    RuleDef {
        id: "fallow/refactoring-target",
        name: "Refactoring Target",
        short: "File identified as a high-priority refactoring candidate",
        full: "File identified as a refactoring candidate based on a weighted combination of complexity density, churn velocity, dead code ratio, fan-in (blast radius), and fan-out (coupling). Categories: urgent churn+complexity, break circular dependency, split high-impact file, remove dead code, extract complex functions, reduce coupling.",
        docs_path: "explanations/health#refactoring-targets",
    },
    RuleDef {
        id: "fallow/untested-file",
        name: "Untested File",
        short: "Runtime-reachable file has no test dependency path",
        full: "A file is reachable from runtime entry points but not from any discovered test entry point. This indicates production code that no test imports, directly or transitively, according to the static module graph.",
        docs_path: "explanations/health#coverage-gaps",
    },
    RuleDef {
        id: "fallow/untested-export",
        name: "Untested Export",
        short: "Runtime-reachable export has no test dependency path",
        full: "A value export is reachable from runtime entry points but no test-reachable module references it. This is a static test dependency gap rather than line coverage, and highlights exports exercised only through production entry paths.",
        docs_path: "explanations/health#coverage-gaps",
    },
];
189
/// Rules reported by the `dupes` (clone detection) pipeline.
pub const DUPES_RULES: &[RuleDef] = &[RuleDef {
    id: "fallow/code-duplication",
    name: "Code Duplication",
    short: "Duplicated code block",
    full: "A block of code that appears in multiple locations with identical or near-identical token sequences. Clone detection uses normalized token comparison — identifier names and literals are abstracted away in non-strict modes.",
    docs_path: "explanations/duplication#clone-groups",
}];
197
198#[must_use]
202pub fn check_meta() -> Value {
203 let rules: Value = CHECK_RULES
204 .iter()
205 .map(|r| {
206 (
207 r.id.replace("fallow/", ""),
208 json!({
209 "name": r.name,
210 "description": r.full,
211 "docs": rule_docs_url(r)
212 }),
213 )
214 })
215 .collect::<serde_json::Map<String, Value>>()
216 .into();
217
218 json!({
219 "docs": CHECK_DOCS,
220 "rules": rules
221 })
222}
223
/// Machine-readable metadata for the `health` command: the docs landing page
/// plus a name/description/range/interpretation entry for every metric the
/// pipeline can emit. The literal below is the single source of truth for
/// metric documentation shown to consumers of the JSON output.
#[must_use]
pub fn health_meta() -> Value {
    json!({
        "docs": HEALTH_DOCS,
        "metrics": {
            "cyclomatic": {
                "name": "Cyclomatic Complexity",
                "description": "McCabe cyclomatic complexity: 1 + number of decision points (if/else, switch cases, loops, ternary, logical operators). Measures the number of independent paths through a function.",
                "range": "[1, \u{221e})",
                "interpretation": "lower is better; default threshold: 20"
            },
            "cognitive": {
                "name": "Cognitive Complexity",
                "description": "SonarSource cognitive complexity: penalizes nesting depth and non-linear control flow (breaks, continues, early returns). Measures how hard a function is to understand when reading top-to-bottom.",
                "range": "[0, \u{221e})",
                "interpretation": "lower is better; default threshold: 15"
            },
            "line_count": {
                "name": "Line Count",
                "description": "Number of lines in the function body.",
                "range": "[1, \u{221e})",
                "interpretation": "context-dependent; long functions may need splitting"
            },
            "maintainability_index": {
                "name": "Maintainability Index",
                "description": "Composite score: 100 - (complexity_density \u{00d7} 30) - (dead_code_ratio \u{00d7} 20) - min(ln(fan_out+1) \u{00d7} 4, 15). Clamped to [0, 100]. Higher is better.",
                "range": "[0, 100]",
                "interpretation": "higher is better; <40 poor, 40\u{2013}70 moderate, >70 good"
            },
            "complexity_density": {
                "name": "Complexity Density",
                "description": "Total cyclomatic complexity divided by lines of code. Measures how densely complex the code is per line.",
                "range": "[0, \u{221e})",
                "interpretation": "lower is better; >1.0 indicates very dense complexity"
            },
            "dead_code_ratio": {
                "name": "Dead Code Ratio",
                "description": "Fraction of value exports (excluding type-only exports like interfaces and type aliases) with zero references across the project.",
                "range": "[0, 1]",
                "interpretation": "lower is better; 0 = all exports are used"
            },
            "fan_in": {
                "name": "Fan-in (Importers)",
                "description": "Number of files that import this file. High fan-in means high blast radius \u{2014} changes to this file affect many dependents.",
                "range": "[0, \u{221e})",
                "interpretation": "context-dependent; high fan-in files need careful review before changes"
            },
            "fan_out": {
                "name": "Fan-out (Imports)",
                "description": "Number of files this file directly imports. High fan-out indicates high coupling and change propagation risk.",
                "range": "[0, \u{221e})",
                "interpretation": "lower is better; MI penalty caps at ~40 imports"
            },
            "score": {
                "name": "Hotspot Score",
                "description": "normalized_churn \u{00d7} normalized_complexity \u{00d7} 100, where normalization is against the project maximum. Identifies files that are both complex AND frequently changing.",
                "range": "[0, 100]",
                "interpretation": "higher = riskier; prioritize refactoring high-score files"
            },
            "weighted_commits": {
                "name": "Weighted Commits",
                "description": "Recency-weighted commit count using exponential decay with 90-day half-life. Recent commits contribute more than older ones.",
                "range": "[0, \u{221e})",
                "interpretation": "higher = more recent churn activity"
            },
            "trend": {
                "name": "Churn Trend",
                "description": "Compares recent vs older commit frequency within the analysis window. accelerating = recent > 1.5\u{00d7} older, cooling = recent < 0.67\u{00d7} older, stable = in between.",
                "values": ["accelerating", "stable", "cooling"],
                "interpretation": "accelerating files need attention; cooling files are stabilizing"
            },
            "priority": {
                "name": "Refactoring Priority",
                "description": "Weighted score: complexity density (30%), hotspot boost (25%), dead code ratio (20%), fan-in (15%), fan-out (10%). Fan-in and fan-out normalization uses adaptive percentile-based thresholds (p95 of the project distribution). Does not use the maintainability index to avoid double-counting.",
                "range": "[0, 100]",
                "interpretation": "higher = more urgent to refactor"
            },
            "efficiency": {
                "name": "Efficiency Score",
                "description": "priority / effort_numeric (Low=1, Medium=2, High=3). Surfaces quick wins: high-priority, low-effort targets rank first. Default sort order.",
                "range": "[0, 100] \u{2014} effective max depends on effort: Low=100, Medium=50, High\u{2248}33",
                "interpretation": "higher = better quick-win value; targets are sorted by efficiency descending"
            },
            "effort": {
                "name": "Effort Estimate",
                "description": "Heuristic effort estimate based on file size, function count, and fan-in. Thresholds adapt to the project\u{2019}s distribution (percentile-based). Low: small file, few functions, low fan-in. High: large file, high fan-in, or many functions with high density. Medium: everything else.",
                "values": ["low", "medium", "high"],
                "interpretation": "low = quick win, high = needs planning and coordination"
            },
            "confidence": {
                "name": "Confidence Level",
                "description": "Reliability of the recommendation based on data source. High: deterministic graph/AST analysis (dead code, circular deps, complexity). Medium: heuristic thresholds (fan-in/fan-out coupling). Low: depends on git history quality (churn-based recommendations).",
                "values": ["high", "medium", "low"],
                "interpretation": "high = act on it, medium = verify context, low = treat as a signal, not a directive"
            },
            "health_score": {
                "name": "Health Score",
                "description": "Project-level aggregate score computed from vital signs: dead code, complexity, maintainability, hotspots, unused dependencies, and circular dependencies. Penalties subtracted from 100. Missing metrics (from pipelines that didn't run) don't penalize. Use --score to force full pipeline for maximum accuracy.",
                "range": "[0, 100]",
                "interpretation": "higher is better; A (85\u{2013}100), B (70\u{2013}84), C (55\u{2013}69), D (40\u{2013}54), F (0\u{2013}39)"
            },
            "crap_max": {
                "name": "Untested Complexity Risk (CRAP)",
                "description": "Change Risk Anti-Patterns score (Savoia & Evans, 2007). Static binary model: test-reachable file = CC, untested file = CC\u{00b2} + CC. Considers test-graph reachability from the module graph, not runtime code coverage. Files not imported by any test file are treated as 0% covered regardless of actual test execution.",
                "range": "[1, \u{221e})",
                "interpretation": "lower is better; >=30 is high-risk (CC >= 5 without test path)"
            }
        }
    })
}
335
/// Machine-readable metadata for the `dupes` command: the docs landing page
/// plus descriptions of each duplication metric in the JSON output.
#[must_use]
pub fn dupes_meta() -> Value {
    json!({
        "docs": DUPES_DOCS,
        "metrics": {
            "duplication_percentage": {
                "name": "Duplication Percentage",
                "description": "Fraction of total source tokens that appear in at least one clone group. Computed over the full analyzed file set.",
                "range": "[0, 100]",
                "interpretation": "lower is better"
            },
            "token_count": {
                "name": "Token Count",
                "description": "Number of normalized source tokens in the clone group. Tokens are language-aware (keywords, identifiers, operators, punctuation). Higher token count = larger duplicate.",
                "range": "[1, \u{221e})",
                "interpretation": "larger clones have higher refactoring value"
            },
            "line_count": {
                "name": "Line Count",
                "description": "Number of source lines spanned by the clone instance. Approximation of clone size for human readability.",
                "range": "[1, \u{221e})",
                "interpretation": "larger clones are more impactful to deduplicate"
            },
            "clone_groups": {
                "name": "Clone Groups",
                "description": "A set of code fragments with identical or near-identical normalized token sequences. Each group has 2+ instances across different locations.",
                "interpretation": "each group is a single refactoring opportunity"
            },
            "clone_families": {
                "name": "Clone Families",
                "description": "Groups of clone groups that share the same set of files. Indicates systematic duplication patterns (e.g., mirrored directory structures).",
                "interpretation": "families suggest extract-module refactoring opportunities"
            }
        }
    })
}
373
#[cfg(test)]
mod tests {
    use super::*;

    // --- rule_by_id lookups ---

    #[test]
    fn rule_by_id_finds_check_rule() {
        let rule = rule_by_id("fallow/unused-file").unwrap();
        assert_eq!(rule.name, "Unused Files");
    }

    #[test]
    fn rule_by_id_finds_health_rule() {
        let rule = rule_by_id("fallow/high-cyclomatic-complexity").unwrap();
        assert_eq!(rule.name, "High Cyclomatic Complexity");
    }

    #[test]
    fn rule_by_id_finds_dupes_rule() {
        let rule = rule_by_id("fallow/code-duplication").unwrap();
        assert_eq!(rule.name, "Code Duplication");
    }

    #[test]
    fn rule_by_id_returns_none_for_unknown() {
        assert!(rule_by_id("fallow/nonexistent").is_none());
        assert!(rule_by_id("").is_none());
    }

    #[test]
    fn rule_docs_url_format() {
        let rule = rule_by_id("fallow/unused-export").unwrap();
        let url = rule_docs_url(rule);
        assert!(url.starts_with("https://docs.fallow.tools/"));
        assert!(url.contains("unused-exports"));
    }

    // --- rule table invariants ---

    #[test]
    fn check_rules_all_have_fallow_prefix() {
        for rule in CHECK_RULES {
            assert!(
                rule.id.starts_with("fallow/"),
                "rule {} should start with fallow/",
                rule.id
            );
        }
    }

    #[test]
    fn check_rules_all_have_docs_path() {
        for rule in CHECK_RULES {
            assert!(
                !rule.docs_path.is_empty(),
                "rule {} should have a docs_path",
                rule.id
            );
        }
    }

    #[test]
    fn check_rules_no_duplicate_ids() {
        // std HashSet is plenty for ~20 ids; no need for an external hasher here.
        let mut seen = std::collections::HashSet::new();
        for rule in CHECK_RULES.iter().chain(HEALTH_RULES).chain(DUPES_RULES) {
            assert!(seen.insert(rule.id), "duplicate rule id: {}", rule.id);
        }
    }

    // --- metadata JSON shape ---

    #[test]
    fn check_meta_has_docs_and_rules() {
        let meta = check_meta();
        assert!(meta.get("docs").is_some());
        assert!(meta.get("rules").is_some());
        let rules = meta["rules"].as_object().unwrap();
        assert_eq!(rules.len(), CHECK_RULES.len());
        assert!(rules.contains_key("unused-file"));
        assert!(rules.contains_key("unused-export"));
        assert!(rules.contains_key("unused-type"));
        assert!(rules.contains_key("unused-dependency"));
        assert!(rules.contains_key("unused-dev-dependency"));
        assert!(rules.contains_key("unused-optional-dependency"));
        assert!(rules.contains_key("unused-enum-member"));
        assert!(rules.contains_key("unused-class-member"));
        assert!(rules.contains_key("unresolved-import"));
        assert!(rules.contains_key("unlisted-dependency"));
        assert!(rules.contains_key("duplicate-export"));
        assert!(rules.contains_key("type-only-dependency"));
        assert!(rules.contains_key("circular-dependency"));
    }

    #[test]
    fn check_meta_rule_has_required_fields() {
        let meta = check_meta();
        let rules = meta["rules"].as_object().unwrap();
        for (key, value) in rules {
            assert!(value.get("name").is_some(), "rule {key} missing 'name'");
            assert!(
                value.get("description").is_some(),
                "rule {key} missing 'description'"
            );
            assert!(value.get("docs").is_some(), "rule {key} missing 'docs'");
        }
    }

    #[test]
    fn health_meta_has_metrics() {
        let meta = health_meta();
        assert!(meta.get("docs").is_some());
        let metrics = meta["metrics"].as_object().unwrap();
        assert!(metrics.contains_key("cyclomatic"));
        assert!(metrics.contains_key("cognitive"));
        assert!(metrics.contains_key("maintainability_index"));
        assert!(metrics.contains_key("complexity_density"));
        assert!(metrics.contains_key("fan_in"));
        assert!(metrics.contains_key("fan_out"));
    }

    #[test]
    fn dupes_meta_has_metrics() {
        let meta = dupes_meta();
        assert!(meta.get("docs").is_some());
        let metrics = meta["metrics"].as_object().unwrap();
        assert!(metrics.contains_key("duplication_percentage"));
        assert!(metrics.contains_key("token_count"));
        assert!(metrics.contains_key("clone_groups"));
        assert!(metrics.contains_key("clone_families"));
    }

    #[test]
    fn health_rules_all_have_fallow_prefix() {
        for rule in HEALTH_RULES {
            assert!(
                rule.id.starts_with("fallow/"),
                "health rule {} should start with fallow/",
                rule.id
            );
        }
    }

    #[test]
    fn health_rules_all_have_docs_path() {
        for rule in HEALTH_RULES {
            assert!(
                !rule.docs_path.is_empty(),
                "health rule {} should have a docs_path",
                rule.id
            );
        }
    }

    #[test]
    fn health_rules_all_have_non_empty_fields() {
        for rule in HEALTH_RULES {
            assert!(
                !rule.name.is_empty(),
                "health rule {} missing name",
                rule.id
            );
            assert!(
                !rule.short.is_empty(),
                "health rule {} missing short description",
                rule.id
            );
            assert!(
                !rule.full.is_empty(),
                "health rule {} missing full description",
                rule.id
            );
        }
    }

    #[test]
    fn dupes_rules_all_have_fallow_prefix() {
        for rule in DUPES_RULES {
            assert!(
                rule.id.starts_with("fallow/"),
                "dupes rule {} should start with fallow/",
                rule.id
            );
        }
    }

    #[test]
    fn dupes_rules_all_have_docs_path() {
        for rule in DUPES_RULES {
            assert!(
                !rule.docs_path.is_empty(),
                "dupes rule {} should have a docs_path",
                rule.id
            );
        }
    }

    #[test]
    fn dupes_rules_all_have_non_empty_fields() {
        for rule in DUPES_RULES {
            assert!(!rule.name.is_empty(), "dupes rule {} missing name", rule.id);
            assert!(
                !rule.short.is_empty(),
                "dupes rule {} missing short description",
                rule.id
            );
            assert!(
                !rule.full.is_empty(),
                "dupes rule {} missing full description",
                rule.id
            );
        }
    }

    #[test]
    fn check_rules_all_have_non_empty_fields() {
        for rule in CHECK_RULES {
            assert!(!rule.name.is_empty(), "check rule {} missing name", rule.id);
            assert!(
                !rule.short.is_empty(),
                "check rule {} missing short description",
                rule.id
            );
            assert!(
                !rule.full.is_empty(),
                "check rule {} missing full description",
                rule.id
            );
        }
    }

    #[test]
    fn rule_docs_url_health_rule() {
        let rule = rule_by_id("fallow/high-cyclomatic-complexity").unwrap();
        let url = rule_docs_url(rule);
        assert!(url.starts_with("https://docs.fallow.tools/"));
        assert!(url.contains("health"));
    }

    #[test]
    fn rule_docs_url_dupes_rule() {
        let rule = rule_by_id("fallow/code-duplication").unwrap();
        let url = rule_docs_url(rule);
        assert!(url.starts_with("https://docs.fallow.tools/"));
        assert!(url.contains("duplication"));
    }

    #[test]
    fn health_meta_all_metrics_have_name_and_description() {
        let meta = health_meta();
        let metrics = meta["metrics"].as_object().unwrap();
        for (key, value) in metrics {
            assert!(
                value.get("name").is_some(),
                "health metric {key} missing 'name'"
            );
            assert!(
                value.get("description").is_some(),
                "health metric {key} missing 'description'"
            );
            assert!(
                value.get("interpretation").is_some(),
                "health metric {key} missing 'interpretation'"
            );
        }
    }

    #[test]
    fn health_meta_has_all_expected_metrics() {
        let meta = health_meta();
        let metrics = meta["metrics"].as_object().unwrap();
        let expected = [
            "cyclomatic",
            "cognitive",
            "line_count",
            "maintainability_index",
            "complexity_density",
            "dead_code_ratio",
            "fan_in",
            "fan_out",
            "score",
            "weighted_commits",
            "trend",
            "priority",
            "efficiency",
            "effort",
            "confidence",
            // Previously missing from this list even though health_meta() emits them:
            "health_score",
            "crap_max",
        ];
        for key in &expected {
            assert!(
                metrics.contains_key(*key),
                "health_meta missing expected metric: {key}"
            );
        }
    }

    #[test]
    fn dupes_meta_all_metrics_have_name_and_description() {
        let meta = dupes_meta();
        let metrics = meta["metrics"].as_object().unwrap();
        for (key, value) in metrics {
            assert!(
                value.get("name").is_some(),
                "dupes metric {key} missing 'name'"
            );
            assert!(
                value.get("description").is_some(),
                "dupes metric {key} missing 'description'"
            );
        }
    }

    #[test]
    fn dupes_meta_has_line_count() {
        let meta = dupes_meta();
        let metrics = meta["metrics"].as_object().unwrap();
        assert!(metrics.contains_key("line_count"));
    }

    // --- docs URL constants ---

    #[test]
    fn check_docs_url_valid() {
        assert!(CHECK_DOCS.starts_with("https://"));
        assert!(CHECK_DOCS.contains("dead-code"));
    }

    #[test]
    fn health_docs_url_valid() {
        assert!(HEALTH_DOCS.starts_with("https://"));
        assert!(HEALTH_DOCS.contains("health"));
    }

    #[test]
    fn dupes_docs_url_valid() {
        assert!(DUPES_DOCS.starts_with("https://"));
        assert!(DUPES_DOCS.contains("dupes"));
    }

    #[test]
    fn check_meta_docs_url_matches_constant() {
        let meta = check_meta();
        assert_eq!(meta["docs"].as_str().unwrap(), CHECK_DOCS);
    }

    #[test]
    fn health_meta_docs_url_matches_constant() {
        let meta = health_meta();
        assert_eq!(meta["docs"].as_str().unwrap(), HEALTH_DOCS);
    }

    #[test]
    fn dupes_meta_docs_url_matches_constant() {
        let meta = dupes_meta();
        assert_eq!(meta["docs"].as_str().unwrap(), DUPES_DOCS);
    }

    // --- exhaustive lookup round-trips ---

    #[test]
    fn rule_by_id_finds_all_check_rules() {
        for rule in CHECK_RULES {
            assert!(
                rule_by_id(rule.id).is_some(),
                "rule_by_id should find check rule {}",
                rule.id
            );
        }
    }

    #[test]
    fn rule_by_id_finds_all_health_rules() {
        for rule in HEALTH_RULES {
            assert!(
                rule_by_id(rule.id).is_some(),
                "rule_by_id should find health rule {}",
                rule.id
            );
        }
    }

    #[test]
    fn rule_by_id_finds_all_dupes_rules() {
        for rule in DUPES_RULES {
            assert!(
                rule_by_id(rule.id).is_some(),
                "rule_by_id should find dupes rule {}",
                rule.id
            );
        }
    }

    #[test]
    fn check_rules_count() {
        assert_eq!(CHECK_RULES.len(), 13);
    }

    #[test]
    fn health_rules_count() {
        assert_eq!(HEALTH_RULES.len(), 6);
    }

    #[test]
    fn dupes_rules_count() {
        assert_eq!(DUPES_RULES.len(), 1);
    }
}