// cargo_crap/score.rs
//! CRAP (Change Risk Anti-Patterns) scoring.
//!
//! The formula, from Savoia & Evans (2007):
//!
//! ```text
//! CRAP(m) = comp(m)² × (1 − cov(m)/100)³ + comp(m)
//! ```
//!
//! where `comp(m)` is the cyclomatic complexity of method `m`, and `cov(m)`
//! is the percentage of `m` exercised by automated tests.
//!
//! Interpretation notes (mirroring the original paper):
//!
//! - A trivial method (CC=1, coverage=100%) scores exactly 1.0. This is the
//!   lower bound.
//! - At 100% coverage, `(1 − 100/100)³ = 0`, so the quadratic term vanishes
//!   and only the linear `CC` term remains. In other words: tests cap the
//!   damage that complexity can do, but they do not eliminate complexity
//!   itself.
//! - Above CC=30, no amount of coverage keeps the score under the 30-point
//!   "crappiness" threshold. This is a property of the formula, not a bug:
//!   the tool refuses to certify monster methods as clean just because they
//!   happen to be tested.
/// The default threshold above which a function is considered "crappy".
///
/// This matches the value used in the original Crap4j tool and `NDepend`.
pub const DEFAULT_THRESHOLD: f64 = 30.0;
/// Compute the CRAP score for a single function.
///
/// ```text
/// CRAP(m) = comp(m)² × (1 − cov(m)/100)³ + comp(m)
/// ```
///
/// # Arguments
/// - `complexity`: cyclomatic complexity (minimum 1.0; the linear path).
/// - `coverage_pct`: test coverage percentage in `[0.0, 100.0]`. Values
///   outside this range are clamped.
///
/// # Examples
///
/// ```
/// use cargo_crap::score::crap;
/// assert_eq!(crap(1.0, 100.0), 1.0); // trivial, tested
/// assert_eq!(crap(6.0, 0.0), 42.0); // moderately complex, untested
/// ```
#[must_use]
pub fn crap(complexity: f64, coverage_pct: f64) -> f64 {
    // Fraction of the function NOT exercised by tests, in [0.0, 1.0].
    // Out-of-range coverage is clamped rather than rejected.
    let uncovered = 1.0 - (coverage_pct.clamp(0.0, 100.0) / 100.0);
    // Quadratic complexity penalty, discounted cubically by coverage,
    // plus the raw complexity as an irreducible linear floor.
    complexity.powi(2) * uncovered.powi(3) + complexity
}
/// Classify a CRAP score against a threshold.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Severity {
    /// Score is at or below the threshold.
    Clean,
    /// Score exceeds the threshold.
    Crappy,
}
61impl Severity {
62 #[must_use]
63 pub fn classify(
64 score: f64,
65 threshold: f64,
66 ) -> Self {
67 if score > threshold {
68 Self::Crappy
69 } else {
70 Self::Clean
71 }
72 }
73}
#[cfg(test)]
#[expect(
    clippy::float_cmp,
    reason = "CRAP formula is deterministic; exact equality is the right comparison"
)]
mod tests {
    use super::*;

    // These tests fix properties of the formula. They are not "coverage for
    // the sake of coverage" — each one would catch a specific regression if
    // the formula were ever rewritten.

    #[test]
    fn trivial_method_scores_one() {
        // CC=1 is the minimum for any non-empty function: the straight-line
        // path. Fully covered, it must score exactly 1.
        assert_eq!(crap(1.0, 100.0), 1.0);
    }

    #[test]
    fn untested_complex_method_matches_published_example() {
        // Savoia's worked example: CC=6, cov=0%.
        // 6² × 1³ + 6 = 36 + 6 = 42.
        // If this ever drifts, the formula has been corrupted.
        assert_eq!(crap(6.0, 0.0), 42.0);
    }

    #[test]
    fn full_coverage_leaves_only_linear_term() {
        // (1 − 100/100)³ = 0, so the quadratic term zeroes out.
        // The score equals the raw complexity.
        assert_eq!(crap(20.0, 100.0), 20.0);
        assert_eq!(crap(5.0, 100.0), 5.0);
    }

    #[test]
    fn cc_above_threshold_is_irredeemable_even_with_full_coverage() {
        // This is the whole point of the non-linear term: some methods are
        // just too complex to be "clean" regardless of tests.
        assert!(crap(31.0, 100.0) > DEFAULT_THRESHOLD);
        assert!(crap(50.0, 100.0) > DEFAULT_THRESHOLD);
    }

    #[test]
    fn score_is_monotonic_in_complexity_at_fixed_coverage() {
        // Holding coverage constant, more complex code must never score lower.
        for cov in [0.0, 25.0, 50.0, 75.0, 100.0] {
            let a = crap(2.0, cov);
            let b = crap(5.0, cov);
            let c = crap(10.0, cov);
            assert!(a <= b, "monotonicity broken at cov={cov}: {a} vs {b}");
            assert!(b <= c, "monotonicity broken at cov={cov}: {b} vs {c}");
        }
    }

    #[test]
    fn score_is_monotonic_nonincreasing_in_coverage_at_fixed_complexity() {
        // Holding complexity constant, more tests must never make the score
        // worse. This is a property users rely on when deciding to write tests.
        for cc in [1.0, 3.0, 10.0, 25.0] {
            let mut prev = f64::INFINITY;
            for cov in [0.0, 25.0, 50.0, 75.0, 100.0] {
                let s = crap(cc, cov);
                assert!(s <= prev, "cov↑ made score worse at cc={cc}");
                prev = s;
            }
        }
    }

    #[test]
    fn coverage_is_clamped() {
        // Coverage out of range shouldn't blow up with negative cubes etc.
        // We treat −10% as 0%, and 150% as 100%.
        assert_eq!(crap(5.0, -10.0), crap(5.0, 0.0));
        assert_eq!(crap(5.0, 150.0), crap(5.0, 100.0));
    }

    #[test]
    fn severity_classifies_at_threshold_boundary() {
        // Exactly at the threshold is not crappy; strictly above is.
        assert_eq!(Severity::classify(30.0, 30.0), Severity::Clean);
        assert_eq!(Severity::classify(30.0001, 30.0), Severity::Crappy);
    }
}
158}