// codelens_core/insight/scoring/default.rs
1//! Default scoring model for general-purpose projects.
2
3use super::{DimensionWeight, HealthDimension, RawMetrics, ScoringModel};
4
/// Default scoring model for general-purpose projects.
///
/// Stateless: every instance behaves identically, so `new()`, `default()`,
/// and copies are interchangeable.
// Deriving `Default` replaces the hand-written impl that just forwarded to
// `new()` (clippy `derivable_impls`); `Debug`/`Clone`/`Copy` are free on a
// zero-sized public type.
#[derive(Debug, Default, Clone, Copy)]
pub struct DefaultModel;

impl DefaultModel {
    /// Creates a new `DefaultModel`.
    pub fn new() -> Self {
        Self
    }
}
18
19impl ScoringModel for DefaultModel {
20    fn name(&self) -> &str {
21        "default"
22    }
23
24    fn dimensions(&self) -> &[DimensionWeight] {
25        &[
26            DimensionWeight {
27                dimension: HealthDimension::Complexity,
28                weight: 0.30,
29            },
30            DimensionWeight {
31                dimension: HealthDimension::FuncSize,
32                weight: 0.20,
33            },
34            DimensionWeight {
35                dimension: HealthDimension::CommentRatio,
36                weight: 0.15,
37            },
38            DimensionWeight {
39                dimension: HealthDimension::FileSize,
40                weight: 0.20,
41            },
42            DimensionWeight {
43                dimension: HealthDimension::NestingDepth,
44                weight: 0.15,
45            },
46        ]
47    }
48
49    fn score_dimension(&self, dimension: HealthDimension, metrics: &RawMetrics) -> f64 {
50        match dimension {
51            HealthDimension::Complexity => score_complexity(metrics.avg_cyclomatic),
52            HealthDimension::FuncSize => score_func_size(metrics.avg_func_lines),
53            HealthDimension::CommentRatio => score_comment_ratio(metrics.comment_ratio),
54            HealthDimension::FileSize => score_file_size(metrics.avg_file_lines),
55            HealthDimension::NestingDepth => score_nesting(metrics.depth),
56        }
57    }
58}
59
/// Piecewise-linear interpolation over an ascending `(x, y)` breakpoint table.
///
/// Input at or below the first x clamps to the first y; input at or above the
/// last x clamps to the last y. An empty table yields a neutral 50.0.
fn interpolate(value: f64, breakpoints: &[(f64, f64)]) -> f64 {
    if breakpoints.is_empty() {
        // Nothing to interpolate against: neutral midpoint score.
        return 50.0;
    }
    let (min_x, min_y) = breakpoints[0];
    let (max_x, max_y) = breakpoints[breakpoints.len() - 1];
    if value <= min_x {
        return min_y;
    }
    if value >= max_x {
        return max_y;
    }
    // Value is strictly inside the table: locate the bracketing segment and
    // interpolate linearly within it.
    breakpoints
        .windows(2)
        .find(|pair| value >= pair[0].0 && value <= pair[1].0)
        .map(|pair| {
            let ((x0, y0), (x1, y1)) = (pair[0], pair[1]);
            let t = (value - x0) / (x1 - x0);
            y0 + t * (y1 - y0)
        })
        // Unreachable for a sorted table, but keep a sane fallback.
        .unwrap_or(50.0)
}
80
81fn score_complexity(avg_cc: f64) -> f64 {
82    interpolate(
83        avg_cc,
84        &[
85            (0.0, 100.0),
86            (3.0, 100.0),
87            (6.0, 80.0),
88            (10.0, 60.0),
89            (15.0, 40.0),
90            (25.0, 20.0),
91        ],
92    )
93}
94
95fn score_func_size(avg_lines: f64) -> f64 {
96    interpolate(
97        avg_lines,
98        &[
99            (0.0, 100.0),
100            (15.0, 100.0),
101            (30.0, 80.0),
102            (50.0, 60.0),
103            (80.0, 40.0),
104            (150.0, 20.0),
105        ],
106    )
107}
108
109fn score_comment_ratio(ratio: f64) -> f64 {
110    interpolate(
111        ratio,
112        &[
113            (0.0, 40.0),
114            (0.03, 70.0),
115            (0.05, 100.0),
116            (0.30, 100.0),
117            (0.50, 70.0),
118            (0.80, 40.0),
119        ],
120    )
121}
122
123fn score_file_size(avg_lines: f64) -> f64 {
124    interpolate(
125        avg_lines,
126        &[
127            (0.0, 100.0),
128            (200.0, 100.0),
129            (400.0, 80.0),
130            (600.0, 60.0),
131            (1000.0, 40.0),
132            (2000.0, 20.0),
133        ],
134    )
135}
136
137fn score_nesting(depth: usize) -> f64 {
138    interpolate(
139        depth as f64,
140        &[
141            (0.0, 100.0),
142            (3.0, 100.0),
143            (4.0, 80.0),
144            (6.0, 60.0),
145            (8.0, 40.0),
146            (12.0, 20.0),
147        ],
148    )
149}
150
#[cfg(test)]
mod tests {
    use super::*;

    // Exact breakpoint x-values must return that breakpoint's y exactly.
    #[test]
    fn test_interpolate_at_breakpoint() {
        let bp = &[(0.0, 100.0), (10.0, 0.0)];
        assert!((interpolate(0.0, bp) - 100.0).abs() < 0.01);
        assert!((interpolate(10.0, bp) - 0.0).abs() < 0.01);
    }

    // Halfway between two breakpoints should land halfway between their y's.
    #[test]
    fn test_interpolate_midpoint() {
        let bp = &[(0.0, 100.0), (10.0, 0.0)];
        assert!((interpolate(5.0, bp) - 50.0).abs() < 0.01);
    }

    // Values below the table clamp to the first breakpoint's y.
    #[test]
    fn test_interpolate_below_min() {
        let bp = &[(5.0, 80.0), (10.0, 40.0)];
        assert!((interpolate(2.0, bp) - 80.0).abs() < 0.01);
    }

    // Values above the table clamp to the last breakpoint's y.
    #[test]
    fn test_interpolate_above_max() {
        let bp = &[(0.0, 100.0), (10.0, 20.0)];
        assert!((interpolate(999.0, bp) - 20.0).abs() < 0.01);
    }

    // avg CC of 2 sits in the flat (0..=3) -> 100 region of the table.
    #[test]
    fn test_score_complexity_excellent() {
        assert!((score_complexity(2.0) - 100.0).abs() < 0.01);
    }

    // avg CC of 20 falls between the (15, 40) and (25, 20) breakpoints.
    #[test]
    fn test_score_complexity_poor() {
        let score = score_complexity(20.0);
        assert!(score < 40.0);
        assert!(score > 20.0);
    }

    // The comment-ratio curve is flat at 100 between 0.05 and 0.30.
    #[test]
    fn test_score_comment_ratio_sweet_spot() {
        assert!((score_comment_ratio(0.10) - 100.0).abs() < 0.01);
        assert!((score_comment_ratio(0.20) - 100.0).abs() < 0.01);
    }

    // Zero comments is penalized (table starts at (0.0, 40.0)).
    #[test]
    fn test_score_comment_ratio_too_few() {
        assert!(score_comment_ratio(0.0) < 50.0);
    }

    // Excessive comments are penalized too (non-monotonic curve).
    #[test]
    fn test_score_comment_ratio_too_many() {
        assert!(score_comment_ratio(0.80) < 50.0);
    }

    // All-excellent metrics should aggregate to a top-grade weighted score.
    // NOTE(review): `total_score` is a trait method not defined in this file;
    // presumably it's the weighted sum over `dimensions()` — confirm in the
    // `ScoringModel` trait definition.
    #[test]
    fn test_default_model_total_score() {
        let model = DefaultModel::new();
        let metrics = RawMetrics {
            avg_cyclomatic: 2.0,
            avg_func_lines: 10.0,
            comment_ratio: 0.15,
            depth: 2,
            avg_file_lines: 100.0,
            total_files: 10,
        };
        let score = model.total_score(&metrics);
        assert!(
            score >= 90.0,
            "excellent metrics should give A grade, got {score}"
        );
    }

    // All-poor metrics should aggregate below the failing threshold.
    #[test]
    fn test_default_model_poor_score() {
        let model = DefaultModel::new();
        let metrics = RawMetrics {
            avg_cyclomatic: 20.0,
            avg_func_lines: 100.0,
            comment_ratio: 0.0,
            depth: 10,
            avg_file_lines: 1500.0,
            total_files: 5,
        };
        let score = model.total_score(&metrics);
        assert!(
            score < 50.0,
            "poor metrics should give F grade, got {score}"
        );
    }

    // Grade boundaries as exposed by the trait's `grade` method
    // (defined outside this file).
    #[test]
    fn test_default_model_grade() {
        let model = DefaultModel::new();
        assert_eq!(model.grade(95.0), crate::insight::Grade::A);
        assert_eq!(model.grade(85.0), crate::insight::Grade::B);
        assert_eq!(model.grade(75.0), crate::insight::Grade::C);
        assert_eq!(model.grade(65.0), crate::insight::Grade::D);
        assert_eq!(model.grade(50.0), crate::insight::Grade::F);
    }

    #[test]
    fn test_default_model_name() {
        assert_eq!(DefaultModel::new().name(), "default");
    }

    // The weighted average only makes sense if the weights form a partition.
    #[test]
    fn test_default_model_weights_sum_to_one() {
        let model = DefaultModel::new();
        let total: f64 = model.dimensions().iter().map(|d| d.weight).sum();
        assert!((total - 1.0).abs() < 0.001);
    }
}