// codelens_core/insight/health.rs
1//! Code health score analysis.
2//!
3//! Computes health scores at three levels: project, directory, and file.
4
5use std::collections::HashMap;
6use std::path::{Path, PathBuf};
7
8use serde::Serialize;
9
10use crate::analyzer::stats::{AnalysisResult, FileStats};
11use crate::insight::scoring::{HealthDimension, RawMetrics, ScoringModel};
12use crate::insight::Grade;
13
/// The score one health dimension contributes, with its weight in the model's total.
#[derive(Debug, Clone, Serialize)]
pub struct DimensionScore {
    /// Which health dimension this score measures.
    pub dimension: HealthDimension,
    /// Score for this dimension as produced by the scoring model
    /// (lower is worse — results elsewhere are sorted ascending, worst first).
    pub score: f64,
    /// Letter grade derived from `score` by the model.
    pub grade: Grade,
    /// Weight of this dimension in the model's total score.
    pub weight: f64,
}
21
/// Health assessment for a single file.
#[derive(Debug, Clone, Serialize)]
pub struct FileHealth {
    /// Path of the analyzed file.
    pub path: PathBuf,
    /// Total health score for the file (lower is worse).
    pub score: f64,
    /// Letter grade derived from `score` by the model.
    pub grade: Grade,
    /// The lowest-scoring dimension for this file.
    pub top_issue: HealthDimension,
    /// Per-dimension breakdown of the score.
    pub dimensions: Vec<DimensionScore>,
}
30
/// Aggregated health assessment for one directory of files.
#[derive(Debug, Clone, Serialize)]
pub struct DirectoryHealth {
    /// Directory path (parent of the files it aggregates).
    pub path: PathBuf,
    /// Total score computed over the directory's combined metrics.
    pub score: f64,
    /// Letter grade derived from `score` by the model.
    pub grade: Grade,
    /// Number of files grouped under this directory.
    pub file_count: usize,
}
38
/// Top-level health report covering project, directory, and file levels.
#[derive(Debug, Clone, Serialize)]
pub struct HealthReport {
    /// Project-wide total score computed over all files' combined metrics.
    pub score: f64,
    /// Letter grade derived from the project score.
    pub grade: Grade,
    /// Name of the scoring model that produced this report.
    pub model: String,
    /// Project-level per-dimension breakdown.
    pub dimensions: Vec<DimensionScore>,
    /// Directory scores, worst first, limited to the requested top-N.
    pub by_directory: Vec<DirectoryHealth>,
    /// File scores, worst first, limited to the requested top-N.
    pub worst_files: Vec<FileHealth>,
}
48
49/// Generate a health report from analysis results using the given scoring model.
50pub fn score(result: &AnalysisResult, model: &dyn ScoringModel, top_n: usize) -> HealthReport {
51    // File-level scoring
52    let mut file_healths: Vec<FileHealth> =
53        result.files.iter().map(|f| score_file(f, model)).collect();
54
55    // Sort by score ascending (worst first)
56    file_healths.sort_by(|a, b| {
57        a.score
58            .partial_cmp(&b.score)
59            .unwrap_or(std::cmp::Ordering::Equal)
60    });
61
62    // Directory-level scoring
63    let mut dir_files: HashMap<PathBuf, Vec<&FileStats>> = HashMap::new();
64    for file in &result.files {
65        let dir = file.path.parent().unwrap_or(Path::new(".")).to_path_buf();
66        dir_files.entry(dir).or_default().push(file);
67    }
68
69    let mut dir_healths: Vec<DirectoryHealth> = dir_files
70        .iter()
71        .map(|(dir, files)| {
72            let metrics = RawMetrics::from_file_refs(files);
73            let dir_score = model.total_score(&metrics);
74            DirectoryHealth {
75                path: dir.clone(),
76                score: dir_score,
77                grade: model.grade(dir_score),
78                file_count: files.len(),
79            }
80        })
81        .collect();
82
83    dir_healths.sort_by(|a, b| {
84        a.score
85            .partial_cmp(&b.score)
86            .unwrap_or(std::cmp::Ordering::Equal)
87    });
88
89    // Project-level scoring
90    let project_metrics = RawMetrics::from_files(&result.files);
91    let project_score = model.total_score(&project_metrics);
92    let project_dimensions = score_dimensions(&project_metrics, model);
93
94    HealthReport {
95        score: project_score,
96        grade: model.grade(project_score),
97        model: model.name().to_string(),
98        dimensions: project_dimensions,
99        by_directory: dir_healths.into_iter().take(top_n).collect(),
100        worst_files: file_healths.into_iter().take(top_n).collect(),
101    }
102}
103
104fn score_file(file: &FileStats, model: &dyn ScoringModel) -> FileHealth {
105    let metrics = RawMetrics::from_file(file);
106    let dimensions = score_dimensions(&metrics, model);
107    let total = model.total_score(&metrics);
108
109    let top_issue = dimensions
110        .iter()
111        .min_by(|a, b| {
112            a.score
113                .partial_cmp(&b.score)
114                .unwrap_or(std::cmp::Ordering::Equal)
115        })
116        .map(|d| d.dimension)
117        .unwrap_or(HealthDimension::Complexity);
118
119    FileHealth {
120        path: file.path.clone(),
121        score: total,
122        grade: model.grade(total),
123        top_issue,
124        dimensions,
125    }
126}
127
128fn score_dimensions(metrics: &RawMetrics, model: &dyn ScoringModel) -> Vec<DimensionScore> {
129    model
130        .dimensions()
131        .iter()
132        .map(|dw| {
133            let s = model.score_dimension(dw.dimension, metrics);
134            DimensionScore {
135                dimension: dw.dimension,
136                score: s,
137                grade: model.grade(s),
138                weight: dw.weight,
139            }
140        })
141        .collect()
142}
143
#[cfg(test)]
mod tests {
    use super::*;
    use crate::analyzer::stats::{Complexity, FileStats, LineStats, Summary};
    use crate::insight::scoring::default::DefaultModel;
    use std::time::Duration;

    /// Three-file fixture: a healthy file and an unhealthy file in `src/`,
    /// plus a third file in `lib/` so directory grouping is exercised.
    fn make_test_result() -> AnalysisResult {
        let good = FileStats {
            path: PathBuf::from("src/good.rs"),
            language: "Rust".to_string(),
            size: 1000,
            lines: LineStats {
                total: 50,
                code: 40,
                comment: 5,
                blank: 5,
            },
            complexity: Complexity {
                functions: 3,
                cyclomatic: 6,
                max_depth: 2,
                avg_func_lines: 13.0,
            },
        };
        let bad = FileStats {
            path: PathBuf::from("src/bad.rs"),
            language: "Rust".to_string(),
            size: 10000,
            lines: LineStats {
                total: 500,
                code: 400,
                comment: 10,
                blank: 90,
            },
            complexity: Complexity {
                functions: 2,
                cyclomatic: 30,
                max_depth: 8,
                avg_func_lines: 200.0,
            },
        };
        let utils = FileStats {
            path: PathBuf::from("lib/utils.rs"),
            language: "Rust".to_string(),
            size: 1500,
            lines: LineStats {
                total: 80,
                code: 60,
                comment: 10,
                blank: 10,
            },
            complexity: Complexity {
                functions: 5,
                cyclomatic: 10,
                max_depth: 3,
                avg_func_lines: 12.0,
            },
        };
        let files = vec![good, bad, utils];
        AnalysisResult {
            summary: Summary::from_file_stats(&files),
            files,
            elapsed: Duration::from_millis(50),
            scanned_files: 3,
            skipped_files: 0,
        }
    }

    #[test]
    fn test_health_report_structure() {
        let report = score(&make_test_result(), &DefaultModel::new(), 10);
        assert_eq!(report.model, "default");
        assert!(!report.dimensions.is_empty());
        assert!(!report.worst_files.is_empty());
        assert!(!report.by_directory.is_empty());
    }

    #[test]
    fn test_worst_files_sorted_ascending() {
        let report = score(&make_test_result(), &DefaultModel::new(), 10);
        assert!(report
            .worst_files
            .windows(2)
            .all(|pair| pair[0].score <= pair[1].score));
    }

    #[test]
    fn test_bad_file_has_lower_score() {
        let report = score(&make_test_result(), &DefaultModel::new(), 10);
        let score_of = |name: &str| {
            report
                .worst_files
                .iter()
                .find(|f| f.path.ends_with(name))
                .unwrap()
                .score
        };
        assert!(score_of("bad.rs") < score_of("good.rs"));
    }

    #[test]
    fn test_directory_grouping() {
        let report = score(&make_test_result(), &DefaultModel::new(), 10);
        assert_eq!(report.by_directory.len(), 2);
        let has_dir = |p: &str| report.by_directory.iter().any(|d| d.path == Path::new(p));
        assert!(has_dir("src"));
        assert!(has_dir("lib"));
    }

    #[test]
    fn test_top_n_limits() {
        let report = score(&make_test_result(), &DefaultModel::new(), 1);
        assert_eq!(report.worst_files.len(), 1);
        assert_eq!(report.by_directory.len(), 1);
    }

    #[test]
    fn test_empty_result() {
        let empty = AnalysisResult {
            files: Vec::new(),
            summary: Summary::default(),
            elapsed: Duration::from_millis(1),
            scanned_files: 0,
            skipped_files: 0,
        };
        let report = score(&empty, &DefaultModel::new(), 10);
        assert!(report.worst_files.is_empty());
        assert!(report.by_directory.is_empty());
    }

    #[test]
    fn test_file_top_issue() {
        let report = score(&make_test_result(), &DefaultModel::new(), 10);
        let bad = report
            .worst_files
            .iter()
            .find(|f| f.path.ends_with("bad.rs"))
            .unwrap();
        assert!(bad.dimensions.iter().any(|d| d.dimension == bad.top_issue));
    }
}