use crate::config::ProjectConfig;
use crate::graph::GraphStore;
use crate::models::{Finding, Severity};
use std::collections::{HashMap, HashSet};
use tracing::{debug, info};
/// Detects "compound smells": several *different* detectors firing in the
/// same ~50-line region of the same file.
///
/// Co-located findings get a `[COMPOUND: ...]` prefix on their description
/// and a small confidence boost, on the theory that agreement between
/// independent detectors raises the likelihood of a real problem.
pub fn escalate_compound_smells(findings: &mut [Finding]) {
    // Group finding indices by "file:bucket", where bucket = line_start / 50.
    let mut location_groups: HashMap<String, Vec<usize>> = HashMap::new();
    for (idx, finding) in findings.iter().enumerate() {
        if finding.affected_files.is_empty() {
            continue;
        }
        let file = finding.affected_files[0].to_string_lossy().to_string();
        let line_start = finding.line_start.unwrap_or(0);
        // NOTE: findings just across a bucket boundary (e.g. lines 49 and 51)
        // land in different buckets; this coarse binning is accepted here.
        let bucket = line_start / 50;
        let key = format!("{}:{}", file, bucket);
        location_groups.entry(key).or_default().push(idx);
    }
    for indices in location_groups.values() {
        if indices.len() < 2 {
            continue;
        }
        // Only escalate when at least two *distinct* detectors agree;
        // multiple findings from one detector are not a compound smell.
        let unique_detectors: HashSet<&str> = indices
            .iter()
            .map(|&idx| findings[idx].detector.as_str())
            .collect();
        let detector_count = unique_detectors.len();
        if detector_count >= 2 {
            for &idx in indices {
                // Idempotent: skip findings already tagged on a previous run.
                if !findings[idx].description.starts_with("[COMPOUND") {
                    findings[idx].description = format!(
                        "[COMPOUND: {} co-located issues] {}",
                        detector_count, findings[idx].description
                    );
                    // Bump confidence (default 0.7), capped at 1.0.
                    findings[idx].confidence =
                        Some((findings[idx].confidence.unwrap_or(0.7) + 0.1).min(1.0));
                }
            }
            debug!(
                "Marked {} findings as compound smell ({} detectors)",
                indices.len(),
                detector_count
            );
        }
    }
}
const MAX_MODULARITY_BONUS: f64 = 0.10; const MAX_COHESION_BONUS: f64 = 0.05; const MAX_CLEAN_DEPS_BONUS: f64 = 0.10; const MAX_COMPLEXITY_DIST_BONUS: f64 = 0.05; const MAX_TEST_COVERAGE_BONUS: f64 = 0.05;
/// Detailed scoring result for one pillar (Structure, Quality, or
/// Architecture), suitable for rendering via `GraphScorer::explain`.
#[derive(Debug, Clone)]
pub struct PillarBreakdown {
/// Display name of the pillar.
pub name: String,
/// 100 minus penalty points, before bonuses (clamped to 25..=100 in `build_pillar`).
pub base_score: f64,
/// Sum of raw bonus ratios, before the penalty-based cap is applied.
pub bonus_ratio: f64,
/// Base score plus capped bonus points, at most 100.
pub final_score: f64,
/// (label, ratio) pairs for each individual bonus source.
pub bonuses: Vec<(String, f64)>,
/// Total severity-weighted penalty points charged to this pillar.
pub penalty_points: f64,
/// Number of findings attributed to this pillar.
pub finding_count: usize,
}
/// Complete health-score output: weighted overall score, letter grade,
/// per-pillar breakdowns, and the raw graph metrics they derive from.
#[derive(Debug, Clone)]
pub struct ScoreBreakdown {
/// Weighted combination of the three pillar scores (see `GraphScorer::calculate`).
pub overall_score: f64,
/// Letter grade ("A+" through "F") derived from `overall_score`.
pub grade: String,
/// Breakdown for the Structure pillar.
pub structure: PillarBreakdown,
/// Breakdown for the Quality pillar.
pub quality: PillarBreakdown,
/// Breakdown for the Architecture pillar.
pub architecture: PillarBreakdown,
/// Raw metrics computed from the code graph.
pub graph_metrics: GraphMetrics,
}
/// Structural metrics derived from the code graph (see `compute_graph_metrics`).
#[derive(Debug, Clone, Default)]
pub struct GraphMetrics {
/// Distinct modules, approximated as distinct parent directories of files.
pub module_count: usize,
/// Fraction of calls crossing module boundaries (0.0 when there are no calls).
pub avg_coupling: f64,
/// Fraction of calls staying within a module (1.0 when there are no calls).
pub avg_cohesion: f64,
/// Import cycles plus call cycles.
pub cycle_count: usize,
/// Fraction of functions with complexity <= 10 (1.0 when no functions).
pub simple_function_ratio: f64,
/// Fraction of files recognized as test files (0.0 when no files).
pub test_file_ratio: f64,
/// Total number of functions in the graph.
pub total_functions: usize,
/// Total number of files in the graph.
pub total_files: usize,
/// Total lines of code summed from per-file "loc" properties.
pub total_loc: usize,
}
/// The three scoring pillars a finding can count against.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Pillar {
    Structure,
    Quality,
    Architecture,
}

/// Routes a finding to a pillar based on its category string, falling back
/// to detector-name heuristics when the category is unrecognized.
///
/// Keyword groups are checked in a fixed order (Structure, then
/// Architecture, then Quality), preserving the original first-match
/// semantics for categories that match several groups.
fn classify_pillar(category: &str, detector: &str, is_security: bool) -> Pillar {
    // Security findings always count against Quality, regardless of category.
    if is_security {
        return Pillar::Quality;
    }
    // Ordered (keywords, pillar) table; replaces a degenerate `match` whose
    // arms were all guards on an unbound binding.
    const CATEGORY_RULES: [(&[&str], Pillar); 3] = [
        (
            &["complex", "naming", "readab", "style", "maintainab"],
            Pillar::Structure,
        ),
        (
            &["architect", "bottleneck", "circular", "coupling"],
            Pillar::Architecture,
        ),
        (
            &["security", "reliab", "correct", "performance", "error", "safety"],
            Pillar::Quality,
        ),
    ];
    for (keywords, pillar) in CATEGORY_RULES {
        if keywords.iter().any(|kw| category.contains(kw)) {
            return pillar;
        }
    }
    // Unknown category: fall back to detector-name heuristics.
    if detector.contains("dependency") || detector.contains("import") {
        Pillar::Architecture
    } else if detector.contains("large")
        || detector.contains("nesting")
        || detector.contains("dead")
        || detector.contains("naming")
    {
        Pillar::Structure
    } else {
        Pillar::Quality
    }
}
/// Computes codebase health scores by combining finding penalties with
/// bonuses derived from the code graph (coupling, cohesion, cycles, tests).
pub struct GraphScorer<'a> {
/// Code graph queried for files, functions, calls, and cycles.
graph: &'a GraphStore,
/// Project configuration supplying scoring weights and multipliers.
config: &'a ProjectConfig,
}
impl<'a> GraphScorer<'a> {
pub fn new(graph: &'a GraphStore, config: &'a ProjectConfig) -> Self {
Self { graph, config }
}
/// Calculates the overall health score for the given findings.
///
/// Each pillar starts from 100, loses severity-weighted penalty points
/// normalized by codebase size, then regains capped graph-derived bonuses.
/// Pillar scores are blended with the configured (normalized) weights.
pub fn calculate(&self, findings: &[Finding]) -> ScoreBreakdown {
let metrics = self.compute_graph_metrics();
// Graph bonuses are ratios in 0.0..=MAX_* (fractions of the 100-point scale).
let modularity_bonus = self.calculate_modularity_bonus(&metrics);
let cohesion_bonus = self.calculate_cohesion_bonus(&metrics);
let clean_deps_bonus = self.calculate_clean_deps_bonus(&metrics);
let complexity_bonus = self.calculate_complexity_bonus(&metrics);
let test_bonus = self.calculate_test_bonus(&metrics);
debug!(
"Graph bonuses: modularity={:.1}%, cohesion={:.1}%, clean_deps={:.1}%, complexity={:.1}%, tests={:.1}%",
modularity_bonus * 100.0,
cohesion_bonus * 100.0,
clean_deps_bonus * 100.0,
complexity_bonus * 100.0,
test_bonus * 100.0
);
// Penalties are normalized per kLOC; the 1.0 floor keeps tiny codebases
// from being wildly over-penalized by a single finding.
let kloc = (metrics.total_loc as f64 / 1000.0).max(1.0);
let severity_weight = |severity: &Severity| -> f64 {
match severity {
Severity::Critical => 8.0,
Severity::High => 4.0,
Severity::Medium => 1.0,
Severity::Low => 0.2,
Severity::Info => 0.0,
}
};
// With this scale, one Medium finding costs 5 points per kLOC.
const DENSITY_SCALE: f64 = 5.0;
let mut structure_penalty = 0.0;
let mut quality_penalty = 0.0;
let mut architecture_penalty = 0.0;
let mut structure_count = 0;
let mut quality_count = 0;
let mut architecture_count = 0;
// Route each finding's scaled penalty to exactly one pillar.
for finding in findings {
let scaled = severity_weight(&finding.severity) * DENSITY_SCALE / kloc;
let category = finding.category.as_deref().unwrap_or("");
let detector = finding.detector.to_lowercase();
let is_security = self.is_security_finding(finding);
// Security findings are amplified by the configured multiplier.
let security_mult = if is_security {
self.config.scoring.security_multiplier
} else {
1.0
};
let effective = scaled * security_mult;
let pillar = classify_pillar(category, &detector, is_security);
match pillar {
Pillar::Quality => {
quality_penalty += effective;
quality_count += 1;
}
Pillar::Structure => {
structure_penalty += effective;
structure_count += 1;
}
Pillar::Architecture => {
architecture_penalty += effective;
architecture_count += 1;
}
}
}
// Assemble each pillar; each gets only the bonuses relevant to it.
let structure = self.build_pillar(
"Structure",
structure_penalty,
structure_count,
vec![("Complexity distribution", complexity_bonus)],
);
let quality = self.build_pillar(
"Quality",
quality_penalty,
quality_count,
vec![("Test coverage signal", test_bonus)],
);
let architecture = self.build_pillar(
"Architecture",
architecture_penalty,
architecture_count,
vec![
("Modularity (low coupling)", modularity_bonus),
("Cohesion", cohesion_bonus),
("Clean dependencies (no cycles)", clean_deps_bonus),
],
);
// Defensive: re-normalize pillar weights if the config doesn't sum to 1.0.
let mut weights = self.config.scoring.pillar_weights.clone();
if !weights.is_valid() {
tracing::warn!(
"Pillar weights sum to {:.3} (expected 1.0), normalizing",
weights.structure + weights.quality + weights.architecture
);
weights.normalize();
}
let overall = structure.final_score * weights.structure
+ quality.final_score * weights.quality
+ architecture.final_score * weights.architecture;
// Hard floor so the overall score never collapses below 5.0.
let overall = overall.max(5.0);
// A codebase with any Medium-or-worse finding should never display as a
// perfect score, so near-100 results are capped at 99.9.
let has_medium_plus = findings.iter().any(|f| {
matches!(f.severity, Severity::Critical | Severity::High | Severity::Medium)
});
let overall = if has_medium_plus && overall >= 99.95 {
99.9
} else {
overall
};
let grade = self.calculate_grade(overall, findings);
info!(
"Health score: {:.1} ({}) - Structure: {:.1}, Quality: {:.1}, Architecture: {:.1}",
overall, grade, structure.final_score, quality.final_score, architecture.final_score
);
ScoreBreakdown {
overall_score: overall,
grade,
structure,
quality,
architecture,
graph_metrics: metrics,
}
}
/// Assembles a `PillarBreakdown` from raw penalty points and graph bonuses.
///
/// The base score is 100 minus penalties, floored at 25 so one noisy pillar
/// cannot zero out the overall score. Bonuses are additive percentage
/// points; when penalties exist the total bonus is capped at half the
/// penalty, so bonuses soften findings rather than erase them.
fn build_pillar(
    &self,
    name: &str,
    penalty: f64,
    finding_count: usize,
    bonuses: Vec<(&str, f64)>,
) -> PillarBreakdown {
    let base_score = (100.0 - penalty).clamp(25.0, 100.0);
    let bonus_ratio: f64 = bonuses.iter().map(|(_, b)| b).sum();
    let mut bonus_points = bonus_ratio * 100.0;
    if penalty > 0.0 {
        // Never let bonuses recover more than half of what findings cost.
        bonus_points = bonus_points.min(penalty * 0.5);
    }
    let final_score = (base_score + bonus_points).min(100.0);
    let bonuses = bonuses
        .into_iter()
        .map(|(label, value)| (label.to_string(), value))
        .collect();
    PillarBreakdown {
        name: name.to_string(),
        base_score,
        bonus_ratio,
        final_score,
        bonuses,
        penalty_points: penalty,
        finding_count,
    }
}
/// Derives structural metrics (coupling, cohesion, cycles, complexity and
/// test-file ratios, sizes) from the code graph.
fn compute_graph_metrics(&self) -> GraphMetrics {
let functions = self.graph.get_functions();
let files = self.graph.get_files();
let calls = self.graph.get_calls();
let _imports = self.graph.get_imports();
// A "module" is approximated by a file's parent directory.
let modules: HashSet<String> = files
.iter()
.filter_map(|f| {
let path = std::path::Path::new(&f.file_path);
path.parent().map(|p| p.to_string_lossy().to_string())
})
.collect();
let mut cross_module_calls = 0;
// Map each function's qualified name to its module (parent directory).
// NOTE(review): duplicate qualified names across files silently overwrite
// each other here — confirm qualified names are globally unique.
let func_to_module: HashMap<&str, String> = functions
.iter()
.map(|f| {
let module = std::path::Path::new(&f.file_path)
.parent()
.map(|p| p.to_string_lossy().to_string())
.unwrap_or_default();
(f.qualified_name.as_str(), module)
})
.collect();
// Count calls whose endpoints both resolve and land in different modules;
// calls with an unresolved endpoint are not counted as cross-module.
for (caller, callee) in &calls {
let caller_mod = func_to_module.get(caller.as_str());
let callee_mod = func_to_module.get(callee.as_str());
if caller_mod != callee_mod && caller_mod.is_some() && callee_mod.is_some() {
cross_module_calls += 1;
}
}
debug!(
"Call graph: {} total calls, {} cross-module, {} modules",
calls.len(),
cross_module_calls,
modules.len()
);
let avg_coupling = if calls.is_empty() {
0.0
} else {
cross_module_calls as f64 / calls.len() as f64
};
// Everything not counted as cross-module (including unresolved calls) is
// treated as intra-module, so cohesion is exactly 1 - coupling.
let intra_module_calls = calls.len() - cross_module_calls;
let avg_cohesion = if calls.is_empty() {
1.0 } else {
intra_module_calls as f64 / calls.len() as f64
};
debug!(
"Coupling: {:.1}%, Cohesion: {:.1}%",
avg_coupling * 100.0,
avg_cohesion * 100.0
);
let import_cycles = self.graph.find_import_cycles();
let call_cycles = self.graph.find_call_cycles();
let cycle_count = import_cycles.len() + call_cycles.len();
// "Simple" functions have complexity <= 10; unknown complexity counts as 1.
let simple_count = functions
.iter()
.filter(|f| f.complexity().unwrap_or(1) <= 10)
.count();
let simple_ratio = if functions.is_empty() {
1.0
} else {
simple_count as f64 / functions.len() as f64
};
let test_files = files
.iter()
.filter(|f| self.is_test_file(&f.file_path))
.count();
let test_ratio = if files.is_empty() {
0.0
} else {
test_files as f64 / files.len() as f64
};
// Total lines of code from per-file "loc" properties (missing -> 0).
let total_loc: usize = files
.iter()
.map(|f| f.get_i64("loc").unwrap_or(0) as usize)
.sum();
GraphMetrics {
module_count: modules.len(),
avg_coupling,
avg_cohesion,
cycle_count,
simple_function_ratio: simple_ratio,
test_file_ratio: test_ratio,
total_functions: functions.len(),
total_files: files.len(),
total_loc,
}
}
/// Bonus for low cross-module coupling: full bonus at <= 30% cross-module
/// calls, decaying linearly to zero at >= 70%.
fn calculate_modularity_bonus(&self, metrics: &GraphMetrics) -> f64 {
    let excess = ((metrics.avg_coupling - 0.3) / 0.4).clamp(0.0, 1.0);
    MAX_MODULARITY_BONUS * (1.0 - excess)
}
/// Bonus for intra-module cohesion: zero at <= 30% cohesion, rising
/// linearly to the full bonus at >= 70%.
fn calculate_cohesion_bonus(&self, metrics: &GraphMetrics) -> f64 {
    let normalized = ((metrics.avg_cohesion - 0.3) / 0.4).clamp(0.0, 1.0);
    MAX_COHESION_BONUS * normalized
}
/// Bonus for a cycle-free dependency graph; each cycle forfeits 20% of the
/// bonus, so five or more cycles eliminate it entirely.
fn calculate_clean_deps_bonus(&self, metrics: &GraphMetrics) -> f64 {
    let cycle_penalty = (metrics.cycle_count as f64 * 0.2).min(1.0);
    MAX_CLEAN_DEPS_BONUS * (1.0 - cycle_penalty)
}
/// Bonus for a healthy complexity distribution: zero when only half the
/// functions are simple (complexity <= 10), full bonus at >= 90% simple.
fn calculate_complexity_bonus(&self, metrics: &GraphMetrics) -> f64 {
    let normalized = ((metrics.simple_function_ratio - 0.5) / 0.4).clamp(0.0, 1.0);
    MAX_COMPLEXITY_DIST_BONUS * normalized
}
/// Bonus for test presence; the full bonus is reached once 20% of files
/// are recognized as test files.
fn calculate_test_bonus(&self, metrics: &GraphMetrics) -> f64 {
    let normalized = (metrics.test_file_ratio / 0.2).clamp(0.0, 1.0);
    MAX_TEST_COVERAGE_BONUS * normalized
}
/// Heuristic security classification: a CWE id, a security-ish category,
/// or a detector name containing a known vulnerability keyword.
fn is_security_finding(&self, finding: &Finding) -> bool {
    if finding.cwe_id.is_some() {
        return true;
    }
    let category = finding.category.as_deref().unwrap_or("");
    if category.contains("security") || category.contains("inject") {
        return true;
    }
    const SECURITY_KEYWORDS: [&str; 8] = [
        "sql", "xss", "secret", "credential", "command", "traversal", "ssrf", "taint",
    ];
    let detector = finding.detector.to_lowercase();
    SECURITY_KEYWORDS.iter().any(|kw| detector.contains(kw))
}
/// Returns true when the (case-insensitive) path looks like a test file:
/// under a test directory, prefixed with `test(s)/`, or carrying a
/// conventional test suffix for Go/Python/Rust/JS/TS.
fn is_test_file(&self, path: &str) -> bool {
    const DIR_MARKERS: [&str; 4] = ["/test/", "/tests/", "/__tests__/", "/spec/"];
    const PREFIXES: [&str; 2] = ["test/", "tests/"];
    const SUFFIXES: [&str; 7] = [
        "_test.go", "_test.py", "_test.rs", ".test.ts", ".test.js", ".spec.ts", ".spec.js",
    ];
    let lower = path.to_lowercase();
    DIR_MARKERS.iter().any(|m| lower.contains(m))
        || PREFIXES.iter().any(|p| lower.starts_with(p))
        || SUFFIXES.iter().any(|s| lower.ends_with(s))
}
/// Maps a numeric score onto a letter grade using descending cutoffs;
/// anything below 60 is an "F". `_findings` is currently unused but kept
/// in the signature for callers.
fn calculate_grade(&self, score: f64, _findings: &[Finding]) -> String {
    const GRADE_CUTOFFS: [(f64, &str); 12] = [
        (97.0, "A+"),
        (93.0, "A"),
        (90.0, "A-"),
        (87.0, "B+"),
        (83.0, "B"),
        (80.0, "B-"),
        (77.0, "C+"),
        (73.0, "C"),
        (70.0, "C-"),
        (67.0, "D+"),
        (63.0, "D"),
        (60.0, "D-"),
    ];
    GRADE_CUTOFFS
        .iter()
        .find(|(cutoff, _)| score >= *cutoff)
        .map(|(_, grade)| *grade)
        .unwrap_or("F")
        .to_string()
}
/// Renders a Markdown report explaining how the score in `breakdown` was
/// computed: formula, graph metrics, and one section per pillar.
pub fn explain(&self, breakdown: &ScoreBreakdown) -> String {
let mut lines = Vec::new();
let m = &breakdown.graph_metrics;
let kloc = m.total_loc as f64 / 1000.0;
lines.push(format!(
"# Health Score: {:.1} ({})\n",
breakdown.overall_score, breakdown.grade
));
lines.push("## Scoring Formula\n".to_string());
lines.push("```".to_string());
// NOTE(review): the weights below are hardcoded display text; the actual
// weights come from config (see `calculate`) — confirm these match the
// configured defaults.
lines.push("Overall = Structure × 0.33 + Quality × 0.34 + Architecture × 0.33".to_string());
lines.push("Pillar = (100 - penalties) + graph_bonuses".to_string());
lines.push(format!("Penalty = severity_weight × 5.0 / kLOC (kLOC = {:.1})", kloc));
lines.push("```\n".to_string());
lines.push("Severity weights: Critical=8.0, High=4.0, Medium=1.0, Low=0.2\n".to_string());
lines.push("## Graph Analysis\n".to_string());
lines.push(format!("- **Lines of code**: {} ({:.1} kLOC)", m.total_loc, kloc));
lines.push(format!("- **Modules**: {}", m.module_count));
lines.push(format!(
"- **Coupling**: {:.1}% cross-module calls (lower is better)",
m.avg_coupling * 100.0
));
lines.push(format!(
"- **Cohesion**: {:.1}% intra-module calls (higher is better)",
m.avg_cohesion * 100.0
));
lines.push(format!(
"- **Cycles**: {} circular dependencies",
m.cycle_count
));
lines.push(format!(
"- **Simple functions**: {:.1}% have complexity ≤ 10",
m.simple_function_ratio * 100.0
));
lines.push(format!(
"- **Test files**: {:.1}%\n",
m.test_file_ratio * 100.0
));
// One section per pillar, in the same order they are weighted.
for pillar in [
&breakdown.structure,
&breakdown.quality,
&breakdown.architecture,
] {
lines.push(format!(
"## {} Score: {:.1}\n",
pillar.name, pillar.final_score
));
lines.push(format!(
"- Base: 100 - {:.2} penalties = {:.1}",
pillar.penalty_points, pillar.base_score
));
// Recompute the capped bonus exactly as `build_pillar` does, for display.
let total_bonus: f64 = pillar.bonuses.iter().map(|(_, v)| v).sum::<f64>() * 100.0;
let capped = if pillar.penalty_points > 0.0 {
total_bonus.min(pillar.penalty_points * 0.5)
} else {
total_bonus
};
// Hide negligible bonuses (ratio <= 0.001, i.e. under ~0.1 point).
let active_bonuses: Vec<_> = pillar.bonuses.iter().filter(|(_, v)| *v > 0.001).collect();
if !active_bonuses.is_empty() {
lines.push("- Bonuses (additive, capped at 50% of penalty):".to_string());
for (name, value) in &active_bonuses {
lines.push(format!(" - {}: +{:.1} pts", name, value * 100.0));
}
if capped < total_bonus {
lines.push(format!(" - *(capped from {:.1} to {:.1} pts)*", total_bonus, capped));
}
}
lines.push(format!("- Final: {:.1}", pillar.final_score));
lines.push(format!("- Findings: {}\n", pillar.finding_count));
}
lines.join("\n")
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::graph::GraphStore;
// An empty graph with no findings should score near-perfect.
#[test]
fn test_empty_codebase() {
let graph = GraphStore::in_memory();
let config = ProjectConfig::default();
let scorer = GraphScorer::new(&graph, &config);
let breakdown = scorer.calculate(&[]);
assert!(breakdown.overall_score >= 90.0);
}
// A single Critical finding is heavily weighted, but the 25-point pillar
// floor and bonuses should keep the grade in a reasonable range.
#[test]
fn test_critical_finding_caps_grade() {
let graph = GraphStore::in_memory();
let config = ProjectConfig::default();
let scorer = GraphScorer::new(&graph, &config);
let findings = vec![Finding {
severity: Severity::Critical,
detector: "test".to_string(),
title: "Critical issue".to_string(),
..Default::default()
}];
let breakdown = scorer.calculate(&findings);
assert!(
breakdown.grade.starts_with('A')
|| breakdown.grade.starts_with('B')
|| breakdown.grade.starts_with('C'),
"Expected reasonable grade, got {}",
breakdown.grade
);
}
// Exercises compute_graph_metrics on a small synthetic graph: three files
// (one under tests/) and three simple functions.
#[test]
fn test_graph_bonuses() {
let graph = GraphStore::in_memory();
use crate::graph::CodeNode;
graph.add_node(CodeNode::file("src/main.rs"));
graph.add_node(CodeNode::file("src/lib.rs"));
graph.add_node(CodeNode::file("tests/test_main.rs")); graph.add_node(CodeNode::function("main", "src/main.rs").with_property("complexity", 5i64));
graph
.add_node(CodeNode::function("helper", "src/lib.rs").with_property("complexity", 3i64));
graph.add_node(
CodeNode::function("test_main", "tests/test_main.rs").with_property("complexity", 2i64),
);
let config = ProjectConfig::default();
let scorer = GraphScorer::new(&graph, &config);
let metrics = scorer.compute_graph_metrics();
assert_eq!(metrics.total_files, 3);
assert_eq!(metrics.total_functions, 3);
// 1 of 3 files lives under tests/ -> ratio ≈ 0.333.
assert!(
(metrics.test_file_ratio - 0.333).abs() < 0.01,
"test_file_ratio={}",
metrics.test_file_ratio
);
// All three functions have complexity <= 10.
assert_eq!(metrics.simple_function_ratio, 1.0); }
}