pub(crate) mod analyzer;
mod collector;
pub(crate) mod diff;
mod diff_report;
mod normalize;
mod report;
mod scoring;
use std::error::Error;
use crate::dups;
use crate::git::GitRepo;
use crate::walk::WalkConfig;
use analyzer::{FileScore, ProjectScore, compute_project_score, score_to_grade};
use collector::{FileMetrics, analyze_single_file};
use report::{print_json, print_report};
use scoring::{build_dimensions, build_empty_dimensions, score_file};
/// CI quality-gate configuration consumed by `run_diff`.
///
/// With the `Default` derive both checks are off, in which case `run_diff`
/// only reports the score diff and never returns a gate error.
#[derive(Debug, Clone, Copy, Default)]
pub struct ScoreGate {
    /// Fail the run when the overall score delta vs. the git ref is negative.
    pub fail_if_worse: bool,
    /// Fail the run when the post-change grade ranks below this grade.
    pub fail_below: Option<analyzer::Grade>,
}
/// Which scoring model to apply when grading files and the project.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ScoringModel {
    /// Cognitive-complexity based model ("cogcom"), the default.
    Cognitive,
    /// Older scoring model kept for comparison ("legacy").
    Legacy,
}

impl ScoringModel {
    /// Maps a CLI model argument ("legacy" or "cogcom") to its variant.
    ///
    /// Any other string is a caller bug: it trips a debug assertion in debug
    /// builds and silently falls back to `Cognitive` in release builds.
    pub fn from_arg(s: &str) -> Self {
        if s == "legacy" {
            Self::Legacy
        } else {
            debug_assert!(s == "cogcom", "unexpected scoring model: {s}");
            Self::Cognitive
        }
    }
}
/// Runs the `score` subcommand: analyzes the tree at `cfg.path` and prints
/// the project score, either as a human-readable report or as JSON.
///
/// `bottom` caps the "needs attention" list, `min_lines` is the minimum
/// duplicate-block size, and `model` selects the scoring model by name.
pub fn run(
    cfg: &WalkConfig<'_>,
    json: bool,
    bottom: usize,
    min_lines: usize,
    model: &str,
) -> Result<(), Box<dyn Error>> {
    let score = compute_score(cfg, bottom, min_lines, &ScoringModel::from_arg(model))?;

    // Only show the target path in output when it is something other than ".".
    let target = match cfg.path.to_str() {
        Some(s) if s != "." => Some(s.to_string()),
        _ => None,
    };

    if json {
        print_json(&score, target.as_deref())?;
    } else {
        print_report(&score, bottom, target.as_deref());
    }
    Ok(())
}
/// Runs `score --diff <git_ref>`: scores both the working tree and the given
/// git ref, prints the delta, and enforces the optional quality gate.
///
/// The ref's tree is extracted into a temporary directory and scored with the
/// same walk settings as the working tree, so both sides are measured
/// identically.
pub fn run_diff(
    cfg: &WalkConfig<'_>,
    git_ref: &str,
    json: bool,
    bottom: usize,
    min_lines: usize,
    model: &str,
    gate: ScoreGate,
) -> Result<(), Box<dyn Error>> {
    let scoring_model = ScoringModel::from_arg(model);

    // Score the current working tree first.
    let after = compute_score(cfg, bottom, min_lines, &scoring_model)?;

    // Materialize the ref's tree and locate the subtree matching cfg.path.
    let repo = GitRepo::open(cfg.path)?;
    let tmpdir = tempfile::tempdir()?;
    repo.extract_tree_to_dir(git_ref, tmpdir.path())?;
    let (_, prefix) = repo.walk_prefix(cfg.path)?;
    let mut tmp_path = tmpdir.path().to_path_buf();
    if !prefix.as_os_str().is_empty() {
        tmp_path.push(&prefix);
    }
    let ref_cfg = WalkConfig::new(&tmp_path, cfg.include_tests, cfg.filter);
    let before = compute_score(&ref_cfg, bottom, min_lines, &scoring_model)?;

    let score_diff = diff::compute_diff(git_ref, &before, &after);
    if json {
        diff_report::print_json(&score_diff)?;
    } else {
        diff_report::print_report(&score_diff);
    }

    // Gate 1: any regression in the overall score fails the run.
    let overall = &score_diff.overall;
    if gate.fail_if_worse && overall.delta < 0.0 {
        return Err(format!(
            "quality gate failed: score dropped {:.1} → {:.1} ({:+.1})",
            overall.before, overall.after, overall.delta
        )
        .into());
    }

    // Gate 2: the post-change grade must meet the configured minimum.
    if let Some(threshold) = gate.fail_below
        && score_diff.after_grade.numeric_rank() < threshold.numeric_rank()
    {
        return Err(format!(
            "quality gate failed: score {} is below minimum threshold {}",
            score_diff.after_grade, threshold
        )
        .into());
    }
    Ok(())
}
/// Walks `cfg`'s source files and aggregates per-file metrics into a
/// project-level score under the given scoring model.
///
/// `bottom` caps the "needs attention" list; `min_lines` is the minimum
/// duplicate-block size passed to the duplicate detector. A project with no
/// analyzable files yields a zero score with empty dimensions rather than an
/// error.
fn compute_score(
    cfg: &WalkConfig<'_>,
    bottom: usize,
    min_lines: usize,
    model: &ScoringModel,
) -> Result<ProjectScore, Box<dyn Error>> {
    let exclude_tests = cfg.exclude_tests();

    // One pass over the tree: collect per-file metrics, normalized file text
    // for duplicate detection, and the running normalized-line total.
    let mut file_metrics: Vec<FileMetrics> = Vec::new();
    let mut dup_files: Vec<dups::detector::NormalizedFile> = Vec::new();
    let mut total_code_lines: usize = 0;
    for (file_path, spec) in cfg.source_files() {
        let Some(result) = analyze_single_file(&file_path, spec, exclude_tests, model) else {
            continue;
        };
        total_code_lines += result.normalized_count;
        dup_files.push(result.dup_file);
        file_metrics.push(result.metrics);
    }

    // Nothing to analyze: report a zero score with the model's empty dimension set.
    if file_metrics.is_empty() {
        return Ok(ProjectScore {
            score: 0.0,
            grade: score_to_grade(0.0),
            files_analyzed: 0,
            total_loc: 0,
            dimensions: build_empty_dimensions(model),
            needs_attention: vec![],
        });
    }

    // Duplication as a percentage of all normalized code lines.
    let dup_groups = if !dup_files.is_empty() {
        dups::detector::detect_duplicates(&dup_files, min_lines, true)
    } else {
        Vec::new()
    };
    let duplicated_lines: usize = dup_groups.iter().map(|g| g.duplicated_lines()).sum();
    let dup_percent = match total_code_lines {
        0 => 0.0,
        total => duplicated_lines as f64 / total as f64 * 100.0,
    };

    let total_loc: usize = file_metrics.iter().map(|f| f.code_lines).sum();
    let dimensions = build_dimensions(&file_metrics, total_loc, dup_percent, model);
    let project_score = compute_project_score(&dimensions);

    // The `bottom` lowest-scoring files, ascending (stable sort keeps a
    // deterministic order among equal scores).
    let mut worst: Vec<FileScore> = file_metrics.iter().map(|f| score_file(f, model)).collect();
    worst.sort_by(|a, b| a.score.total_cmp(&b.score));
    worst.truncate(bottom);

    Ok(ProjectScore {
        score: project_score,
        grade: score_to_grade(project_score),
        files_analyzed: file_metrics.len(),
        total_loc,
        dimensions,
        needs_attention: worst,
    })
}
#[cfg(test)]
#[path = "mod_test.rs"]
mod tests;