use std::path::Path;
use crate::cogcom;
use crate::cycom;
use crate::dups;
use crate::hal;
use crate::indent;
use crate::loc::counter::LineKind;
use crate::miv;
use crate::util::{find_test_block_start, read_and_classify};
use super::ScoringModel;
/// Per-file metric summary produced by `analyze_single_file`.
///
/// Which optional fields are populated depends on the `ScoringModel`
/// passed to the analyzer: `max_cognitive` for the Cognitive model,
/// `mi_score`/`max_complexity` for the Legacy model.
pub struct FileMetrics {
/// Path of the analyzed file.
pub path: std::path::PathBuf,
/// Number of lines classified as `LineKind::Code`.
pub code_lines: usize,
/// Highest per-unit cognitive complexity; `Some` only under
/// `ScoringModel::Cognitive` (and only when analysis succeeded).
pub max_cognitive: Option<usize>,
/// Maintainability-index score; `Some` only under `ScoringModel::Legacy`
/// when both Halstead volume and MI computation succeed.
pub mi_score: Option<f64>,
/// Highest per-unit cyclomatic complexity; `Some` only under
/// `ScoringModel::Legacy`.
pub max_complexity: Option<usize>,
/// Standard deviation of indentation depth (computed with a tab width
/// of 4 at the call site); `None` when the analyzer yields nothing.
pub indent_stddev: Option<f64>,
/// Halstead effort metric, when Halstead analysis succeeded.
pub halstead_effort: Option<f64>,
}
/// Full result of analyzing one file: its metrics plus the normalized
/// line content used later for duplicate detection.
pub struct SingleFileResult {
/// Per-file metric summary.
pub metrics: FileMetrics,
/// Normalized representation of the file's lines (tests optionally
/// excluded) fed into the duplicate detector.
pub dup_file: dups::detector::NormalizedFile,
/// Number of normalized lines in `dup_file` (cached so callers need
/// not re-measure `dup_file.lines`).
pub normalized_count: usize,
}
/// Analyze a single file and compute its metrics under the given model.
///
/// Reads and classifies the file's lines, derives indentation and
/// Halstead metrics, then computes the model-specific complexity
/// figures (cognitive, or cyclomatic + maintainability index).
///
/// Returns `None` when the file cannot be read/classified (a warning is
/// printed on read errors), when classification yields nothing, or when
/// the selected model produced no usable metric. Otherwise returns the
/// metrics together with the normalized line content for duplicate
/// detection (test blocks trimmed off when `exclude_tests` is set).
pub fn analyze_single_file(
    file_path: &Path,
    spec: &crate::loc::language::LanguageSpec,
    exclude_tests: bool,
    model: &ScoringModel,
) -> Option<SingleFileResult> {
    // Read the file and classify each line; unreadable files are
    // reported as a warning and skipped rather than aborting the run.
    let (lines, kinds) = match read_and_classify(file_path, spec) {
        Ok(Some(pair)) => pair,
        Ok(None) => return None,
        Err(e) => {
            eprintln!("warning: {}: {e}", file_path.display());
            return None;
        }
    };

    // Tally code and comment lines in a single pass over the kinds.
    let mut code_lines = 0usize;
    let mut comment_lines = 0usize;
    for kind in &kinds {
        match kind {
            LineKind::Code => code_lines += 1,
            LineKind::Comment => comment_lines += 1,
            _ => {}
        }
    }

    // Model-independent metrics: indentation spread (tab width 4) and
    // Halstead effort. Halstead results are also reused for MI below.
    let indent_stddev = indent::analyzer::analyze(&lines, &kinds, 4).map(|m| m.stddev);
    let halstead = hal::analyze_content(&lines, &kinds, spec);
    let halstead_effort = halstead.as_ref().map(|h| h.effort);

    // Model-specific metrics, packed as (cognitive, mi, cyclomatic).
    let (max_cognitive, mi_score, max_complexity) = match model {
        ScoringModel::Cognitive => (
            cogcom::analyze_content(&lines, &kinds, spec).map(|c| c.max_complexity),
            None,
            None,
        ),
        ScoringModel::Legacy => {
            let cycom = cycom::analyze_content(&lines, &kinds, spec);
            let max_cyc = cycom.as_ref().map(|c| c.max_complexity);
            // MI needs Halstead volume; fall back to complexity 1 when
            // cyclomatic analysis produced nothing.
            let mi = halstead.as_ref().and_then(|h| {
                let complexity = cycom.as_ref().map_or(1, |c| c.total_complexity);
                miv::analyzer::compute_mi(h.volume, complexity, code_lines, comment_lines)
                    .map(|m| m.mi_score)
            });
            (None, mi, max_cyc)
        }
    };

    // Bail out when the chosen model yielded no usable metric at all.
    let usable = match model {
        ScoringModel::Cognitive => max_cognitive.is_some(),
        ScoringModel::Legacy => mi_score.is_some() || max_complexity.is_some(),
    };
    if !usable {
        return None;
    }

    // Normalize content for duplicate detection, optionally stopping at
    // the start of the trailing test block.
    let dup_end = if exclude_tests {
        find_test_block_start(&lines)
    } else {
        lines.len()
    };
    let normalized = dups::normalize_content(&lines[..dup_end], &kinds[..dup_end]);
    let normalized_count = normalized.len();

    Some(SingleFileResult {
        metrics: FileMetrics {
            path: file_path.to_path_buf(),
            code_lines,
            max_cognitive,
            mi_score,
            max_complexity,
            indent_stddev,
            halstead_effort,
        },
        dup_file: dups::detector::NormalizedFile {
            path: file_path.to_path_buf(),
            lines: normalized,
        },
        normalized_count,
    })
}