use std::fs;
use std::path::PathBuf;

use proptest::prelude::*;
use tokmd_analysis_types::{AnalysisLimits, ComplexityReport, ComplexityRisk, FileComplexity};
use tokmd_types::{ChildIncludeMode, ExportData, FileKind, FileRow};

use crate::complexity::{build_complexity_report, generate_complexity_histogram};
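
/// Builds a synthetic `FileRow` for a parent file. Byte and token counts use
/// rough per-line multipliers; only the line counts matter for these tests.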
fn make_row(path: &str, lang: &str, code: usize) -> FileRow {
FileRow {
path: path.to_string(),
module: "src".to_string(),
lang: lang.to_string(),
kind: FileKind::Parent,
code,
comments: 0,
blanks: 0,
lines: code,
bytes: code * 40,
tokens: code * 8,
}
}
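
/// Wraps a set of rows in a minimal `ExportData` suitable for these tests.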
fn make_export(rows: Vec<FileRow>) -> ExportData {
ExportData {
rows,
module_roots: vec![],
module_depth: 1,
children: ChildIncludeMode::Separate,
}
}
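
/// Writes the given (relative path, content) pairs under a fresh tempdir.
/// Returns the tempdir guard (which must stay alive so the files persist)
/// together with the relative paths.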
fn write_temp_files(files: &[(&str, &str)]) -> (tempfile::TempDir, Vec<PathBuf>) {
let dir = tempfile::tempdir().expect("create tempdir");
let mut paths = Vec::new();
for (rel, content) in files {
let full = dir.path().join(rel);
if let Some(parent) = full.parent() {
fs::create_dir_all(parent).unwrap();
}
fs::write(&full, content).unwrap();
paths.push(PathBuf::from(rel));
}
(dir, paths)
}
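
/// End-to-end helper: writes the sources to disk, derives `FileRow`s from
/// their line counts, and runs `build_complexity_report` over them.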
fn analyze(files: &[(&str, &str, &str)], detail: bool) -> ComplexityReport {
let file_entries: Vec<(&str, &str)> = files.iter().map(|(p, _, c)| (*p, *c)).collect();
let (dir, paths) = write_temp_files(&file_entries);
let rows: Vec<FileRow> = files
.iter()
.map(|(p, lang, c)| make_row(p, lang, c.lines().count()))
.collect();
let export = make_export(rows);
build_complexity_report(
dir.path(),
&paths,
&export,
&AnalysisLimits::default(),
detail,
)
.unwrap()
}
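
/// Cyclomatic complexity for small snippets with hand-checked expected values.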
mod cyclomatic_known {
use super::*;
#[test]
fn linear_function_base_complexity() {
let code = "fn main() {\n let x = 1;\n let y = 2;\n println!(\"{}\", x + y);\n}\n";
let r = analyze(&[("main.rs", "Rust", code)], false);
assert_eq!(r.files.len(), 1);
assert_eq!(r.files[0].cyclomatic_complexity, 1);
}
#[test]
fn single_if_adds_one() {
let code = "fn check(x: i32) {\n if x > 0 {\n println!(\"pos\");\n }\n}\n";
let r = analyze(&[("check.rs", "Rust", code)], false);
assert_eq!(r.files[0].cyclomatic_complexity, 2);
}
#[test]
fn multiple_branches() {
let code = r#"fn classify(x: i32) {
if x > 0 {
if x > 100 {
println!("big");
}
} else {
println!("non-positive");
}
}
"#;
let r = analyze(&[("classify.rs", "Rust", code)], false);
assert_eq!(r.files[0].cyclomatic_complexity, 3);
}
#[test]
fn match_expression() {
let code = r#"fn describe(x: i32) {
match x {
0 => println!("zero"),
1 => println!("one"),
_ => println!("other"),
}
}
"#;
let r = analyze(&[("describe.rs", "Rust", code)], false);
assert_eq!(r.files[0].cyclomatic_complexity, 2);
}
#[test]
fn loop_and_condition() {
let code = r#"fn process(items: &[i32]) {
for item in items {
if *item > 0 {
println!("{}", item);
}
}
}
"#;
let r = analyze(&[("proc.rs", "Rust", code)], false);
assert_eq!(r.files[0].cyclomatic_complexity, 3);
}
#[test]
fn python_complexity() {
let code = "def f(x):\n if x > 0:\n return x\n elif x == 0:\n return 0\n else:\n return -x\n";
let r = analyze(&[("f.py", "Python", code)], false);
assert_eq!(r.files[0].cyclomatic_complexity, 4);
}
}
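
/// Cognitive complexity and per-function detail extraction.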
mod cognitive {
use super::*;
#[test]
fn function_details_extracted_for_rust() {
let code = r#"fn alpha() {
let x = 1;
}
fn beta(a: i32) -> i32 {
if a > 0 { a } else { -a }
}
"#;
let r = analyze(&[("funcs.rs", "Rust", code)], true);
assert_eq!(r.total_functions, 2);
let funcs = r.files[0].functions.as_ref().unwrap();
assert_eq!(funcs.len(), 2);
}
#[test]
fn cognitive_complexity_positive_when_reported() {
let code = r#"fn nested(x: i32) {
if x > 0 {
for i in 0..x {
if i % 2 == 0 {
while i > 0 {
break;
}
}
}
}
}
"#;
let r = analyze(&[("nested.rs", "Rust", code)], false);
if let Some(cog) = r.files[0].cognitive_complexity {
assert!(cog > 0, "nested code should have cognitive complexity > 0");
}
}
}
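
/// Degenerate inputs: empty files, comment-only files, unsupported languages,
/// and empty file sets.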
mod edge_cases {
use super::*;
#[test]
fn empty_file() {
let r = analyze(&[("empty.rs", "Rust", "")], false);
assert_eq!(r.files.len(), 1);
assert_eq!(r.files[0].function_count, 0);
assert_eq!(r.files[0].cyclomatic_complexity, 1);
}
#[test]
fn comment_only_file() {
let code = "// This is a comment\n// Another comment\n// Third line\n";
let r = analyze(&[("comments.rs", "Rust", code)], false);
assert_eq!(r.files[0].function_count, 0);
assert_eq!(r.files[0].cyclomatic_complexity, 1);
}
#[test]
fn unsupported_language_skipped() {
let code = "some content\n";
let file_entries = vec![("readme.md", code)];
let (dir, paths) = write_temp_files(&file_entries);
let rows = vec![make_row("readme.md", "Markdown", 1)];
let export = make_export(rows);
let r = build_complexity_report(
dir.path(),
&paths,
&export,
&AnalysisLimits::default(),
false,
)
.unwrap();
assert!(r.files.is_empty());
}
#[test]
fn no_files_produces_zero_aggregates() {
let r = build_complexity_report(
std::path::Path::new("."),
&[],
&make_export(vec![]),
&AnalysisLimits::default(),
false,
)
.unwrap();
assert_eq!(r.total_functions, 0);
assert_eq!(r.avg_cyclomatic, 0.0);
assert_eq!(r.max_cyclomatic, 0);
}
}
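
/// Strategy producing each `ComplexityRisk` variant with equal probability.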
fn arb_risk() -> impl Strategy<Value = ComplexityRisk> {
prop_oneof![
Just(ComplexityRisk::Low),
Just(ComplexityRisk::Moderate),
Just(ComplexityRisk::High),
Just(ComplexityRisk::Critical),
]
}
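
/// Strategy producing arbitrary `FileComplexity` values within plausible ranges.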
fn arb_file_complexity() -> impl Strategy<Value = FileComplexity> {
(
"[a-z]{1,8}\\.rs",
"[a-z]{1,5}",
0..200usize,
0..500usize,
0..500usize,
proptest::option::of(0..300usize),
proptest::option::of(0..20usize),
arb_risk(),
)
.prop_map(
|(path, module, fc, mfl, cc, cog, nest, risk)| FileComplexity {
path,
module,
function_count: fc,
max_function_length: mfl,
cyclomatic_complexity: cc,
cognitive_complexity: cog,
max_nesting: nest,
risk_level: risk,
functions: None,
},
)
}
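
/// Strategy producing up to 50 arbitrary files, including the empty case.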
fn arb_file_vec() -> impl Strategy<Value = Vec<FileComplexity>> {
proptest::collection::vec(arb_file_complexity(), 0..50)
}
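
// Histogram invariants that should hold for any generated input.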
proptest! {
#![proptest_config(ProptestConfig::with_cases(150))]
#[test]
fn prop_complexity_non_negative(
cc in 0..1000usize,
cog in proptest::option::of(0..500usize),
nest in proptest::option::of(0..20usize),
) {
// `usize` is non-negative by construction, so the named property holds
// trivially; these assertions sanity-check the generator bounds above.
prop_assert!(cc < 1000);
if let Some(c) = cog {
prop_assert!(c < 500);
}
if let Some(n) = nest {
prop_assert!(n < 20);
}
}
#[test]
fn prop_histogram_total_equals_file_count(files in arb_file_vec()) {
let hist = generate_complexity_histogram(&files, 5);
prop_assert_eq!(hist.total, files.len() as u32);
}
#[test]
fn prop_histogram_counts_sum_to_total(files in arb_file_vec()) {
let hist = generate_complexity_histogram(&files, 5);
let sum: u32 = hist.counts.iter().sum();
prop_assert_eq!(sum, hist.total);
}
#[test]
fn prop_histogram_seven_buckets(files in arb_file_vec()) {
let hist = generate_complexity_histogram(&files, 5);
prop_assert_eq!(hist.buckets.len(), 7);
prop_assert_eq!(hist.counts.len(), 7);
}
#[test]
fn prop_histogram_buckets_monotonic(
files in arb_file_vec(),
bucket_size in 1u32..20,
) {
let hist = generate_complexity_histogram(&files, bucket_size);
for w in hist.buckets.windows(2) {
prop_assert!(w[0] < w[1], "buckets must be strictly increasing");
}
}
#[test]
fn prop_single_file_lands_in_one_bucket(file in arb_file_complexity()) {
let hist = generate_complexity_histogram(&[file], 5);
let nonzero = hist.counts.iter().filter(|&&c| c > 0).count();
prop_assert_eq!(nonzero, 1);
}
#[test]
fn prop_histogram_deterministic(files in arb_file_vec()) {
let h1 = generate_complexity_histogram(&files, 5);
let h2 = generate_complexity_histogram(&files, 5);
prop_assert_eq!(h1.buckets, h2.buckets);
prop_assert_eq!(h1.counts, h2.counts);
prop_assert_eq!(h1.total, h2.total);
}
#[test]
fn prop_adding_file_never_decreases_bucket(
files in arb_file_vec(),
extra in arb_file_complexity(),
) {
let h_before = generate_complexity_histogram(&files, 5);
let mut extended = files;
extended.push(extra);
let h_after = generate_complexity_histogram(&extended, 5);
prop_assert_eq!(h_after.total, h_before.total + 1);
for (i, (&before, &after)) in h_before.counts.iter().zip(h_after.counts.iter()).enumerate() {
prop_assert!(
after >= before,
"bucket {i} decreased from {before} to {after}",
);
}
}
}