use super::*;
#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod tests {
//! Unit tests for the TDG (Technical Debt Gradient) calculator: value structs,
//! score weighting, confidence, percentiles, distributions, recommendations
//! helpers, and end-to-end file scoring.
use super::*;
use tempfile::TempDir;
// --- ComplexityVariance: plain data struct; verify field storage, Clone, Debug ---
#[test]
fn test_complexity_variance_default_values() {
let cv = ComplexityVariance {
mean: 0.0,
variance: 0.0,
gini: 0.0,
percentile_90: 0.0,
};
assert_eq!(cv.mean, 0.0);
assert_eq!(cv.variance, 0.0);
assert_eq!(cv.gini, 0.0);
assert_eq!(cv.percentile_90, 0.0);
}
#[test]
fn test_complexity_variance_positive_values() {
let cv = ComplexityVariance {
mean: 10.5,
variance: 25.0,
gini: 0.35,
percentile_90: 20.0,
};
assert_eq!(cv.mean, 10.5);
assert_eq!(cv.variance, 25.0);
assert_eq!(cv.gini, 0.35);
assert_eq!(cv.percentile_90, 20.0);
}
#[test]
fn test_complexity_variance_clone() {
let cv = ComplexityVariance {
mean: 5.0,
variance: 10.0,
gini: 0.2,
percentile_90: 15.0,
};
let cloned = cv.clone();
assert_eq!(cloned.mean, cv.mean);
assert_eq!(cloned.variance, cv.variance);
assert_eq!(cloned.gini, cv.gini);
assert_eq!(cloned.percentile_90, cv.percentile_90);
}
#[test]
fn test_complexity_variance_debug() {
let cv = ComplexityVariance {
mean: 5.0,
variance: 10.0,
gini: 0.2,
percentile_90: 15.0,
};
// Only checks that the derived Debug output names the struct and fields.
let debug = format!("{:?}", cv);
assert!(debug.contains("ComplexityVariance"));
assert!(debug.contains("mean"));
assert!(debug.contains("variance"));
}
// --- CouplingMetrics: afferent/efferent counts plus an instability ratio ---
#[test]
fn test_coupling_metrics_default_values() {
let cm = CouplingMetrics {
afferent: 0,
efferent: 0,
instability: 0.0,
};
assert_eq!(cm.afferent, 0);
assert_eq!(cm.efferent, 0);
assert_eq!(cm.instability, 0.0);
}
#[test]
fn test_coupling_metrics_with_dependencies() {
// instability 0.667 is consistent with efferent / (afferent + efferent)
// = 10 / 15 — TODO confirm that formula against the producing code.
let cm = CouplingMetrics {
afferent: 5,
efferent: 10,
instability: 0.667,
};
assert_eq!(cm.afferent, 5);
assert_eq!(cm.efferent, 10);
assert!((cm.instability - 0.667).abs() < 0.001);
}
#[test]
fn test_coupling_metrics_clone() {
let cm = CouplingMetrics {
afferent: 3,
efferent: 7,
instability: 0.7,
};
let cloned = cm.clone();
assert_eq!(cloned.afferent, cm.afferent);
assert_eq!(cloned.efferent, cm.efferent);
assert_eq!(cloned.instability, cm.instability);
}
#[test]
fn test_coupling_metrics_debug() {
let cm = CouplingMetrics {
afferent: 3,
efferent: 7,
instability: 0.7,
};
let debug = format!("{:?}", cm);
assert!(debug.contains("CouplingMetrics"));
assert!(debug.contains("afferent"));
assert!(debug.contains("efferent"));
}
// --- TDGCalculator construction: new/default root at ".", with_config stores weights ---
#[test]
fn test_tdg_calculator_new() {
let calc = TDGCalculator::new();
assert_eq!(calc.project_root, PathBuf::from("."));
}
#[test]
fn test_tdg_calculator_with_config() {
let config = TDGConfig {
complexity_weight: 0.5,
churn_weight: 0.2,
coupling_weight: 0.1,
domain_risk_weight: 0.1,
duplication_weight: 0.1,
..Default::default()
};
let calc = TDGCalculator::with_config(config.clone());
assert_eq!(calc.config.complexity_weight, 0.5);
assert_eq!(calc.config.churn_weight, 0.2);
}
#[test]
fn test_tdg_calculator_default() {
let calc = TDGCalculator::default();
assert_eq!(calc.project_root, PathBuf::from("."));
}
#[test]
fn test_tdg_calculator_clone() {
let calc = TDGCalculator::new();
let cloned = calc.clone();
assert_eq!(cloned.project_root, calc.project_root);
}
// --- TDGComponents: per-dimension contributions to the overall score ---
#[test]
fn test_tdg_components_default() {
let components = TDGComponents::default();
assert_eq!(components.complexity, 0.0);
assert_eq!(components.churn, 0.0);
assert_eq!(components.coupling, 0.0);
assert_eq!(components.domain_risk, 0.0);
assert_eq!(components.duplication, 0.0);
}
#[test]
fn test_tdg_components_with_values() {
let components = TDGComponents {
complexity: 2.5,
churn: 1.5,
coupling: 3.0,
domain_risk: 1.0,
duplication: 0.5,
dead_code: 0.0,
};
assert_eq!(components.complexity, 2.5);
assert_eq!(components.churn, 1.5);
assert_eq!(components.coupling, 3.0);
assert_eq!(components.domain_risk, 1.0);
assert_eq!(components.duplication, 0.5);
}
// --- calculate_weighted_tdg: weighted sum of components; the provability
// argument reduces the score and the result is capped (assertions below
// show a cap at 5.0 and a monotone provability discount). ---
#[test]
fn test_calculate_weighted_tdg_zero_components() {
let calc = TDGCalculator::new();
let components = TDGComponents::default();
let result = calc.calculate_weighted_tdg(&components, 0.0);
assert_eq!(result, 0.0);
}
#[test]
fn test_calculate_weighted_tdg_with_provability() {
let calc = TDGCalculator::new();
let components = TDGComponents {
complexity: 5.0,
churn: 5.0,
coupling: 5.0,
domain_risk: 5.0,
duplication: 5.0,
dead_code: 0.0,
};
// Higher provability should lower the debt score.
let result_no_prov = calc.calculate_weighted_tdg(&components, 0.0);
let result_with_prov = calc.calculate_weighted_tdg(&components, 1.0);
assert!(result_with_prov < result_no_prov);
}
#[test]
fn test_calculate_weighted_tdg_clamped() {
let calc = TDGCalculator::new();
let components = TDGComponents {
complexity: 10.0, churn: 10.0,
coupling: 10.0,
domain_risk: 10.0,
duplication: 10.0,
dead_code: 0.0,
};
let result = calc.calculate_weighted_tdg(&components, 0.0);
assert!(result <= 5.0, "TDG should be clamped to 5.0");
}
// --- calculate_confidence: confidence drops as component data is missing
// (zero-valued). Expected values 0.8 and 0.684 pin implementation penalty
// constants — TODO confirm against calculate_confidence if they change. ---
#[test]
fn test_calculate_confidence_full_data() {
let calc = TDGCalculator::new();
let components = TDGComponents {
complexity: 2.0,
churn: 1.5,
coupling: 1.0,
domain_risk: 0.5,
duplication: 0.3,
dead_code: 0.0,
};
let confidence = calc.calculate_confidence(&components);
assert_eq!(confidence, 1.0, "Full data should have 100% confidence");
}
#[test]
fn test_calculate_confidence_missing_churn() {
let calc = TDGCalculator::new();
let components = TDGComponents {
complexity: 2.0,
churn: 0.0, coupling: 1.0,
domain_risk: 0.5,
duplication: 0.3,
dead_code: 0.0,
};
let confidence = calc.calculate_confidence(&components);
assert!(confidence < 1.0, "Missing churn should reduce confidence");
assert!((confidence - 0.8).abs() < 0.01);
}
#[test]
fn test_calculate_confidence_missing_all() {
let calc = TDGCalculator::new();
let components = TDGComponents {
complexity: 2.0, churn: 0.0,
coupling: 0.0,
domain_risk: 0.0,
duplication: 0.0,
dead_code: 0.0,
};
let confidence = calc.calculate_confidence(&components);
assert!((confidence - 0.684).abs() < 0.01);
}
// --- calculate_percentiles / percentile: in-place percentile assignment and
// percentile lookup over a value slice ---
#[test]
fn test_calculate_percentiles_single_score() {
let calc = TDGCalculator::new();
let mut scores = vec![TDGScore {
value: 2.5,
components: TDGComponents::default(),
severity: TDGSeverity::Normal,
percentile: 0.0,
confidence: 1.0,
}];
calc.calculate_percentiles(&mut scores);
// A lone score stays at the 0th percentile.
assert_eq!(scores[0].percentile, 0.0);
}
#[test]
fn test_calculate_percentiles_multiple_scores() {
let calc = TDGCalculator::new();
let mut scores = vec![
TDGScore {
value: 1.0,
components: TDGComponents::default(),
severity: TDGSeverity::Normal,
percentile: 0.0,
confidence: 1.0,
},
TDGScore {
value: 2.0,
components: TDGComponents::default(),
severity: TDGSeverity::Normal,
percentile: 0.0,
confidence: 1.0,
},
TDGScore {
value: 3.0,
components: TDGComponents::default(),
severity: TDGSeverity::Warning,
percentile: 0.0,
confidence: 1.0,
},
];
calc.calculate_percentiles(&mut scores);
// Percentiles must be monotone in score value.
assert!(scores[0].percentile <= scores[1].percentile);
assert!(scores[1].percentile <= scores[2].percentile);
}
#[test]
fn test_percentile_empty() {
let calc = TDGCalculator::new();
let values: Vec<f64> = vec![];
let result = calc.percentile(&values, 0.5);
assert_eq!(result, 0.0);
}
#[test]
fn test_percentile_single_value() {
let calc = TDGCalculator::new();
let values = vec![5.0];
assert_eq!(calc.percentile(&values, 0.5), 5.0);
assert_eq!(calc.percentile(&values, 0.95), 5.0);
}
#[test]
fn test_percentile_multiple_values() {
let calc = TDGCalculator::new();
let values: Vec<f64> = (1..=100).map(|x| x as f64).collect();
let p50 = calc.percentile(&values, 0.5);
let p95 = calc.percentile(&values, 0.95);
let p99 = calc.percentile(&values, 0.99);
assert!(p50 < p95);
assert!(p95 < p99);
}
// --- identify_primary_factor: the dominant component maps to a fixed label ---
#[test]
fn test_identify_primary_factor_complexity() {
let calc = TDGCalculator::new();
let components = TDGComponents {
complexity: 5.0,
churn: 1.0,
coupling: 1.0,
domain_risk: 1.0,
duplication: 1.0,
dead_code: 0.0,
};
let factor = calc.identify_primary_factor(&components);
assert_eq!(factor, "High Complexity");
}
#[test]
fn test_identify_primary_factor_churn() {
let calc = TDGCalculator::new();
let components = TDGComponents {
complexity: 1.0,
churn: 5.0,
coupling: 1.0,
domain_risk: 1.0,
duplication: 1.0,
dead_code: 0.0,
};
let factor = calc.identify_primary_factor(&components);
assert_eq!(factor, "Frequent Changes");
}
#[test]
fn test_identify_primary_factor_coupling() {
let calc = TDGCalculator::new();
let components = TDGComponents {
complexity: 1.0,
churn: 1.0,
coupling: 5.0,
domain_risk: 1.0,
duplication: 1.0,
dead_code: 0.0,
};
let factor = calc.identify_primary_factor(&components);
assert_eq!(factor, "High Coupling");
}
#[test]
fn test_identify_primary_factor_domain_risk() {
let calc = TDGCalculator::new();
let components = TDGComponents {
complexity: 1.0,
churn: 1.0,
coupling: 1.0,
domain_risk: 5.0,
duplication: 1.0,
dead_code: 0.0,
};
let factor = calc.identify_primary_factor(&components);
assert_eq!(factor, "Domain Risk");
}
#[test]
fn test_identify_primary_factor_duplication() {
let calc = TDGCalculator::new();
let components = TDGComponents {
complexity: 1.0,
churn: 1.0,
coupling: 1.0,
domain_risk: 1.0,
duplication: 5.0,
dead_code: 0.0,
};
let factor = calc.identify_primary_factor(&components);
assert_eq!(factor, "Code Duplication");
}
// --- estimate_refactoring_hours: 2.0 base hours at score 0; the
// constant-ratio assertion below shows growth is exponential in the score
// (3.6 at 1.0 pins the implementation's growth constant — TODO confirm). ---
#[test]
fn test_estimate_refactoring_hours_zero() {
let calc = TDGCalculator::new();
let hours = calc.estimate_refactoring_hours(0.0);
assert_eq!(hours, 2.0); }
#[test]
fn test_estimate_refactoring_hours_low() {
let calc = TDGCalculator::new();
let hours = calc.estimate_refactoring_hours(1.0);
assert!((hours - 3.6).abs() < 0.01);
}
#[test]
fn test_estimate_refactoring_hours_high() {
let calc = TDGCalculator::new();
let hours_low = calc.estimate_refactoring_hours(1.0);
let hours_high = calc.estimate_refactoring_hours(3.0);
assert!(hours_high > hours_low);
}
#[test]
fn test_estimate_refactoring_hours_increases_exponentially() {
let calc = TDGCalculator::new();
let hours_1 = calc.estimate_refactoring_hours(1.0);
let hours_2 = calc.estimate_refactoring_hours(2.0);
let hours_3 = calc.estimate_refactoring_hours(3.0);
// Equal successive ratios <=> exponential growth.
let ratio_1_2 = hours_2 / hours_1;
let ratio_2_3 = hours_3 / hours_2;
assert!((ratio_1_2 - ratio_2_3).abs() < 0.01);
}
// --- generate_explanation: human-readable report must mention each component ---
#[test]
fn test_generate_explanation_normal() {
let calc = TDGCalculator::new();
let score = TDGScore {
value: 1.5,
components: TDGComponents {
complexity: 2.0,
churn: 1.5,
coupling: 1.0,
domain_risk: 0.5,
duplication: 0.2,
dead_code: 0.0,
},
severity: TDGSeverity::Normal,
percentile: 50.0,
confidence: 0.9,
};
let explanation = calc.generate_explanation(&score);
assert!(explanation.contains("Code Quality Gradient"));
assert!(explanation.contains("Complexity"));
assert!(explanation.contains("Code Churn"));
assert!(explanation.contains("Coupling"));
assert!(explanation.contains("Domain Risk"));
assert!(explanation.contains("Duplication"));
assert!(explanation.contains("Confidence"));
}
#[test]
fn test_generate_explanation_critical() {
let calc = TDGCalculator::new();
let score = TDGScore {
value: 4.0,
components: TDGComponents {
complexity: 5.0,
churn: 4.0,
coupling: 3.5,
domain_risk: 2.0,
duplication: 1.5,
dead_code: 0.0,
},
severity: TDGSeverity::Critical,
percentile: 95.0,
confidence: 1.0,
};
let explanation = calc.generate_explanation(&score);
assert!(explanation.contains("critical"));
}
// --- count_imports: line-based import counting across languages. The
// JavaScript case counting 2 suggests `require(...)` is not counted —
// TODO confirm that is intentional in count_imports. ---
#[test]
fn test_count_imports_rust() {
let calc = TDGCalculator::new();
let content = r#"
use std::collections::HashMap;
use std::path::PathBuf;
use crate::models::tdg::TDGScore;
fn main() {}
"#;
let count = calc.count_imports(content);
assert_eq!(count, 3);
}
#[test]
fn test_count_imports_python() {
let calc = TDGCalculator::new();
let content = r#"
import os
import sys
from pathlib import Path
from typing import Dict, List
def main():
pass
"#;
let count = calc.count_imports(content);
assert_eq!(count, 4);
}
#[test]
fn test_count_imports_javascript() {
let calc = TDGCalculator::new();
let content = r#"
import React from 'react';
import { useState } from 'react';
const fs = require('fs');
function App() {}
"#;
let count = calc.count_imports(content);
assert_eq!(count, 2);
}
#[test]
fn test_count_imports_empty() {
let calc = TDGCalculator::new();
let content = "fn main() {}";
let count = calc.count_imports(content);
assert_eq!(count, 0);
}
// --- calculate_distribution: bucketed histogram of scores; percentages sum to 100 ---
#[test]
fn test_calculate_distribution_empty() {
let calc = TDGCalculator::new();
let scores: Vec<TDGScore> = vec![];
let distribution = calc.calculate_distribution(&scores);
assert_eq!(distribution.total_files, 0);
// Buckets are pre-defined even with no input, each with zero count.
assert!(!distribution.buckets.is_empty());
for bucket in &distribution.buckets {
assert_eq!(bucket.count, 0);
}
}
#[test]
fn test_calculate_distribution_single() {
let calc = TDGCalculator::new();
let scores = vec![TDGScore {
value: 2.3,
components: TDGComponents::default(),
severity: TDGSeverity::Normal,
percentile: 0.0,
confidence: 1.0,
}];
let distribution = calc.calculate_distribution(&scores);
assert_eq!(distribution.total_files, 1);
// 2.3 should land in the [2.0, 2.5) bucket, implying 0.5-wide buckets.
let bucket = distribution
.buckets
.iter()
.find(|b| b.min == 2.0 && b.max == 2.5);
assert!(bucket.is_some());
assert_eq!(bucket.unwrap().count, 1);
assert_eq!(bucket.unwrap().percentage, 100.0);
}
#[test]
fn test_calculate_distribution_multiple_buckets() {
let calc = TDGCalculator::new();
let scores = vec![
TDGScore {
value: 0.3,
components: TDGComponents::default(),
severity: TDGSeverity::Normal,
percentile: 0.0,
confidence: 1.0,
},
TDGScore {
value: 1.2,
components: TDGComponents::default(),
severity: TDGSeverity::Normal,
percentile: 0.0,
confidence: 1.0,
},
TDGScore {
value: 2.7,
components: TDGComponents::default(),
severity: TDGSeverity::Warning,
percentile: 0.0,
confidence: 1.0,
},
TDGScore {
value: 4.1,
components: TDGComponents::default(),
severity: TDGSeverity::Critical,
percentile: 0.0,
confidence: 1.0,
},
];
let distribution = calc.calculate_distribution(&scores);
assert_eq!(distribution.total_files, 4);
let total_percentage: f64 = distribution.buckets.iter().map(|b| b.percentage).sum();
assert!((total_percentage - 100.0).abs() < 0.01);
}
// --- End-to-end: score a real file on disk. Skips (returns early) when the
// churn analysis requires a git repository and none is present. ---
#[tokio::test]
async fn test_tdg_calculation() {
let calculator = TDGCalculator::new();
let temp_dir = TempDir::new().expect("internal error");
let test_file = temp_dir.path().join("test.rs");
tokio::fs::write(
&test_file,
r#"
fn complex_function(x: i32) -> i32 {
if x > 0 {
if x > 10 {
if x > 20 {
x * 2
} else {
x + 10
}
} else {
x + 5
}
} else {
0
}
}
"#,
)
.await
.expect("internal error");
let score_result = calculator.calculate_file(&test_file).await;
if let Err(e) = score_result {
// Environment-dependent skip: temp dirs are not git repositories.
if e.to_string().contains("git repository") {
return;
}
panic!("Unexpected error: {e}");
}
let score = score_result.expect("internal error");
assert!(score.value > 0.0);
assert!(score.value <= 5.0);
assert!(score.components.complexity > 0.0);
}
#[test]
fn test_tdg_distribution() {
let calculator = TDGCalculator::new();
let scores = vec![
TDGScore {
value: 0.5,
components: TDGComponents::default(),
severity: TDGSeverity::Normal,
percentile: 0.0,
confidence: 1.0,
},
TDGScore {
value: 1.8,
components: TDGComponents::default(),
severity: TDGSeverity::Warning,
percentile: 0.0,
confidence: 1.0,
},
TDGScore {
value: 3.2,
components: TDGComponents::default(),
severity: TDGSeverity::Critical,
percentile: 0.0,
confidence: 1.0,
},
];
let distribution = calculator.calculate_distribution(&scores);
assert_eq!(distribution.total_files, 3);
assert!(!distribution.buckets.is_empty());
let total_percentage: f64 = distribution.buckets.iter().map(|b| b.percentage).sum();
assert!((total_percentage - 100.0).abs() < 0.01);
}
// --- End-to-end: files of differing complexity must produce distinct TDG
// values with non-trivial variance. ---
#[tokio::test]
async fn test_tdg_variance() {
let calculator = TDGCalculator::new();
let temp_dir = TempDir::new().expect("internal error");
let simple_file = temp_dir.path().join("simple.rs");
tokio::fs::write(
&simple_file,
r#"
fn simple() -> i32 {
42
}
"#,
)
.await
.expect("internal error");
// NOTE(review): this fixture is written to disk and analyzed only — it is
// never compiled ("0..*item.abs()" would not compile as real Rust).
let complex_file = temp_dir.path().join("complex.rs");
tokio::fs::write(
&complex_file,
r#"
fn complex(items: &[i32]) -> i32 {
let mut result = 0;
for item in items {
if *item > 0 {
if *item % 2 == 0 {
result += item;
} else {
result -= item;
}
} else if *item < -10 {
for i in 0..*item.abs() {
result *= 2;
}
}
}
result
}
"#,
)
.await
.expect("internal error");
let medium_file = temp_dir.path().join("medium.rs");
tokio::fs::write(
&medium_file,
r#"
fn medium(x: i32, y: i32) -> i32 {
if x > y {
x - y
} else {
y - x
}
}
"#,
)
.await
.expect("internal error");
let simple_result = calculator.calculate_file(&simple_file).await;
let complex_result = calculator.calculate_file(&complex_file).await;
let medium_result = calculator.calculate_file(&medium_file).await;
// NOTE(review): only simple_result is checked for the git-repository skip;
// if complex/medium alone hit that error the expect() below still panics.
if let Err(e) = &simple_result {
if e.to_string().contains("git repository") {
return;
}
}
let simple_tdg = simple_result.expect("internal error");
let complex_tdg = complex_result.expect("internal error");
let medium_tdg = medium_result.expect("internal error");
assert_ne!(
simple_tdg.value, complex_tdg.value,
"Simple and complex files should have different TDG values"
);
assert_ne!(
simple_tdg.value, medium_tdg.value,
"Simple and medium files should have different TDG values"
);
assert_ne!(
complex_tdg.value, medium_tdg.value,
"Complex and medium files should have different TDG values"
);
assert!(
complex_tdg.value >= medium_tdg.value,
"Complex file TDG ({:.3}) should be >= medium TDG ({:.3})",
complex_tdg.value,
medium_tdg.value
);
// Population variance across the three scores; must exceed a noise floor.
let values = [simple_tdg.value, complex_tdg.value, medium_tdg.value];
let mean = values.iter().sum::<f64>() / values.len() as f64;
let variance = values.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / values.len() as f64;
println!(
"TDG values: simple={:.3}, medium={:.3}, complex={:.3}",
simple_tdg.value, medium_tdg.value, complex_tdg.value
);
println!("Variance: {variance:.3}");
assert!(
variance > 0.01,
"TDG variance {variance:.3} too low - values too similar"
);
}
}
#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod property_tests {
    //! Minimal property-based smoke tests: they exist to keep the proptest
    //! harness wired into this module and always hold for every input.
    use proptest::prelude::*;

    proptest! {
        /// Trivially true for any generated string; exercises the harness.
        #[test]
        fn basic_property_stability(_text in ".*") {
            prop_assert!(true);
        }

        /// Holds for every value drawn from 0..1000 (all are <= 1000).
        #[test]
        fn module_consistency_check(value in 0u32..1000) {
            prop_assert!(value <= 1000);
        }
    }
}
#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod simd_equivalence_tests {
//! Property/equivalence tests for variance and Gini statistics (scalar vs the
//! "simd" feature path) plus tests for TDG recommendation generation.
use proptest::prelude::*;
// Absolute tolerance for "exactly zero" float comparisons.
const EPSILON: f64 = 1e-5;
// Proptest strategy: non-empty vectors of complexity counts in [1, 1000).
fn complexity_vec(max_len: usize) -> impl Strategy<Value = Vec<u32>> {
prop::collection::vec(1u32..1000, 1..=max_len)
}
// Variance via aprender's Vector (f32 internally, widened back to f64).
fn variance(values: &[u32]) -> f64 {
use aprender::primitives::Vector;
if values.is_empty() {
return 0.0;
}
let values_f32: Vec<f32> = values.iter().map(|&x| x as f32).collect();
let vec = Vector::from_slice(&values_f32);
vec.variance() as f64
}
// Gini coefficient via aprender's Vector; all-zero input short-circuits to 0.
fn gini(values: &[u32]) -> f64 {
use aprender::primitives::Vector;
if values.is_empty() {
return 0.0;
}
let sum: u32 = values.iter().sum();
if sum == 0 {
return 0.0;
}
let values_f32: Vec<f32> = values.iter().map(|&x| x as f32).collect();
let vec = Vector::from_slice(&values_f32);
vec.gini_coefficient() as f64
}
// NOTE(review): the *_scalar and *_simd wrappers below all delegate to the
// SAME helpers above, so the scalar-vs-SIMD comparisons in this module are
// tautological at this layer. Presumably aprender selects a SIMD kernel
// internally when the feature is on — TODO confirm, otherwise these
// equivalence tests cannot fail on a real SIMD divergence.
fn variance_scalar(values: &[u32]) -> f64 {
variance(values)
}
#[cfg(feature = "simd")]
fn variance_simd(values: &[u32]) -> f64 {
variance(values)
}
fn gini_scalar(values: &[u32]) -> f64 {
gini(values)
}
#[cfg(feature = "simd")]
fn gini_simd(values: &[u32]) -> f64 {
gini(values)
}
// Mathematical invariants that must hold for any input.
proptest! {
#[test]
fn variance_non_negative(values in complexity_vec(100)) {
let var = variance_scalar(&values);
prop_assert!(var >= 0.0, "Variance must be non-negative: {}", var);
}
#[test]
fn variance_zero_for_uniform(value in 1u32..1000) {
let values = vec![value; 10];
let var = variance_scalar(&values);
prop_assert!(var.abs() < EPSILON, "Uniform values should have zero variance: {}", var);
}
#[test]
fn gini_bounded(values in complexity_vec(100)) {
let gini = gini_scalar(&values);
prop_assert!((-1.0..=1.0).contains(&gini), "Gini should be bounded: {}", gini);
}
}
// Scalar/SIMD agreement over 1000 random cases, with a relative+absolute
// tolerance for variance (f32 rounding differs between kernels).
#[test]
#[cfg(feature = "simd")]
fn simd_variance_matches_scalar() {
use proptest::test_runner::{Config, TestRunner};
let mut runner = TestRunner::new(Config::with_cases(1000));
runner
.run(&complexity_vec(256), |values| {
let scalar_result = variance_scalar(&values);
let simd_result = variance_simd(&values);
let diff = (scalar_result - simd_result).abs();
let tolerance = scalar_result.abs() * 0.01 + 1e-3;
prop_assert!(
diff < tolerance,
"Variance mismatch: scalar={}, simd={}, diff={}",
scalar_result,
simd_result,
diff
);
Ok(())
})
.expect("SIMD variance must match scalar within tolerance");
}
#[test]
#[cfg(feature = "simd")]
fn simd_gini_matches_scalar() {
use proptest::test_runner::{Config, TestRunner};
let mut runner = TestRunner::new(Config::with_cases(1000));
runner
.run(&complexity_vec(256), |values| {
let scalar_result = gini_scalar(&values);
let simd_result = gini_simd(&values);
let diff = (scalar_result - simd_result).abs();
// Gini is already in [-1, 1]; a flat absolute tolerance suffices.
let tolerance = 0.01;
prop_assert!(
diff < tolerance,
"Gini mismatch: scalar={}, simd={}, diff={}",
scalar_result,
simd_result,
diff
);
Ok(())
})
.expect("SIMD Gini must match scalar within tolerance");
}
// Deterministic edge cases: tiny, uniform, alternating, and 256-wide inputs
// (256 targets typical SIMD lane-multiple boundaries).
#[test]
#[cfg(feature = "simd")]
fn simd_handles_various_sizes() {
let test_cases = vec![
vec![1, 2, 3],
vec![10, 20, 30, 40, 50],
vec![1; 100],
vec![1, 100, 1, 100, 1, 100],
(1..=256).collect::<Vec<u32>>(),
];
for values in test_cases {
let scalar_var = variance_scalar(&values);
let simd_var = variance_simd(&values);
let var_diff = (scalar_var - simd_var).abs();
let scalar_gini = gini_scalar(&values);
let simd_gini = gini_simd(&values);
let gini_diff = (scalar_gini - simd_gini).abs();
assert!(
var_diff < scalar_var.abs() * 0.01 + 1e-3,
"Variance mismatch for size {}: scalar={}, simd={}",
values.len(),
scalar_var,
simd_var
);
assert!(
gini_diff < 0.01,
"Gini mismatch for size {}: scalar={}, simd={}",
values.len(),
scalar_gini,
simd_gini
);
}
}
// Sanity bound: population variance of 1..=1000 is (1000^2 - 1)/12 ~= 83333.25,
// which sits inside the asserted (80000, 84000) window.
#[test]
fn baseline_variance_performance() {
let values: Vec<u32> = (1..=1000).collect();
let var = variance_scalar(&values);
assert!(
var > 80000.0 && var < 84000.0,
"Variance out of expected range: {}",
var
);
}
#[test]
fn baseline_gini_performance() {
let values: Vec<u32> = (1..=1000).collect();
let gini = gini_scalar(&values);
assert!(
gini > 0.0 && gini < 1.0,
"Gini out of expected range: {}",
gini
);
}
// NOTE(review): mid-module `use` items are legal but unconventional; these
// serve the recommendation tests below.
use crate::models::tdg::{RecommendationType, TDGComponents, TDGScore, TDGSeverity};
use crate::services::tdg_calculator::TDGCalculator;
// Builds a TDGScore whose value is the plain sum of the four varying
// components (domain_risk and dead_code fixed at 0.0).
fn make_tdg_score(complexity: f64, churn: f64, coupling: f64, duplication: f64) -> TDGScore {
TDGScore {
value: complexity + churn + coupling + duplication,
components: TDGComponents {
complexity,
churn,
coupling,
domain_risk: 0.0,
duplication,
dead_code: 0.0,
},
severity: TDGSeverity::Normal,
percentile: 50.0,
confidence: 1.0,
}
}
// --- generate_recommendations: these tests imply strict thresholds of
// complexity/churn/coupling > 3.0 and duplication > 2.0, fixed priorities
// (5/4/3/2), fixed estimated hours, and reduction = component * weight *
// factor — all pinned implementation constants; confirm in the calculator
// before changing any expected value below. ---
#[tokio::test]
async fn test_generate_recommendations_all_below_thresholds() {
let calc = TDGCalculator::new();
let score = make_tdg_score(2.0, 2.0, 2.0, 1.0);
let path = std::path::Path::new("test.rs");
let recs = calc.generate_recommendations(&score, path).await.unwrap();
assert!(
recs.is_empty(),
"Expected no recommendations when all components below thresholds, got {}",
recs.len()
);
}
#[tokio::test]
async fn test_generate_recommendations_only_complexity_above() {
let calc = TDGCalculator::new();
let score = make_tdg_score(4.0, 1.0, 1.0, 0.5);
let path = std::path::Path::new("test.rs");
let recs = calc.generate_recommendations(&score, path).await.unwrap();
assert_eq!(recs.len(), 1);
assert_eq!(
recs[0].recommendation_type,
RecommendationType::ReduceComplexity
);
assert_eq!(recs[0].priority, 5);
assert_eq!(recs[0].estimated_hours, 4.0);
// reduction = complexity * 0.3 * 0.25 (implementation constants).
let expected = 4.0 * 0.3 * 0.25;
assert!(
(recs[0].expected_reduction - expected).abs() < 1e-10,
"Expected reduction {}, got {}",
expected,
recs[0].expected_reduction
);
}
#[tokio::test]
async fn test_generate_recommendations_all_above_thresholds() {
let calc = TDGCalculator::new();
let score = make_tdg_score(4.5, 3.5, 3.2, 2.5);
let path = std::path::Path::new("test.rs");
let recs = calc.generate_recommendations(&score, path).await.unwrap();
assert_eq!(
recs.len(),
4,
"Expected 4 recommendations, got {}",
recs.len()
);
// Recommendations arrive ordered by descending priority.
assert_eq!(recs[0].priority, 5);
assert_eq!(recs[1].priority, 4);
assert_eq!(recs[2].priority, 3);
assert_eq!(recs[3].priority, 2);
assert_eq!(
recs[0].recommendation_type,
RecommendationType::ReduceComplexity
);
assert_eq!(
recs[1].recommendation_type,
RecommendationType::StabilizeChurn
);
assert_eq!(
recs[2].recommendation_type,
RecommendationType::ReduceCoupling
);
assert_eq!(
recs[3].recommendation_type,
RecommendationType::RemoveDuplication
);
}
#[tokio::test]
async fn test_generate_recommendations_complexity_and_duplication_only() {
let calc = TDGCalculator::new();
let score = make_tdg_score(3.5, 2.0, 2.5, 3.0);
let path = std::path::Path::new("test.rs");
let recs = calc.generate_recommendations(&score, path).await.unwrap();
assert_eq!(
recs.len(),
2,
"Expected 2 recommendations, got {}",
recs.len()
);
assert_eq!(recs[0].priority, 5);
assert_eq!(
recs[0].recommendation_type,
RecommendationType::ReduceComplexity
);
assert_eq!(recs[1].priority, 2);
assert_eq!(
recs[1].recommendation_type,
RecommendationType::RemoveDuplication
);
}
#[tokio::test]
async fn test_generate_recommendations_at_exact_thresholds() {
let calc = TDGCalculator::new();
// Exactly at the thresholds: comparison must be strict (>), so no recs.
let score = make_tdg_score(3.0, 3.0, 3.0, 2.0);
let path = std::path::Path::new("test.rs");
let recs = calc.generate_recommendations(&score, path).await.unwrap();
assert!(
recs.is_empty(),
"Expected no recommendations at exact thresholds (not >), got {}",
recs.len()
);
}
#[tokio::test]
async fn test_generate_recommendations_churn_reduction_calculation() {
let calc = TDGCalculator::new();
let score = make_tdg_score(1.0, 4.0, 1.0, 0.5);
let path = std::path::Path::new("test.rs");
let recs = calc.generate_recommendations(&score, path).await.unwrap();
assert_eq!(recs.len(), 1);
assert_eq!(
recs[0].recommendation_type,
RecommendationType::StabilizeChurn
);
assert_eq!(recs[0].priority, 4);
assert_eq!(recs[0].estimated_hours, 8.0);
// reduction = churn * 0.4 * 0.20 (implementation constants).
let expected = 4.0 * 0.4 * 0.20;
assert!(
(recs[0].expected_reduction - expected).abs() < 1e-10,
"Expected reduction {}, got {}",
expected,
recs[0].expected_reduction
);
}
#[tokio::test]
async fn test_generate_recommendations_coupling_reduction_calculation() {
let calc = TDGCalculator::new();
let score = make_tdg_score(1.0, 1.0, 4.0, 0.5);
let path = std::path::Path::new("test.rs");
let recs = calc.generate_recommendations(&score, path).await.unwrap();
assert_eq!(recs.len(), 1);
assert_eq!(
recs[0].recommendation_type,
RecommendationType::ReduceCoupling
);
assert_eq!(recs[0].priority, 3);
assert_eq!(recs[0].estimated_hours, 6.0);
// reduction = coupling * 0.35 * 0.15 (implementation constants).
let expected = 4.0 * 0.35 * 0.15;
assert!(
(recs[0].expected_reduction - expected).abs() < 1e-10,
"Expected reduction {}, got {}",
expected,
recs[0].expected_reduction
);
}
#[tokio::test]
async fn test_generate_recommendations_duplication_reduction_calculation() {
let calc = TDGCalculator::new();
let score = make_tdg_score(1.0, 1.0, 1.0, 3.0);
let path = std::path::Path::new("test.rs");
let recs = calc.generate_recommendations(&score, path).await.unwrap();
assert_eq!(recs.len(), 1);
assert_eq!(
recs[0].recommendation_type,
RecommendationType::RemoveDuplication
);
assert_eq!(recs[0].priority, 2);
assert_eq!(recs[0].estimated_hours, 3.0);
// reduction = duplication * 0.5 * 0.10 (implementation constants).
let expected = 3.0 * 0.5 * 0.10;
assert!(
(recs[0].expected_reduction - expected).abs() < 1e-10,
"Expected reduction {}, got {}",
expected,
recs[0].expected_reduction
);
}
#[tokio::test]
async fn test_generate_recommendations_action_strings() {
let calc = TDGCalculator::new();
let score = make_tdg_score(4.0, 4.0, 4.0, 3.0);
let path = std::path::Path::new("test.rs");
let recs = calc.generate_recommendations(&score, path).await.unwrap();
assert_eq!(recs.len(), 4);
assert_eq!(
recs[0].action,
"Extract complex logic into smaller, focused functions"
);
assert_eq!(
recs[1].action,
"Add comprehensive tests to stabilize frequently changing code"
);
assert_eq!(
recs[2].action,
"Introduce abstractions to reduce direct dependencies"
);
assert_eq!(
recs[3].action,
"Extract duplicated code into shared utilities"
);
}
}