}
#[test]
fn test_metric_value_without_threshold() {
    // A metric whose optional threshold is absent must still serialize to JSON.
    let metric = MetricValue {
        value: 42.5,
        unit: "lines".to_string(),
        trend: Trend::Unknown,
        threshold: None,
    };
    assert!(serde_json::to_string(&metric).is_ok());
}
#[test]
fn test_health_score_with_zero_functions() {
    // A complexity analysis reporting zero functions is the classic
    // divide-by-zero hazard for averaged scores; the health score must
    // still be computed without panicking.
    let service = EnhancedReportingService::default();
    let results = AnalysisResults {
        total_duration: Duration::from_secs(10),
        analyzed_files: 50,
        total_lines: 10000,
        complexity_analysis: Some(ComplexityAnalysis {
            total_cyclomatic: 0,
            total_cognitive: 0,
            functions: 0,
            max_cyclomatic: 0,
            high_complexity_functions: 0,
            distribution: vec![],
        }),
        dead_code_analysis: None,
        duplication_analysis: None,
        tdg_analysis: None,
        big_o_analysis: None,
    };
    let service = std::panic::AssertUnwindSafe(service);
    let results = std::panic::AssertUnwindSafe(results);
    let result = std::panic::catch_unwind(|| service.calculate_health_score(&results));
    // BUG FIX: the previous assertion `result.is_err() || result.is_ok()`
    // was a tautology and could never fail. Assert the real contract:
    // no panic, and a score within the documented [0, 100] range (the
    // same invariant the property tests below check).
    let score = result.expect("calculate_health_score panicked with zero functions");
    assert!(
        (0.0..=100.0).contains(&score),
        "health score out of range with zero functions: {score}"
    );
}
#[test]
fn test_health_score_with_zero_total_lines() {
    // Zero analyzed files / zero total lines is another division hazard
    // (percentages over total_lines); the score must still be computed
    // without panicking.
    let service = EnhancedReportingService::default();
    let results = AnalysisResults {
        total_duration: Duration::from_secs(10),
        analyzed_files: 0,
        total_lines: 0,
        complexity_analysis: None,
        dead_code_analysis: Some(DeadCodeAnalysis {
            dead_lines: 0,
            dead_functions: 0,
            dead_code_percentage: 0.0,
        }),
        duplication_analysis: None,
        tdg_analysis: None,
        big_o_analysis: None,
    };
    let service = std::panic::AssertUnwindSafe(service);
    let results = std::panic::AssertUnwindSafe(results);
    let result = std::panic::catch_unwind(|| service.calculate_health_score(&results));
    // BUG FIX: the previous assertion `result.is_err() || result.is_ok()`
    // was a tautology and could never fail. Assert the real contract:
    // no panic, and a score within the documented [0, 100] range.
    let score = result.expect("calculate_health_score panicked with zero total lines");
    assert!(
        (0.0..=100.0).contains(&score),
        "health score out of range with zero total lines: {score}"
    );
}
#[tokio::test]
async fn test_markdown_format_with_empty_findings() {
    // A completely empty report (no files, findings, sections,
    // recommendations, or visualizations) must still render as Markdown
    // and mention the project name.
    let service = EnhancedReportingService::default();
    let metadata = ReportMetadata {
        project_name: "empty-project".to_string(),
        project_path: "/empty".to_string(),
        report_date: "2024-01-01".to_string(),
        tool_version: "1.0.0".to_string(),
        analysis_duration: 1.0,
        analyzed_files: 0,
        total_lines: 0,
    };
    let executive_summary = ExecutiveSummary {
        overall_health_score: 100.0,
        critical_issues: 0,
        high_priority_issues: 0,
        key_findings: Vec::new(),
        risk_assessment: RiskLevel::Low,
    };
    let report = UnifiedAnalysisReport {
        metadata,
        executive_summary,
        sections: Vec::new(),
        recommendations: Vec::new(),
        visualizations: Vec::new(),
    };
    let formatted = service.format_report(&report, ReportFormat::Markdown).await;
    assert!(formatted.is_ok());
    assert!(formatted.unwrap().contains("empty-project"));
}
#[tokio::test]
async fn test_markdown_format_with_all_priorities() {
let service = EnhancedReportingService::default();
let report = UnifiedAnalysisReport {
metadata: ReportMetadata {
project_name: "test".to_string(),
project_path: "/test".to_string(),
report_date: "2024-01-01".to_string(),
tool_version: "1.0.0".to_string(),
analysis_duration: 1.0,
analyzed_files: 10,
total_lines: 1000,
},
executive_summary: ExecutiveSummary {
overall_health_score: 50.0,
critical_issues: 1,
high_priority_issues: 2,
key_findings: vec!["Critical issue found".to_string()],
risk_assessment: RiskLevel::High,
},
sections: Vec::new(),
recommendations: vec![
Recommendation {
priority: Priority::Critical,
category: "Security".to_string(),
title: "Critical security fix".to_string(),
description: "Fix immediately".to_string(),
expected_impact: "Prevent breach".to_string(),
effort: EffortLevel::Hard,
related_findings: vec![],
},
Recommendation {
priority: Priority::High,
category: "Performance".to_string(),
title: "Performance improvement".to_string(),
description: "Optimize".to_string(),
expected_impact: "Faster".to_string(),
effort: EffortLevel::Medium,
related_findings: vec![],
},
Recommendation {
priority: Priority::Medium,
category: "Quality".to_string(),
title: "Code quality".to_string(),
description: "Improve".to_string(),
expected_impact: "Better".to_string(),
effort: EffortLevel::Easy,
related_findings: vec![],
},
Recommendation {
priority: Priority::Low,
category: "Style".to_string(),
title: "Style fix".to_string(),
description: "Minor".to_string(),
expected_impact: "Cleaner".to_string(),
effort: EffortLevel::Trivial,
related_findings: vec![],
},
],
visualizations: Vec::new(),
};
let formatted = service.format_report(&report, ReportFormat::Markdown).await;
assert!(formatted.is_ok());
let md = formatted.unwrap();
assert!(md.contains("CRITICAL"));
assert!(md.contains("HIGH"));
assert!(md.contains("MEDIUM"));
assert!(md.contains("LOW"));
}
}
mod enhanced_property_tests {
use super::*;
use proptest::prelude::*;
use std::time::Duration;
/// Strategy producing `ComplexityAnalysis` values with independently drawn
/// totals/counts; the distribution histogram is a fixed shape.
fn complexity_analysis_strategy() -> impl Strategy<Value = ComplexityAnalysis> {
    let ranges = (1u32..10000, 1u32..20000, 1usize..500, 1u32..100, 0usize..100);
    ranges.prop_map(|(total_cyclomatic, total_cognitive, functions, max_cyclomatic, high)| {
        ComplexityAnalysis {
            total_cyclomatic,
            total_cognitive,
            functions,
            max_cyclomatic,
            high_complexity_functions: high,
            distribution: vec![10, 20, 15, 10, 5],
        }
    })
}
/// Strategy producing arbitrary `DeadCodeAnalysis` values, including the
/// all-zero case.
fn dead_code_analysis_strategy() -> impl Strategy<Value = DeadCodeAnalysis> {
    let ranges = (0usize..10000, 0usize..500, 0.0f64..100.0);
    ranges.prop_map(|(lines, funcs, pct)| DeadCodeAnalysis {
        dead_lines: lines,
        dead_functions: funcs,
        dead_code_percentage: pct,
    })
}
/// Strategy producing arbitrary `DuplicationAnalysis` values, including the
/// all-zero case.
fn duplication_analysis_strategy() -> impl Strategy<Value = DuplicationAnalysis> {
    let ranges = (0usize..10000, 0usize..500, 0.0f64..100.0);
    ranges.prop_map(|(lines, blocks, pct)| DuplicationAnalysis {
        duplicated_lines: lines,
        duplicate_blocks: blocks,
        duplication_percentage: pct,
    })
}
/// Strategy producing arbitrary `TdgAnalysis` values.
fn tdg_analysis_strategy() -> impl Strategy<Value = TdgAnalysis> {
    let ranges = (0.0f64..20.0, 0.0f64..30.0, 0usize..100);
    ranges.prop_map(|(avg, raw_max, files)| TdgAnalysis {
        average_tdg: avg,
        // Clamp so the invariant `max_tdg >= average_tdg` always holds,
        // since the two ranges are drawn independently.
        max_tdg: raw_max.max(avg),
        high_tdg_files: files,
    })
}
proptest! {
#[test]
fn prop_health_score_bounded(
    complexity in prop::option::of(complexity_analysis_strategy()),
    dead_code in prop::option::of(dead_code_analysis_strategy()),
    duplication in prop::option::of(duplication_analysis_strategy()),
    tdg in prop::option::of(tdg_analysis_strategy()),
    total_lines in 1000usize..100000,
) {
    // For any combination of present/absent analyses, the health score
    // must stay within the inclusive [0, 100] range.
    let results = AnalysisResults {
        total_duration: Duration::from_secs(10),
        analyzed_files: 50,
        total_lines,
        complexity_analysis: complexity,
        dead_code_analysis: dead_code,
        duplication_analysis: duplication,
        tdg_analysis: tdg,
        big_o_analysis: None,
    };
    let score = EnhancedReportingService::default().calculate_health_score(&results);
    prop_assert!(score >= 0.0, "Score should never be negative: {}", score);
    prop_assert!(score <= 100.0, "Score should never exceed 100: {}", score);
}
#[test]
fn prop_risk_assessment_consistent_with_health_score(
    complexity in prop::option::of(complexity_analysis_strategy()),
    dead_code in prop::option::of(dead_code_analysis_strategy()),
    total_lines in 1000usize..100000,
) {
    // The risk level reported by assess_overall_risk must agree with the
    // numeric band the health score falls into.
    let service = EnhancedReportingService::default();
    let results = AnalysisResults {
        analyzed_files: 50,
        total_duration: Duration::from_secs(10),
        total_lines,
        complexity_analysis: complexity,
        dead_code_analysis: dead_code,
        big_o_analysis: None,
        duplication_analysis: None,
        tdg_analysis: None,
    };
    let score = service.calculate_health_score(&results);
    let risk = service.assess_overall_risk(&results);
    match risk {
        RiskLevel::Low => prop_assert!(score >= 80.0, "Low risk should have score >= 80: {}", score),
        RiskLevel::Medium => prop_assert!(score >= 60.0 && score < 80.0, "Medium risk should have 60 <= score < 80: {}", score),
        RiskLevel::High => prop_assert!(score >= 40.0 && score < 60.0, "High risk should have 40 <= score < 60: {}", score),
        RiskLevel::Critical => prop_assert!(score < 40.0, "Critical risk should have score < 40: {}", score),
    }
}
#[test]
fn prop_serialization_roundtrip(
    total_cyclomatic in 1u32..1000,
    functions in 1usize..100,
) {
    // ComplexityAnalysis must survive a JSON encode/decode round trip
    // with the randomized fields intact.
    let original = ComplexityAnalysis {
        total_cyclomatic,
        total_cognitive: total_cyclomatic + 100,
        functions,
        max_cyclomatic: 20,
        high_complexity_functions: 5,
        distribution: vec![10, 20, 15, 10, 5],
    };
    // Convert serde errors into proptest failures so `?` can be used here.
    let as_failure = |e: serde_json::Error| {
        proptest::test_runner::TestCaseError::Fail(e.to_string().into())
    };
    let json = serde_json::to_string(&original).map_err(as_failure)?;
    let decoded: ComplexityAnalysis = serde_json::from_str(&json).map_err(as_failure)?;
    prop_assert_eq!(original.total_cyclomatic, decoded.total_cyclomatic);
    prop_assert_eq!(original.functions, decoded.functions);
}
#[test]
fn prop_key_findings_count_reasonable(
    max_cc in 0u32..100,
    dead_funcs in 0usize..100,
    dup_blocks in 0usize..100,
) {
    // However extreme the randomized metrics get, the executive summary
    // should surface at most three key findings.
    let complexity = ComplexityAnalysis {
        total_cyclomatic: 500,
        total_cognitive: 800,
        functions: 50,
        max_cyclomatic: max_cc,
        high_complexity_functions: 10,
        distribution: vec![20, 15, 10, 3, 2],
    };
    let dead_code = DeadCodeAnalysis {
        dead_lines: 200,
        dead_functions: dead_funcs,
        dead_code_percentage: 2.0,
    };
    let duplication = DuplicationAnalysis {
        duplicated_lines: 500,
        duplicate_blocks: dup_blocks,
        duplication_percentage: 5.0,
    };
    let results = AnalysisResults {
        total_duration: Duration::from_secs(10),
        analyzed_files: 50,
        total_lines: 10000,
        complexity_analysis: Some(complexity),
        dead_code_analysis: Some(dead_code),
        duplication_analysis: Some(duplication),
        tdg_analysis: None,
        big_o_analysis: None,
    };
    let findings = EnhancedReportingService::default().extract_key_findings(&results);
    prop_assert!(findings.len() <= 3);
}
#[test]
fn prop_sections_match_available_analyses(
    has_complexity in any::<bool>(),
    has_dead_code in any::<bool>(),
    has_duplication in any::<bool>(),
    has_tdg in any::<bool>(),
    has_big_o in any::<bool>(),
) {
    // build_sections should emit exactly one section per analysis that is
    // actually present in the results, for every subset of analyses.
    let service = EnhancedReportingService::default();
    let config = ReportConfig {
        project_path: PathBuf::from("/test"),
        output_format: ReportFormat::Json,
        include_visualizations: false,
        include_executive_summary: true,
        include_recommendations: true,
        confidence_threshold: 80,
        output_path: None,
    };
    // `bool::then` maps each presence flag to Some(analysis) / None.
    let results = AnalysisResults {
        total_duration: Duration::from_secs(10),
        analyzed_files: 50,
        total_lines: 10000,
        complexity_analysis: has_complexity.then(|| ComplexityAnalysis {
            total_cyclomatic: 100,
            total_cognitive: 150,
            functions: 20,
            max_cyclomatic: 10,
            high_complexity_functions: 0,
            distribution: vec![20, 0, 0, 0, 0],
        }),
        dead_code_analysis: has_dead_code.then(|| DeadCodeAnalysis {
            dead_lines: 50,
            dead_functions: 5,
            dead_code_percentage: 0.5,
        }),
        duplication_analysis: has_duplication.then(|| DuplicationAnalysis {
            duplicated_lines: 100,
            duplicate_blocks: 5,
            duplication_percentage: 1.0,
        }),
        tdg_analysis: has_tdg.then(|| TdgAnalysis {
            average_tdg: 2.0,
            max_tdg: 3.0,
            high_tdg_files: 2,
        }),
        big_o_analysis: has_big_o.then(|| BigOAnalysis {
            analyzed_functions: 20,
            high_complexity_count: 2,
            complexity_distribution: HashMap::new(),
        }),
    };
    let sections = service.build_sections(&results, &config).unwrap();
    let flags = [has_complexity, has_dead_code, has_duplication, has_tdg, has_big_o];
    let expected_count = flags.into_iter().filter(|&x| x).count();
    prop_assert_eq!(sections.len(), expected_count,
        "Expected {} sections, got {}",
        expected_count, sections.len());
}
}
}