impl AdvancedUnifiedContextBuilder {
    // ---- Markdown list helpers --------------------------------------------

    /// Appends one "- {item}" bullet line per item.
    ///
    /// Shared by every section that renders a plain bulleted list, so the
    /// bullet format is defined in exactly one place.
    fn push_bullets<I>(&mut self, items: I)
    where
        I: IntoIterator,
        I::Item: std::fmt::Display,
    {
        for item in items {
            self.output.push_str(&format!("- {}\n", item));
        }
    }

    /// Appends one "- `{item}`" (code-styled) bullet line per item.
    fn push_code_bullets<I>(&mut self, items: I)
    where
        I: IntoIterator,
        I::Item: std::fmt::Display,
    {
        for item in items {
            self.output.push_str(&format!("- `{}`\n", item));
        }
    }

    // ---- Report sections --------------------------------------------------

    /// Appends the "Big-O Complexity Analysis" section.
    ///
    /// Analysis failure is non-fatal: it is logged, an "unavailable" note is
    /// written instead, and `Ok(())` is still returned. The same policy
    /// applies to every `add_*_analysis` method below.
    async fn add_big_o_analysis(&mut self) -> Result<()> {
        self.output.push_str("## Big-O Complexity Analysis\n\n");
        match self.run_big_o_analysis().await {
            Ok(analysis) => {
                if analysis.is_empty() {
                    self.output
                        .push_str("*No complexity patterns detected*\n\n");
                } else {
                    for (function, complexity) in analysis {
                        self.output
                            .push_str(&format!("- `{}`: {}\n", function, complexity));
                    }
                    self.output.push('\n');
                }
            }
            Err(e) => {
                warn!("Big-O analysis failed: {}", e);
                self.output.push_str("*Big-O analysis unavailable*\n\n");
            }
        }
        Ok(())
    }

    /// Appends the "Entropy Analysis" section: pattern/structural entropy,
    /// duplication percentage, and any actionable improvement items.
    async fn add_entropy_analysis(&mut self) -> Result<()> {
        self.output.push_str("## Entropy Analysis\n\n");
        match self.run_entropy_analysis().await {
            Ok(entropy_data) => {
                self.output.push_str(&format!(
                    "- **Pattern Entropy**: {:.3}\n",
                    entropy_data.pattern_entropy
                ));
                self.output.push_str(&format!(
                    "- **Code Duplication**: {:.1}%\n",
                    entropy_data.duplication_percentage
                ));
                self.output.push_str(&format!(
                    "- **Structural Entropy**: {:.3}\n",
                    entropy_data.structural_entropy
                ));
                if !entropy_data.actionable_items.is_empty() {
                    self.output.push_str("\n### Actionable Improvements:\n");
                    self.push_bullets(&entropy_data.actionable_items);
                }
                self.output.push('\n');
            }
            Err(e) => {
                warn!("Entropy analysis failed: {}", e);
                self.output.push_str("*Entropy analysis unavailable*\n\n");
            }
        }
        Ok(())
    }

    /// Appends the "Provability Analysis" section: invariants, pre- and
    /// post-conditions, plus an overall verification status line.
    async fn add_provability_analysis(&mut self) -> Result<()> {
        self.output.push_str("## Provability Analysis\n\n");
        match self.run_provability_analysis().await {
            Ok(provability_data) => {
                self.output.push_str("### Invariants\n");
                self.push_bullets(&provability_data.invariants);
                self.output.push_str("\n### Pre-conditions\n");
                self.push_bullets(&provability_data.preconditions);
                self.output.push_str("\n### Post-conditions\n");
                self.push_bullets(&provability_data.postconditions);
                self.output.push_str(&format!(
                    "\n### Verification Status: {}\n",
                    if provability_data.verified {
                        "✓ Verified"
                    } else {
                        "⚠ Unverified"
                    }
                ));
                self.output.push('\n');
            }
            Err(e) => {
                warn!("Provability analysis failed: {}", e);
                self.output
                    .push_str("*Provability analysis unavailable*\n\n");
            }
        }
        Ok(())
    }

    /// Appends the "Graph Metrics" section: centrality measures, basic graph
    /// structure counts, and any identified critical paths.
    async fn add_graph_metrics(&mut self) -> Result<()> {
        self.output.push_str("## Graph Metrics\n\n");
        match self.run_graph_metrics_analysis().await {
            Ok(graph_data) => {
                self.output.push_str("### Centrality Measures\n");
                self.output.push_str(&format!(
                    "- **Betweenness Centrality**: {:.3}\n",
                    graph_data.betweenness
                ));
                self.output.push_str(&format!(
                    "- **Closeness Centrality**: {:.3}\n",
                    graph_data.closeness
                ));
                self.output.push_str(&format!(
                    "- **Degree Centrality**: {:.3}\n",
                    graph_data.degree
                ));
                self.output.push_str("\n### Graph Structure\n");
                self.output
                    .push_str(&format!("- **Nodes**: {}\n", graph_data.node_count));
                self.output
                    .push_str(&format!("- **Edges**: {}\n", graph_data.edge_count));
                self.output
                    .push_str(&format!("- **Density**: {:.3}\n", graph_data.density));
                if !graph_data.critical_paths.is_empty() {
                    self.output.push_str("\n### Critical Paths\n");
                    self.push_bullets(&graph_data.critical_paths);
                }
                self.output.push('\n');
            }
            Err(e) => {
                warn!("Graph metrics analysis failed: {}", e);
                self.output.push_str("*Graph metrics unavailable*\n\n");
            }
        }
        Ok(())
    }

    /// Appends the "Technical Debt Gradient (TDG)" section: the overall
    /// score, per-file scores, debt hotspots, and the top five refactoring
    /// priorities.
    async fn add_tdg_analysis(&mut self) -> Result<()> {
        self.output.push_str("## Technical Debt Gradient (TDG)\n\n");
        match self.run_tdg_analysis().await {
            Ok(tdg_data) => {
                self.output.push_str(&format!(
                    "### Overall TDG Score: {:.2}\n\n",
                    tdg_data.overall_score
                ));
                if !tdg_data.file_scores.is_empty() {
                    self.output.push_str("### File-level TDG Scores\n");
                    for (file, score) in &tdg_data.file_scores {
                        self.output
                            .push_str(&format!("- `{}`: {:.2}\n", file, score));
                    }
                }
                if !tdg_data.hotspots.is_empty() {
                    self.output.push_str("\n### Debt Hotspots\n");
                    for hotspot in &tdg_data.hotspots {
                        self.output.push_str(&format!(
                            "- {} (Score: {:.2})\n",
                            hotspot.location, hotspot.score
                        ));
                    }
                }
                if !tdg_data.priorities.is_empty() {
                    self.output.push_str("\n### Refactoring Priorities\n");
                    // Numbered list, capped at the five highest priorities.
                    for (i, priority) in tdg_data.priorities.iter().enumerate().take(5) {
                        self.output.push_str(&format!("{}. {}\n", i + 1, priority));
                    }
                }
                self.output.push('\n');
            }
            Err(e) => {
                warn!("TDG analysis failed: {}", e);
                self.output.push_str("*TDG analysis unavailable*\n\n");
            }
        }
        Ok(())
    }

    /// Appends the "Dead Code Analysis" section.
    ///
    /// Fix: the "no dead code" path previously wrote "\n\n" in the literal
    /// *and* the trailing `push('\n')`, producing three consecutive newlines;
    /// spacing now matches the other sections.
    async fn add_dead_code_analysis(&mut self) -> Result<()> {
        self.output.push_str("## Dead Code Analysis\n\n");
        match self.run_dead_code_analysis().await {
            Ok(dead_code_data) => {
                let total_dead = dead_code_data.total_dead_items();
                if total_dead == 0 {
                    self.output.push_str("✓ No dead code detected\n");
                } else {
                    self.output
                        .push_str(&format!("⚠ Total dead code items: {}\n\n", total_dead));
                    if !dead_code_data.unreachable_functions.is_empty() {
                        self.output.push_str("### Unreachable Functions\n");
                        self.push_code_bullets(&dead_code_data.unreachable_functions);
                    }
                    if !dead_code_data.unused_variables.is_empty() {
                        self.output.push_str("\n### Unused Variables\n");
                        self.push_code_bullets(&dead_code_data.unused_variables);
                    }
                    if !dead_code_data.unused_imports.is_empty() {
                        self.output.push_str("\n### Unused Imports\n");
                        self.push_code_bullets(&dead_code_data.unused_imports);
                    }
                }
                self.output.push('\n');
            }
            Err(e) => {
                warn!("Dead code analysis failed: {}", e);
                self.output.push_str("*Dead code analysis unavailable*\n\n");
            }
        }
        Ok(())
    }

    /// Appends the "Self-Admitted Technical Debt (SATD)" section.
    ///
    /// TODO lists are truncated to 5 entries (with a "... and N more" note);
    /// FIXME and HACK lists are truncated to 3 entries each.
    async fn add_satd_analysis(&mut self) -> Result<()> {
        self.output
            .push_str("## Self-Admitted Technical Debt (SATD)\n\n");
        match self.run_satd_analysis().await {
            Ok(satd_data) => {
                let total_satd = satd_data.total_satd_count();
                self.output
                    .push_str(&format!("### Total SATD Comments: {}\n\n", total_satd));
                if !satd_data.todos.is_empty() {
                    self.output
                        .push_str(&format!("### TODO Comments ({})\n", satd_data.todos.len()));
                    for todo in satd_data.todos.iter().take(5) {
                        self.output
                            .push_str(&format!("- {}: {}\n", todo.location, todo.comment));
                    }
                    if satd_data.todos.len() > 5 {
                        self.output
                            .push_str(&format!("- ... and {} more\n", satd_data.todos.len() - 5));
                    }
                }
                if !satd_data.fixmes.is_empty() {
                    self.output.push_str(&format!(
                        "\n### FIXME Comments ({})\n",
                        satd_data.fixmes.len()
                    ));
                    for fixme in satd_data.fixmes.iter().take(3) {
                        self.output
                            .push_str(&format!("- {}: {}\n", fixme.location, fixme.comment));
                    }
                }
                if !satd_data.hacks.is_empty() {
                    self.output.push_str(&format!(
                        "\n### HACK Comments ({})\n",
                        satd_data.hacks.len()
                    ));
                    for hack in satd_data.hacks.iter().take(3) {
                        self.output
                            .push_str(&format!("- {}: {}\n", hack.location, hack.comment));
                    }
                }
                self.output.push_str("\n### Debt Categories\n");
                self.output
                    .push_str(&format!("- **Design Debt**: {}\n", satd_data.design_debt));
                self.output
                    .push_str(&format!("- **Code Debt**: {}\n", satd_data.code_debt));
                self.output
                    .push_str(&format!("- **Test Debt**: {}\n", satd_data.test_debt));
                self.output.push_str(&format!(
                    "- **Documentation Debt**: {}\n",
                    satd_data.doc_debt
                ));
                self.output.push('\n');
            }
            Err(e) => {
                warn!("SATD analysis failed: {}", e);
                self.output.push_str("*SATD analysis unavailable*\n\n");
            }
        }
        Ok(())
    }

    /// Appends the "Quality Insights" section derived from the already-built
    /// `ProjectContext` — no additional analysis pass is run.
    fn add_quality_insights(&mut self, context: &ProjectContext) {
        self.output.push_str("## Quality Insights\n\n");
        let total_functions = context.summary.total_functions;
        let total_files = context.summary.total_files;
        if total_functions > 0 {
            // `max(1)` guards against a zero-file summary that still reports
            // functions (inconsistent input, but division by zero is worse).
            let avg_functions_per_file = total_functions as f64 / total_files.max(1) as f64;
            self.output.push_str(&format!(
                "- **Codebase Size**: {} functions across {} files\n",
                total_functions, total_files
            ));
            self.output.push_str(&format!(
                "- **Average Functions per File**: {:.1}\n",
                avg_functions_per_file
            ));
            if avg_functions_per_file > 10.0 {
                self.output
                    .push_str("- ⚠ High function density - consider modularization\n");
            }
            // Count *files* whose total cyclomatic complexity exceeds 10.
            let high_complexity_count = context
                .files
                .iter()
                .filter_map(|file| file.complexity_metrics.as_ref())
                .filter(|complexity| complexity.total_complexity.cyclomatic > 10)
                .count();
            if high_complexity_count > 0 {
                // Fix: this is a per-file count, but the label previously
                // said "functions".
                self.output.push_str(&format!(
                    "- ⚠ High complexity files: {}\n",
                    high_complexity_count
                ));
            }
        }
        self.output.push('\n');
    }

    /// Appends the "Recommendations" section: context-derived suggestions
    /// first, followed by a fixed set of general recommendations.
    fn add_recommendations(&mut self, context: &ProjectContext) {
        self.output.push_str("## Recommendations\n\n");
        let mut recommendations = Vec::new();
        if context.summary.total_functions > 100 {
            recommendations
                .push("Consider breaking down large modules into smaller, focused components");
        }
        // Average per-file total cyclomatic complexity, over only the files
        // that actually carry complexity metrics.
        let per_file: Vec<usize> = context
            .files
            .iter()
            .filter_map(|file| file.complexity_metrics.as_ref())
            .map(|complexity| complexity.total_complexity.cyclomatic as usize)
            .collect();
        if !per_file.is_empty() {
            let avg_complexity = per_file.iter().sum::<usize>() as f64 / per_file.len() as f64;
            if avg_complexity > 10.0 {
                recommendations
                    .push("High average complexity detected - refactor complex functions");
            }
        }
        recommendations.push("Enable all analysis features for comprehensive insights");
        recommendations.push("Review identified technical debt and create action items");
        recommendations.push("Monitor TDG scores over time to track improvement");
        self.push_bullets(recommendations);
        self.output.push('\n');
    }

    // ---- Analysis runners -------------------------------------------------
    // NOTE(review): these are placeholder implementations returning fixed or
    // empty data; presumably they will be wired to real analyzers later.

    /// Placeholder: maps function names to Big-O complexity descriptions.
    async fn run_big_o_analysis(&self) -> Result<HashMap<String, String>> {
        Ok(HashMap::new())
    }

    /// Placeholder: returns fixed entropy figures and no actionable items.
    async fn run_entropy_analysis(&self) -> Result<EntropyData> {
        Ok(EntropyData {
            pattern_entropy: 0.75,
            duplication_percentage: 16.4,
            structural_entropy: 0.82,
            actionable_items: vec![],
        })
    }

    /// Placeholder: returns empty condition lists, unverified.
    async fn run_provability_analysis(&self) -> Result<ProvabilityData> {
        Ok(ProvabilityData {
            invariants: vec![],
            preconditions: vec![],
            postconditions: vec![],
            verified: false,
        })
    }

    /// Placeholder: returns an empty graph (all metrics zero).
    async fn run_graph_metrics_analysis(&self) -> Result<GraphMetricsData> {
        Ok(GraphMetricsData {
            betweenness: 0.0,
            closeness: 0.0,
            degree: 0.0,
            node_count: 0,
            edge_count: 0,
            density: 0.0,
            critical_paths: vec![],
        })
    }

    /// Placeholder: returns a zero TDG score with no details.
    async fn run_tdg_analysis(&self) -> Result<TdgData> {
        Ok(TdgData {
            overall_score: 0.0,
            file_scores: HashMap::new(),
            hotspots: vec![],
            priorities: vec![],
        })
    }

    /// Placeholder: reports no dead code.
    async fn run_dead_code_analysis(&self) -> Result<DeadCodeData> {
        Ok(DeadCodeData {
            unreachable_functions: vec![],
            unused_variables: vec![],
            unused_imports: vec![],
        })
    }

    /// Placeholder: reports no SATD comments in any category.
    async fn run_satd_analysis(&self) -> Result<SatdData> {
        Ok(SatdData {
            todos: vec![],
            fixmes: vec![],
            hacks: vec![],
            design_debt: 0,
            code_debt: 0,
            test_debt: 0,
            doc_debt: 0,
        })
    }
}