1use std::collections::HashMap;
19use std::fs;
20use std::path::{Path, PathBuf};
21use std::time::Instant;
22
23use anyhow::Result;
24use clap::Args;
25use serde_json::Value;
26use walkdir::WalkDir;
27
28use super::ast_cache::AstCache;
29use super::error::{RemainingError, RemainingResult};
30use super::types::{TodoItem, TodoReport, TodoSummary};
31
32use crate::output::OutputWriter;
33
34use crate::commands::dead::collect_module_infos_with_refcounts;
36use tldr_core::analysis::dead::dead_code_analysis_refcount;
37use tldr_core::{collect_all_functions, get_code_structure, FunctionRef, IgnoreSpec, Language};
38
// Sort keys for TODO items: lower values surface earlier in the final report.
// These mirror `SubAnalysis::priority()` — keep the two in sync when adding
// a new analysis.
const PRIORITY_DEAD_CODE: u32 = 1;
const PRIORITY_COMPLEXITY: u32 = 2;
const PRIORITY_COHESION: u32 = 3;
const PRIORITY_EQUIVALENCE: u32 = 4;
const PRIORITY_SIMILAR: u32 = 5;
49
/// The individual analyses the `todo` command can run.
///
/// Variants are listed in ascending priority order: earlier variants produce
/// items that sort before later ones in the merged report.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SubAnalysis {
    Dead,        // unused functions (reference-count based)
    Complexity,  // cyclomatic-complexity hotspots
    Cohesion,    // low-cohesion classes (LCOM4)
    Equivalence, // value equivalence (stubbed — see run_equivalence_analysis)
    Similar,     // near-duplicate code (expensive; skipped by --quick)
}
63
64impl SubAnalysis {
65 pub fn all() -> &'static [SubAnalysis] {
67 &[
68 SubAnalysis::Dead,
69 SubAnalysis::Complexity,
70 SubAnalysis::Cohesion,
71 SubAnalysis::Equivalence,
72 SubAnalysis::Similar,
73 ]
74 }
75
76 pub fn quick() -> &'static [SubAnalysis] {
78 &[
79 SubAnalysis::Dead,
80 SubAnalysis::Complexity,
81 SubAnalysis::Cohesion,
82 SubAnalysis::Equivalence,
83 ]
84 }
85
86 pub fn priority(&self) -> u32 {
88 match self {
89 SubAnalysis::Dead => PRIORITY_DEAD_CODE,
90 SubAnalysis::Complexity => PRIORITY_COMPLEXITY,
91 SubAnalysis::Cohesion => PRIORITY_COHESION,
92 SubAnalysis::Equivalence => PRIORITY_EQUIVALENCE,
93 SubAnalysis::Similar => PRIORITY_SIMILAR,
94 }
95 }
96
97 pub fn category(&self) -> &'static str {
99 match self {
100 SubAnalysis::Dead => "dead_code",
101 SubAnalysis::Complexity => "complexity",
102 SubAnalysis::Cohesion => "cohesion",
103 SubAnalysis::Equivalence => "equivalence",
104 SubAnalysis::Similar => "similar",
105 }
106 }
107}
108
109impl std::str::FromStr for SubAnalysis {
110 type Err = String;
111
112 fn from_str(s: &str) -> Result<Self, Self::Err> {
113 match s.to_lowercase().as_str() {
114 "dead" | "dead_code" => Ok(SubAnalysis::Dead),
115 "complexity" | "complex" => Ok(SubAnalysis::Complexity),
116 "cohesion" | "lcom4" => Ok(SubAnalysis::Cohesion),
117 "equivalence" | "equiv" | "gvn" => Ok(SubAnalysis::Equivalence),
118 "similar" | "sim" => Ok(SubAnalysis::Similar),
119 _ => Err(format!("Unknown analysis: {}", s)),
120 }
121 }
122}
123
// CLI arguments for the `todo` command.
//
// NOTE(review): field documentation is kept in `//` comments on purpose —
// clap renders `///` doc comments as user-visible `--help` text, and adding
// them here would change the CLI's help output.
#[derive(Debug, Args)]
pub struct TodoArgs {
    // File or directory to analyze.
    pub path: PathBuf,

    // Name of a single sub-analysis whose raw JSON result should be embedded
    // in the report (e.g. "dead", "complexity"); unparsable names select none.
    #[arg(long)]
    pub detail: Option<String>,

    // Skip the expensive "similar" analysis (see SubAnalysis::quick).
    #[arg(long)]
    pub quick: bool,

    // Maximum number of TODO items kept in the report; 0 disables the cap.
    #[arg(long, default_value = "20")]
    pub max_items: usize,

    // Optional file to write the report to instead of stdout.
    #[arg(long, short = 'O')]
    pub output: Option<PathBuf>,
}
161
162impl TodoArgs {
163 pub fn run(
165 &self,
166 format: crate::output::OutputFormat,
167 quiet: bool,
168 lang: Option<Language>,
169 ) -> Result<()> {
170 let writer = OutputWriter::new(format, quiet);
171 let start = Instant::now();
172
173 writer.progress(&format!(
174 "Analyzing {} for improvements...",
175 self.path.display()
176 ));
177
178 if !self.path.exists() {
180 return Err(RemainingError::file_not_found(&self.path).into());
181 }
182
183 let language = if let Some(l) = lang {
185 l
186 } else {
187 detect_language(&self.path)?
188 };
189
190 let mut cache = AstCache::default();
192
193 let analyses = if self.quick {
195 SubAnalysis::quick()
196 } else {
197 SubAnalysis::all()
198 };
199
200 let mut sub_results: HashMap<String, Value> = HashMap::new();
202 let mut all_items: Vec<TodoItem> = Vec::new();
203 let mut summary = TodoSummary::default();
204
205 for analysis in analyses {
206 writer.progress(&format!("Running {} analysis...", analysis.category()));
207
208 match run_sub_analysis(*analysis, &self.path, language, &mut cache) {
209 Ok((items, result_value)) => {
210 update_summary(&mut summary, *analysis, &items);
212
213 if let Some(ref detail) = self.detail {
215 if let Ok(detail_analysis) = detail.parse::<SubAnalysis>() {
216 if detail_analysis == *analysis {
217 sub_results.insert(analysis.category().to_string(), result_value);
218 }
219 }
220 }
221
222 all_items.extend(items);
224 }
225 Err(e) => {
226 writer.progress(&format!(
228 "Warning: {} analysis failed: {}",
229 analysis.category(),
230 e
231 ));
232 }
233 }
234 }
235
236 all_items.sort_by_key(|item| item.priority);
238
239 let total_items = all_items.len();
241 let truncated = self.max_items > 0 && total_items > self.max_items;
242 if truncated {
243 all_items.truncate(self.max_items);
244 }
245
246 let elapsed_ms = start.elapsed().as_secs_f64() * 1000.0;
248 let report = TodoReport {
249 wrapper: "todo".to_string(),
250 path: self.path.display().to_string(),
251 items: all_items,
252 summary,
253 sub_results,
254 total_elapsed_ms: elapsed_ms,
255 };
256
257 if let Some(ref output_path) = self.output {
259 if writer.is_text() {
261 let text = format_todo_text(&report, truncated, total_items);
262 fs::write(output_path, text)?;
263 } else {
264 let json = serde_json::to_string_pretty(&report)?;
265 fs::write(output_path, json)?;
266 }
267 } else {
268 if writer.is_text() {
270 let text = format_todo_text(&report, truncated, total_items);
271 writer.write_text(&text)?;
272 } else {
273 writer.write(&report)?;
274 }
275 }
276
277 Ok(())
278 }
279}
280
/// Dispatch a single sub-analysis against `path`.
///
/// Returns the TODO items the analysis produced plus its raw JSON result
/// (the caller keeps the latter only when `--detail` selects this analysis).
///
/// NOTE(review): `_cache` is currently unused — presumably reserved so
/// sub-analyses can share parsed ASTs later; confirm before removing it.
fn run_sub_analysis(
    analysis: SubAnalysis,
    path: &Path,
    language: Language,
    _cache: &mut AstCache,
) -> RemainingResult<(Vec<TodoItem>, Value)> {
    match analysis {
        SubAnalysis::Dead => run_dead_analysis(path, language),
        SubAnalysis::Complexity => run_complexity_analysis(path, language),
        SubAnalysis::Cohesion => run_cohesion_analysis(path),
        SubAnalysis::Equivalence => run_equivalence_analysis(path),
        SubAnalysis::Similar => run_similar_analysis(path),
    }
}
300
301fn run_dead_analysis(path: &Path, language: Language) -> RemainingResult<(Vec<TodoItem>, Value)> {
303 let project_root = if path.is_file() {
305 path.parent().unwrap_or(path)
306 } else {
307 path
308 };
309
310 let (module_infos, merged_ref_counts) =
312 collect_module_infos_with_refcounts(project_root, language);
313 let all_functions: Vec<FunctionRef> = collect_all_functions(&module_infos);
314
315 let report = dead_code_analysis_refcount(&all_functions, &merged_ref_counts, None)
317 .map_err(|e| RemainingError::analysis_error(format!("Dead code analysis failed: {}", e)))?;
318
319 let items: Vec<TodoItem> = report
321 .dead_functions
322 .iter()
323 .map(|func| {
324 TodoItem::new(
325 "dead_code",
326 PRIORITY_DEAD_CODE,
327 format!("Unused function: {}", func.name),
328 )
329 .with_location(func.file.display().to_string(), 0)
330 .with_severity("medium")
331 })
332 .collect();
333
334 let result_value = serde_json::to_value(&report).unwrap_or(Value::Null);
335
336 Ok((items, result_value))
337}
338
339fn run_complexity_analysis(
341 path: &Path,
342 language: Language,
343) -> RemainingResult<(Vec<TodoItem>, Value)> {
344 let structure = get_code_structure(path, language, 0, Some(&IgnoreSpec::default()))
346 .map_err(|e| RemainingError::analysis_error(format!("Failed to get structure: {}", e)))?;
347
348 let mut items = Vec::new();
349
350 for file in &structure.files {
352 for func_name in &file.functions {
353 let file_path = path.join(&file.path);
354 if let Ok(metrics) = tldr_core::calculate_complexity(
355 file_path.to_str().unwrap_or_default(),
356 func_name,
357 language,
358 ) {
359 if metrics.cyclomatic > 10 {
360 items.push(
361 TodoItem::new(
362 "complexity",
363 PRIORITY_COMPLEXITY,
364 format!(
365 "High complexity in {}: cyclomatic={}, consider refactoring",
366 func_name, metrics.cyclomatic
367 ),
368 )
369 .with_location(file.path.display().to_string(), 1)
370 .with_severity(if metrics.cyclomatic > 20 {
371 "high"
372 } else {
373 "medium"
374 })
375 .with_score(metrics.cyclomatic as f64 / 50.0),
376 );
377 }
378 }
379 }
380 }
381
382 let result_value = serde_json::json!({
383 "hotspots": items.len(),
384 "threshold": 10
385 });
386
387 Ok((items, result_value))
388}
389
390fn run_cohesion_analysis(path: &Path) -> RemainingResult<(Vec<TodoItem>, Value)> {
392 use crate::commands::patterns::cohesion::{run as run_cohesion, CohesionArgs};
393
394 let args = CohesionArgs {
395 path: path.to_path_buf(),
396 min_methods: 1,
397 include_dunder: false,
398 output_format: crate::commands::patterns::cohesion::OutputFormat::Json,
399 timeout: 30,
400 project_root: None,
401 lang: None,
402 };
403
404 let report = run_cohesion(args)
405 .map_err(|e| RemainingError::analysis_error(format!("Cohesion analysis failed: {}", e)))?;
406
407 let items: Vec<TodoItem> = report
408 .classes
409 .iter()
410 .filter(|c| c.lcom4 > 1)
411 .map(|c| {
412 TodoItem::new(
413 "cohesion",
414 PRIORITY_COHESION,
415 format!(
416 "Low cohesion in class {}: LCOM4={}, consider splitting",
417 c.class_name, c.lcom4
418 ),
419 )
420 .with_location(c.file_path.clone(), c.line)
421 .with_severity(if c.lcom4 > 3 { "high" } else { "medium" })
422 .with_score(c.lcom4 as f64 / 5.0)
423 })
424 .collect();
425
426 let result_value = serde_json::to_value(&report).unwrap_or(Value::Null);
427
428 Ok((items, result_value))
429}
430
431fn run_equivalence_analysis(_path: &Path) -> RemainingResult<(Vec<TodoItem>, Value)> {
433 let result_value = serde_json::json!({
436 "status": "not_implemented",
437 "message": "GVN equivalence analysis will be implemented in Phase 9"
438 });
439
440 Ok((Vec::new(), result_value))
441}
442
443fn run_similar_analysis(_path: &Path) -> RemainingResult<(Vec<TodoItem>, Value)> {
445 let result_value = serde_json::json!({
448 "status": "skipped",
449 "message": "Similar code analysis is expensive, consider using 'tldr similar' directly"
450 });
451
452 Ok((Vec::new(), result_value))
453}
454
455fn detect_language(path: &Path) -> RemainingResult<Language> {
461 if path.is_file() {
463 let ext = path
464 .extension()
465 .and_then(|e| e.to_str())
466 .unwrap_or_default();
467
468 match ext {
469 "py" => Ok(Language::Python),
470 "ts" | "tsx" => Ok(Language::TypeScript),
471 "js" | "jsx" => Ok(Language::JavaScript),
472 "rs" => Ok(Language::Rust),
473 "go" => Ok(Language::Go),
474 _ => Err(RemainingError::unsupported_language(ext)),
475 }
476 } else if path.is_dir() {
477 for entry in WalkDir::new(path)
479 .max_depth(2)
480 .into_iter()
481 .filter_map(|e| e.ok())
482 {
483 if let Some(ext) = entry.path().extension().and_then(|e| e.to_str()) {
484 match ext {
485 "py" => return Ok(Language::Python),
486 "ts" | "tsx" => return Ok(Language::TypeScript),
487 "js" | "jsx" => return Ok(Language::JavaScript),
488 "rs" => return Ok(Language::Rust),
489 "go" => return Ok(Language::Go),
490 _ => continue,
491 }
492 }
493 }
494 Ok(Language::Python)
496 } else {
497 Err(RemainingError::file_not_found(path))
498 }
499}
500
501fn update_summary(summary: &mut TodoSummary, analysis: SubAnalysis, items: &[TodoItem]) {
503 match analysis {
504 SubAnalysis::Dead => summary.dead_count = items.len() as u32,
505 SubAnalysis::Complexity => summary.hotspot_count = items.len() as u32,
506 SubAnalysis::Cohesion => summary.low_cohesion_count = items.len() as u32,
507 SubAnalysis::Equivalence => summary.equivalence_groups = items.len() as u32,
508 SubAnalysis::Similar => summary.similar_pairs = items.len() as u32,
509 }
510}
511
512pub fn format_todo_text(report: &TodoReport, truncated: bool, total_items: usize) -> String {
518 let mut lines = Vec::new();
519
520 lines.push(format!("TODO Report for: {}", report.path));
521 lines.push(format!("Total items: {}", total_items));
522 lines.push(String::new());
523
524 lines.push("Summary:".to_string());
526 lines.push(format!(" Dead code items: {}", report.summary.dead_count));
527 lines.push(format!(
528 " Complexity hotspots: {}",
529 report.summary.hotspot_count
530 ));
531 lines.push(format!(
532 " Low cohesion classes: {}",
533 report.summary.low_cohesion_count
534 ));
535 lines.push(format!(
536 " Similar code pairs: {}",
537 report.summary.similar_pairs
538 ));
539 lines.push(format!(
540 " Equivalence groups: {}",
541 report.summary.equivalence_groups
542 ));
543 lines.push(String::new());
544
545 if report.items.is_empty() {
546 lines.push("No improvement items found.".to_string());
547 } else {
548 lines.push("Items (sorted by priority):".to_string());
549 lines.push(String::new());
550
551 for (i, item) in report.items.iter().enumerate() {
552 lines.push(format!(
553 "{}. [{}] {} (priority: {})",
554 i + 1,
555 item.category,
556 item.description,
557 item.priority
558 ));
559
560 if !item.file.is_empty() {
561 lines.push(format!(" Location: {}:{}", item.file, item.line));
562 }
563
564 if !item.severity.is_empty() {
565 lines.push(format!(" Severity: {}", item.severity));
566 }
567 }
568
569 if truncated {
570 let remaining = total_items - report.items.len();
571 lines.push(String::new());
572 lines.push(format!(
573 "... and {} more items. Use --max-items 0 to show all.",
574 remaining
575 ));
576 }
577 }
578
579 lines.push(String::new());
580 lines.push(format!("Analysis time: {:.2}ms", report.total_elapsed_ms));
581
582 lines.join("\n")
583}
584
#[cfg(test)]
mod tests {
    use super::*;

    // Name parsing: canonical names and the error path for unknown input.
    #[test]
    fn test_sub_analysis_from_str() {
        assert_eq!("dead".parse::<SubAnalysis>().unwrap(), SubAnalysis::Dead);
        assert_eq!(
            "complexity".parse::<SubAnalysis>().unwrap(),
            SubAnalysis::Complexity
        );
        assert_eq!(
            "cohesion".parse::<SubAnalysis>().unwrap(),
            SubAnalysis::Cohesion
        );
        assert!("unknown".parse::<SubAnalysis>().is_err());
    }

    // Priority ordering must follow the documented Dead < Complexity <
    // Cohesion progression.
    #[test]
    fn test_sub_analysis_priority() {
        assert!(SubAnalysis::Dead.priority() < SubAnalysis::Complexity.priority());
        assert!(SubAnalysis::Complexity.priority() < SubAnalysis::Cohesion.priority());
    }

    // --quick must drop the expensive Similar analysis but nothing else
    // relative to the full set.
    #[test]
    fn test_quick_mode_skips_similar() {
        let quick = SubAnalysis::quick();
        let all = SubAnalysis::all();

        assert!(quick.len() < all.len());
        assert!(!quick.contains(&SubAnalysis::Similar));
        assert!(all.contains(&SubAnalysis::Similar));
    }

    // Smoke test for the text renderer: header, summary count, and item
    // description all appear.
    #[test]
    fn test_format_todo_text() {
        let mut report = TodoReport::new("/path/to/project");
        report
            .items
            .push(TodoItem::new("dead_code", 1, "Unused function"));
        report.summary.dead_count = 1;
        report.total_elapsed_ms = 100.5;

        let text = format_todo_text(&report, false, 1);
        assert!(text.contains("TODO Report"));
        assert!(text.contains("Dead code items: 1"));
        assert!(text.contains("Unused function"));
    }

    // Clap wiring: max_items defaults to 20 when the flag is absent.
    #[test]
    fn test_todo_args_max_items_default() {
        use clap::Parser;

        #[derive(Debug, Parser)]
        struct Wrapper {
            #[command(flatten)]
            todo: TodoArgs,
        }

        let w = Wrapper::parse_from(["test", "src/"]);
        assert_eq!(w.todo.max_items, 20, "default max_items should be 20");
    }

    // Clap wiring: an explicit --max-items value overrides the default.
    #[test]
    fn test_todo_args_max_items_flag() {
        use clap::Parser;

        #[derive(Debug, Parser)]
        struct Wrapper {
            #[command(flatten)]
            todo: TodoArgs,
        }

        let w = Wrapper::parse_from(["test", "src/", "--max-items", "10"]);
        assert_eq!(w.todo.max_items, 10);
    }

    // End-to-end truncation behavior: only the first max_items items render,
    // and the "... and N more" hint mentions --max-items 0.
    #[test]
    fn test_todo_output_respects_max_items() {
        let mut report = TodoReport::new("/path/to/project");
        for i in 0..20 {
            report.items.push(TodoItem::new(
                "dead_code",
                1,
                format!("Unused function: fn_{}", i),
            ));
        }
        report.summary.dead_count = 20;
        report.total_elapsed_ms = 50.0;

        // Mirrors the truncation logic in TodoArgs::run.
        let max_items: usize = 5;
        let total = report.items.len();
        let truncated = total > max_items && max_items > 0;
        if truncated {
            report.items.truncate(max_items);
        }

        let text = format_todo_text(&report, truncated, total);
        assert!(text.contains("1. [dead_code]"));
        assert!(text.contains("5. [dead_code]"));
        assert!(!text.contains("6. [dead_code]"));
        assert!(text.contains("... and 15 more items"));
        assert!(text.contains("--max-items 0"));
    }

    // No truncation hint should appear when the list was not capped.
    #[test]
    fn test_todo_output_no_truncation_message_when_not_truncated() {
        let mut report = TodoReport::new("/path/to/project");
        for i in 0..3 {
            report.items.push(TodoItem::new(
                "dead_code",
                1,
                format!("Unused function: fn_{}", i),
            ));
        }
        report.summary.dead_count = 3;
        report.total_elapsed_ms = 10.0;

        let text = format_todo_text(&report, false, 3);
        assert!(!text.contains("... and"));
        assert!(!text.contains("--max-items"));
    }

    // Single-file detection: a .py extension maps to Python.
    #[test]
    fn test_detect_language_from_extension() {
        use std::fs::File;
        use tempfile::TempDir;

        let temp = TempDir::new().unwrap();
        let py_file = temp.path().join("test.py");
        File::create(&py_file).unwrap();

        let lang = detect_language(&py_file).unwrap();
        assert_eq!(lang, Language::Python);
    }

    // Refcount-based dead-code detection: an uncalled function is flagged,
    // a called one is not, and a non-null raw result is produced.
    #[test]
    fn test_run_dead_analysis_uses_refcount() {
        use std::fs;
        use tempfile::TempDir;

        let temp = TempDir::new().unwrap();
        let py_file = temp.path().join("sample.py");
        fs::write(
            &py_file,
            "def used_func():\n pass\n\ndef _dead_func():\n pass\n\nused_func()\n",
        )
        .unwrap();

        let (items, value) = run_dead_analysis(temp.path(), Language::Python).unwrap();
        let dead_names: Vec<&str> = items.iter().map(|i| i.description.as_str()).collect();
        assert!(
            dead_names.iter().any(|d| d.contains("_dead_func")),
            "Expected _dead_func to be reported as dead, got: {:?}",
            dead_names
        );
        assert!(
            !dead_names.iter().any(|d| d.contains("used_func")),
            "used_func should NOT be reported as dead, got: {:?}",
            dead_names
        );
        assert!(!value.is_null(), "Expected non-null result value");
    }
}