// code_analyze_mcp — src/lib.rs
//! Rust MCP server for code structure analysis using tree-sitter.
//!
//! This crate provides three analysis modes for multiple programming languages:
//!
//! - **Overview**: Directory tree with file counts and structure
//! - **FileDetails**: Semantic extraction (functions, classes, assignments, references)
//! - **SymbolFocus**: Call graphs and dataflow (planned)
//!
//! Key types:
//! - [`analyze::analyze_directory`]: Analyze entire directory tree
//! - [`analyze::analyze_file`]: Analyze single file
//! - [`parser::ElementExtractor`]: Parse language-specific elements
//!
//! Languages supported: Rust, Go, Java, Python, TypeScript.

16pub mod analyze;
17pub mod cache;
18pub mod completion;
19pub mod formatter;
20pub mod graph;
21pub mod lang;
22pub mod languages;
23pub mod logging;
24pub mod metrics;
25pub mod pagination;
26pub mod parser;
27pub(crate) mod schema_helpers;
28pub mod test_detection;
29pub mod traversal;
30pub mod types;
31
32use cache::AnalysisCache;
33use formatter::{
34    format_file_details_paginated, format_file_details_summary, format_focused_paginated,
35    format_module_info, format_structure_paginated, format_summary,
36};
37use logging::LogEvent;
38use pagination::{
39    CursorData, DEFAULT_PAGE_SIZE, PaginationMode, decode_cursor, encode_cursor, paginate_slice,
40};
41use rmcp::handler::server::tool::{ToolRouter, schema_for_type};
42use rmcp::handler::server::wrapper::Parameters;
43use rmcp::model::{
44    CallToolResult, CancelledNotificationParam, CompleteRequestParams, CompleteResult,
45    CompletionInfo, Content, ErrorData, Implementation, InitializeResult, LoggingLevel,
46    LoggingMessageNotificationParam, Meta, Notification, NumberOrString, ProgressNotificationParam,
47    ProgressToken, ServerCapabilities, ServerNotification, SetLevelRequestParams,
48};
49use rmcp::service::{NotificationContext, RequestContext};
50use rmcp::{Peer, RoleServer, ServerHandler, tool, tool_handler, tool_router};
51use serde_json::Value;
52use std::path::Path;
53use std::sync::{Arc, Mutex};
54use tokio::sync::{Mutex as TokioMutex, mpsc};
55use tracing::{instrument, warn};
56use tracing_subscriber::filter::LevelFilter;
57use traversal::walk_directory;
58use types::{
59    AnalysisMode, AnalyzeDirectoryParams, AnalyzeFileParams, AnalyzeModuleParams,
60    AnalyzeSymbolParams,
61};
62
/// Process-wide atomic counter; presumably used to number sessions —
/// not referenced in this part of the file (TODO(review): confirm at call sites).
static GLOBAL_SESSION_COUNTER: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);

/// Character-count threshold above which tool output is auto-summarized, or
/// rejected unless the caller passes `force=true`.
const SIZE_LIMIT: usize = 50_000;
66
/// Reports whether the caller explicitly asked for summary output while also
/// supplying a pagination cursor — a combination the tools reject as invalid.
pub fn summary_cursor_conflict(summary: Option<bool>, cursor: Option<&str>) -> bool {
    matches!(summary, Some(true)) && cursor.is_some()
}
70
71fn error_meta(
72    category: &'static str,
73    is_retryable: bool,
74    suggested_action: &'static str,
75) -> Option<serde_json::Value> {
76    Some(serde_json::json!({
77        "errorCategory": category,
78        "isRetryable": is_retryable,
79        "suggestedAction": suggested_action,
80    }))
81}
82
83fn err_to_tool_result(e: ErrorData) -> CallToolResult {
84    CallToolResult::error(vec![Content::text(e.message)])
85}
86
87fn no_cache_meta() -> Meta {
88    let mut m = serde_json::Map::new();
89    m.insert(
90        "cache_hint".to_string(),
91        serde_json::Value::String("no-cache".to_string()),
92    );
93    Meta(m)
94}
95
96/// Helper function for paginating focus chains (callers or callees).
97/// Returns (items, re-encoded_cursor_option).
98fn paginate_focus_chains(
99    chains: &[graph::CallChain],
100    mode: PaginationMode,
101    offset: usize,
102    page_size: usize,
103) -> Result<(Vec<graph::CallChain>, Option<String>), ErrorData> {
104    let paginated = paginate_slice(chains, offset, page_size, mode).map_err(|e| {
105        ErrorData::new(
106            rmcp::model::ErrorCode::INTERNAL_ERROR,
107            e.to_string(),
108            error_meta("transient", true, "retry the request"),
109        )
110    })?;
111
112    if paginated.next_cursor.is_none() && offset == 0 {
113        return Ok((paginated.items, None));
114    }
115
116    let next = if let Some(raw_cursor) = paginated.next_cursor {
117        let decoded = decode_cursor(&raw_cursor).map_err(|e| {
118            ErrorData::new(
119                rmcp::model::ErrorCode::INVALID_PARAMS,
120                e.to_string(),
121                error_meta("validation", false, "invalid cursor format"),
122            )
123        })?;
124        Some(
125            encode_cursor(&CursorData {
126                mode,
127                offset: decoded.offset,
128            })
129            .map_err(|e| {
130                ErrorData::new(
131                    rmcp::model::ErrorCode::INVALID_PARAMS,
132                    e.to_string(),
133                    error_meta("validation", false, "invalid cursor format"),
134                )
135            })?,
136        )
137    } else {
138        None
139    };
140
141    Ok((paginated.items, next))
142}
143
/// MCP server handler owning per-session state and dispatching the
/// code-analysis tools.
#[derive(Clone)]
pub struct CodeAnalyzer {
    /// Tool dispatch table generated by `#[tool_router]`.
    tool_router: ToolRouter<Self>,
    /// Cache of per-file analysis results (capacity set in `new`; keyed by
    /// path + mtime + mode, see `handle_file_details_mode`).
    cache: AnalysisCache,
    /// Connected client peer, if any; used to send progress notifications.
    peer: Arc<TokioMutex<Option<Peer<RoleServer>>>>,
    /// Current logging level filter shared with the logging subsystem.
    log_level_filter: Arc<Mutex<LevelFilter>>,
    /// Receiver for log events; `Option` so a consumer can take ownership —
    /// presumably drained by a forwarding task (TODO(review): confirm).
    event_rx: Arc<TokioMutex<Option<mpsc::UnboundedReceiver<LogEvent>>>>,
    /// Channel for emitting per-call metric events.
    metrics_tx: crate::metrics::MetricsSender,
    /// Monotonic sequence number of tool calls within this handler instance.
    session_call_seq: Arc<std::sync::atomic::AtomicU32>,
    /// Session identifier, `None` until assigned.
    session_id: Arc<TokioMutex<Option<String>>>,
}
155
156#[tool_router]
157impl CodeAnalyzer {
158    pub fn list_tools() -> Vec<rmcp::model::Tool> {
159        Self::tool_router().list_all()
160    }
161
162    pub fn new(
163        peer: Arc<TokioMutex<Option<Peer<RoleServer>>>>,
164        log_level_filter: Arc<Mutex<LevelFilter>>,
165        event_rx: mpsc::UnboundedReceiver<LogEvent>,
166        metrics_tx: crate::metrics::MetricsSender,
167    ) -> Self {
168        CodeAnalyzer {
169            tool_router: Self::tool_router(),
170            cache: AnalysisCache::new(100),
171            peer,
172            log_level_filter,
173            event_rx: Arc::new(TokioMutex::new(Some(event_rx))),
174            metrics_tx,
175            session_call_seq: Arc::new(std::sync::atomic::AtomicU32::new(0)),
176            session_id: Arc::new(TokioMutex::new(None)),
177        }
178    }
179
180    #[instrument(skip(self))]
181    async fn emit_progress(
182        &self,
183        peer: Option<Peer<RoleServer>>,
184        token: &ProgressToken,
185        progress: f64,
186        total: f64,
187        message: String,
188    ) {
189        if let Some(peer) = peer {
190            let notification = ServerNotification::ProgressNotification(Notification::new(
191                ProgressNotificationParam {
192                    progress_token: token.clone(),
193                    progress,
194                    total: Some(total),
195                    message: Some(message),
196                },
197            ));
198            if let Err(e) = peer.send_notification(notification).await {
199                warn!("Failed to send progress notification: {}", e);
200            }
201        }
202    }
203
    /// Private helper: Extract analysis logic for overview mode (analyze_directory).
    /// Returns the complete analysis output after spawning and monitoring progress.
    /// Cancels the blocking task when `ct` is triggered; returns an error on cancellation.
    ///
    /// Progress is tracked by sharing an atomic counter with the blocking
    /// analysis task and polling it every 100ms from this async context.
    #[instrument(skip(self, params, ct))]
    async fn handle_overview_mode(
        &self,
        params: &AnalyzeDirectoryParams,
        ct: tokio_util::sync::CancellationToken,
    ) -> Result<analyze::AnalysisOutput, ErrorData> {
        let path = Path::new(&params.path);
        // Shared counter: the analysis task increments it as files are processed.
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
        let counter_clone = counter.clone();
        let path_owned = path.to_path_buf();
        let max_depth = params.max_depth;
        let ct_clone = ct.clone();

        // Collect entries once for analysis
        let entries = walk_directory(path, max_depth).map_err(|e| {
            ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                format!("Failed to walk directory: {}", e),
                error_meta("resource", false, "check path permissions and availability"),
            )
        })?;

        // Get total file count for progress reporting
        let total_files = entries.iter().filter(|e| !e.is_dir).count();

        // Spawn blocking analysis with progress tracking
        let handle = tokio::task::spawn_blocking(move || {
            analyze::analyze_directory_with_progress(&path_owned, entries, counter_clone, ct_clone)
        });

        // Poll and emit progress every 100ms
        // Token is made unique via a nanosecond timestamp (falls back to 0 if
        // the system clock reads before the UNIX epoch).
        let token = ProgressToken(NumberOrString::String(
            format!(
                "analyze-overview-{}",
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_nanos())
                    .unwrap_or(0)
            )
            .into(),
        ));
        let peer = self.peer.lock().await.clone();
        let mut last_progress = 0usize;
        let mut cancelled = false;
        loop {
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
            if ct.is_cancelled() {
                cancelled = true;
                break;
            }
            let current = counter.load(std::sync::atomic::Ordering::Relaxed);
            // Only notify when the count moved, to avoid duplicate messages.
            if current != last_progress && total_files > 0 {
                self.emit_progress(
                    peer.clone(),
                    &token,
                    current as f64,
                    total_files as f64,
                    format!("Analyzing {}/{} files", current, total_files),
                )
                .await;
                last_progress = current;
            }
            if handle.is_finished() {
                break;
            }
        }

        // Emit final 100% progress only if not cancelled
        if !cancelled && total_files > 0 {
            self.emit_progress(
                peer.clone(),
                &token,
                total_files as f64,
                total_files as f64,
                format!("Completed analyzing {} files", total_files),
            )
            .await;
        }

        // Join the blocking task and map each failure mode to a tool error:
        // cancellation and join errors are retryable; analysis errors are not.
        match handle.await {
            Ok(Ok(output)) => Ok(output),
            Ok(Err(analyze::AnalyzeError::Cancelled)) => Err(ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                "Analysis cancelled".to_string(),
                error_meta("transient", true, "analysis was cancelled"),
            )),
            Ok(Err(e)) => Err(ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                format!("Error analyzing directory: {}", e),
                error_meta("resource", false, "check path and file permissions"),
            )),
            Err(e) => Err(ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                format!("Task join error: {}", e),
                error_meta("transient", true, "retry the request"),
            )),
        }
    }
305
306    /// Private helper: Extract analysis logic for file details mode (analyze_file).
307    /// Returns the cached or newly analyzed file output.
308    #[instrument(skip(self, params))]
309    async fn handle_file_details_mode(
310        &self,
311        params: &AnalyzeFileParams,
312    ) -> Result<std::sync::Arc<analyze::FileAnalysisOutput>, ErrorData> {
313        // Build cache key from file metadata
314        let cache_key = std::fs::metadata(&params.path).ok().and_then(|meta| {
315            meta.modified().ok().map(|mtime| cache::CacheKey {
316                path: std::path::PathBuf::from(&params.path),
317                modified: mtime,
318                mode: AnalysisMode::FileDetails,
319            })
320        });
321
322        // Check cache first
323        if let Some(ref key) = cache_key
324            && let Some(cached) = self.cache.get(key)
325        {
326            return Ok(cached);
327        }
328
329        // Cache miss or no cache key, analyze and optionally store
330        match analyze::analyze_file(&params.path, params.ast_recursion_limit) {
331            Ok(output) => {
332                let arc_output = std::sync::Arc::new(output);
333                if let Some(ref key) = cache_key {
334                    self.cache.put(key.clone(), arc_output.clone());
335                }
336                Ok(arc_output)
337            }
338            Err(e) => Err(ErrorData::new(
339                rmcp::model::ErrorCode::INTERNAL_ERROR,
340                format!("Error analyzing file: {}", e),
341                error_meta("resource", false, "check file path and permissions"),
342            )),
343        }
344    }
345
    /// Private helper: Extract analysis logic for focused mode (analyze_symbol).
    /// Returns the complete focused analysis output after spawning and monitoring progress.
    /// Cancels the blocking task when `ct` is triggered; returns an error on cancellation.
    ///
    /// Size handling: if the caller gave no explicit `summary` and the output
    /// exceeds `SIZE_LIMIT`, the analysis is re-run once in summary mode; an
    /// explicit `summary=false` with oversized output is an error unless
    /// `force=true`.
    #[instrument(skip(self, params, ct))]
    async fn handle_focused_mode(
        &self,
        params: &AnalyzeSymbolParams,
        ct: tokio_util::sync::CancellationToken,
    ) -> Result<analyze::FocusedAnalysisOutput, ErrorData> {
        let follow_depth = params.follow_depth.unwrap_or(1);
        // Shared counter: the analysis task increments it as files are processed.
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
        let counter_clone = counter.clone();
        let path = Path::new(&params.path);
        let path_owned = path.to_path_buf();
        let max_depth = params.max_depth;
        let symbol_owned = params.symbol.clone();
        let match_mode = params.match_mode.clone().unwrap_or_default();
        let ast_recursion_limit = params.ast_recursion_limit;
        let ct_clone = ct.clone();

        // Compute use_summary before spawning: explicit params only
        let use_summary_for_task = params.output_control.force != Some(true)
            && params.output_control.summary == Some(true);

        // Get total file count for progress reporting
        // A walk failure here only disables progress totals; analysis proceeds.
        let total_files = match walk_directory(path, max_depth) {
            Ok(entries) => entries.iter().filter(|e| !e.is_dir).count(),
            Err(_) => 0,
        };

        // Spawn blocking analysis with progress tracking
        let handle = tokio::task::spawn_blocking(move || {
            analyze::analyze_focused_with_progress(
                &path_owned,
                &symbol_owned,
                match_mode,
                follow_depth,
                max_depth,
                ast_recursion_limit,
                counter_clone,
                ct_clone,
                use_summary_for_task,
            )
        });

        // Poll and emit progress every 100ms
        // Token is made unique via a nanosecond timestamp (0 on clock error).
        let token = ProgressToken(NumberOrString::String(
            format!(
                "analyze-symbol-{}",
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_nanos())
                    .unwrap_or(0)
            )
            .into(),
        ));
        let peer = self.peer.lock().await.clone();
        let mut last_progress = 0usize;
        let mut cancelled = false;
        loop {
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
            if ct.is_cancelled() {
                cancelled = true;
                break;
            }
            let current = counter.load(std::sync::atomic::Ordering::Relaxed);
            // Only notify when the count moved, to avoid duplicate messages.
            if current != last_progress && total_files > 0 {
                self.emit_progress(
                    peer.clone(),
                    &token,
                    current as f64,
                    total_files as f64,
                    format!(
                        "Analyzing {}/{} files for symbol '{}'",
                        current, total_files, params.symbol
                    ),
                )
                .await;
                last_progress = current;
            }
            if handle.is_finished() {
                break;
            }
        }

        // Emit final 100% progress only if not cancelled
        if !cancelled && total_files > 0 {
            self.emit_progress(
                peer.clone(),
                &token,
                total_files as f64,
                total_files as f64,
                format!(
                    "Completed analyzing {} files for symbol '{}'",
                    total_files, params.symbol
                ),
            )
            .await;
        }

        // Join the blocking task, mapping each failure mode to a tool error.
        let mut output = match handle.await {
            Ok(Ok(output)) => output,
            Ok(Err(analyze::AnalyzeError::Cancelled)) => {
                return Err(ErrorData::new(
                    rmcp::model::ErrorCode::INTERNAL_ERROR,
                    "Analysis cancelled".to_string(),
                    error_meta("transient", true, "analysis was cancelled"),
                ));
            }
            Ok(Err(e)) => {
                return Err(ErrorData::new(
                    rmcp::model::ErrorCode::INTERNAL_ERROR,
                    format!("Error analyzing symbol: {}", e),
                    error_meta("resource", false, "check symbol name and file"),
                ));
            }
            Err(e) => {
                return Err(ErrorData::new(
                    rmcp::model::ErrorCode::INTERNAL_ERROR,
                    format!("Task join error: {}", e),
                    error_meta("transient", true, "retry the request"),
                ));
            }
        };

        // Auto-detect: if no explicit summary param and output exceeds limit,
        // re-run analysis with use_summary=true
        if params.output_control.summary.is_none()
            && params.output_control.force != Some(true)
            && output.formatted.len() > SIZE_LIMIT
        {
            // Second pass re-derives all inputs; no progress reporting here.
            let path_owned2 = Path::new(&params.path).to_path_buf();
            let symbol_owned2 = params.symbol.clone();
            let match_mode2 = params.match_mode.clone().unwrap_or_default();
            let follow_depth2 = params.follow_depth.unwrap_or(1);
            let max_depth2 = params.max_depth;
            let ast_recursion_limit2 = params.ast_recursion_limit;
            let counter2 = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
            let ct2 = ct.clone();
            let summary_result = tokio::task::spawn_blocking(move || {
                analyze::analyze_focused_with_progress(
                    &path_owned2,
                    &symbol_owned2,
                    match_mode2,
                    follow_depth2,
                    max_depth2,
                    ast_recursion_limit2,
                    counter2,
                    ct2,
                    true, // use_summary=true
                )
            })
            .await;
            match summary_result {
                Ok(Ok(summary_output)) => {
                    // Keep the full output's other fields; swap in the summary text.
                    output.formatted = summary_output.formatted;
                }
                _ => {
                    // Fallback: return error (summary generation failed)
                    // ~4 chars per token is the rough estimate used throughout.
                    let estimated_tokens = output.formatted.len() / 4;
                    let message = format!(
                        "Output exceeds 50K chars ({} chars, ~{} tokens). Use summary=true or force=true.",
                        output.formatted.len(),
                        estimated_tokens
                    );
                    return Err(ErrorData::new(
                        rmcp::model::ErrorCode::INVALID_PARAMS,
                        message,
                        error_meta("validation", false, "use summary=true or force=true"),
                    ));
                }
            }
        } else if output.formatted.len() > SIZE_LIMIT
            && params.output_control.force != Some(true)
            && params.output_control.summary == Some(false)
        {
            // Explicit summary=false with large output: return error
            let estimated_tokens = output.formatted.len() / 4;
            let message = format!(
                "Output exceeds 50K chars ({} chars, ~{} tokens). Use one of:\n\
                 - force=true to return full output\n\
                 - summary=true to get compact summary\n\
                 - Narrow your scope (smaller directory, specific file)",
                output.formatted.len(),
                estimated_tokens
            );
            return Err(ErrorData::new(
                rmcp::model::ErrorCode::INVALID_PARAMS,
                message,
                error_meta(
                    "validation",
                    false,
                    "use force=true, summary=true, or narrow scope",
                ),
            ));
        }

        Ok(output)
    }
545
    #[instrument(skip(self, context))]
    #[tool(
        name = "analyze_directory",
        description = "Analyze directory structure and code metrics for multi-file overview. Use this tool for directories; use analyze_file for a single file. Returns a tree with LOC, function count, class count, and test file markers. Respects .gitignore (results may differ from raw filesystem listing because .gitignore rules are applied). For repos with 1000+ files, use max_depth=2-3 and summary=true to stay within token budgets. Note: max_depth controls what is analyzed (traversal depth), while page_size controls how results are returned (chunking); these are independent. Strategy comparison: prefer pagination (page_size=50) over force=true to reduce per-call token overhead; use summary=true when counts and structure are sufficient and no pagination is needed; force=true is an escape hatch for exceptional cases. Empty directories return an empty tree with zero counts. Output auto-summarizes at 50K chars; use summary=true to force compact output. Paginate large results with cursor and page_size. Example queries: Analyze the src/ directory to understand module structure; What files are in the tests/ directory and how large are they? summary=true and cursor are mutually exclusive; passing both returns an error.",
        output_schema = schema_for_type::<analyze::AnalysisOutput>(),
        annotations(
            title = "Analyze Directory",
            read_only_hint = true,
            destructive_hint = false,
            idempotent_hint = true,
            open_world_hint = false
        )
    )]
    async fn analyze_directory(
        &self,
        params: Parameters<AnalyzeDirectoryParams>,
        context: RequestContext<RoleServer>,
    ) -> Result<CallToolResult, ErrorData> {
        let params = params.0;
        let ct = context.ct.clone();
        // Captured up front for the metrics event emitted at the end.
        let _t_start = std::time::Instant::now();
        let _param_path = params.path.clone();
        let _max_depth_val = params.max_depth;
        let _seq = self
            .session_call_seq
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        let _sid = self.session_id.lock().await.clone();

        // Call handler for analysis and progress tracking
        let mut output = match self.handle_overview_mode(&params, ct).await {
            Ok(v) => v,
            Err(e) => return Ok(err_to_tool_result(e)),
        };

        // summary=true (explicit) and cursor are mutually exclusive.
        // Auto-summarization (summary=None + large output) must NOT block cursor pagination.
        if summary_cursor_conflict(
            params.output_control.summary,
            params.pagination.cursor.as_deref(),
        ) {
            return Ok(err_to_tool_result(ErrorData::new(
                rmcp::model::ErrorCode::INVALID_PARAMS,
                "summary=true is incompatible with a pagination cursor; use one or the other"
                    .to_string(),
                error_meta("validation", false, "remove cursor or set summary=false"),
            )));
        }

        // Apply summary/output size limiting logic
        // Precedence: force=true > explicit summary flag > auto (size-based).
        let use_summary = if params.output_control.force == Some(true) {
            false
        } else if params.output_control.summary == Some(true) {
            true
        } else if params.output_control.summary == Some(false) {
            false
        } else {
            output.formatted.len() > SIZE_LIMIT
        };

        if use_summary {
            output.formatted = format_summary(
                &output.entries,
                &output.files,
                params.max_depth,
                Some(Path::new(&params.path)),
            );
        }

        // Decode pagination cursor if provided
        let page_size = params.pagination.page_size.unwrap_or(DEFAULT_PAGE_SIZE);
        let offset = if let Some(ref cursor_str) = params.pagination.cursor {
            let cursor_data = match decode_cursor(cursor_str).map_err(|e| {
                ErrorData::new(
                    rmcp::model::ErrorCode::INVALID_PARAMS,
                    e.to_string(),
                    error_meta("validation", false, "invalid cursor format"),
                )
            }) {
                Ok(v) => v,
                Err(e) => return Ok(err_to_tool_result(e)),
            };
            cursor_data.offset
        } else {
            0
        };

        // Apply pagination to files
        let paginated =
            match paginate_slice(&output.files, offset, page_size, PaginationMode::Default) {
                Ok(v) => v,
                Err(e) => {
                    return Ok(err_to_tool_result(ErrorData::new(
                        rmcp::model::ErrorCode::INTERNAL_ERROR,
                        e.to_string(),
                        error_meta("transient", true, "retry the request"),
                    )));
                }
            };

        let verbose = params.output_control.verbose.unwrap_or(false);
        // Non-summary output is re-formatted from the paginated page only.
        if !use_summary {
            output.formatted = format_structure_paginated(
                &paginated.items,
                paginated.total,
                params.max_depth,
                Some(Path::new(&params.path)),
                verbose,
            );
        }

        // Update next_cursor in output after pagination (unless using summary mode)
        if use_summary {
            output.next_cursor = None;
        } else {
            output.next_cursor = paginated.next_cursor.clone();
        }

        // Build final text output with pagination cursor if present (unless using summary mode)
        let mut final_text = output.formatted.clone();
        if !use_summary && let Some(cursor) = paginated.next_cursor {
            final_text.push('\n');
            final_text.push_str(&format!("NEXT_CURSOR: {}", cursor));
        }

        // Success: text content plus structured JSON, marked non-cacheable.
        let mut result = CallToolResult::success(vec![Content::text(final_text.clone())])
            .with_meta(Some(no_cache_meta()));
        let structured = serde_json::to_value(&output).unwrap_or(Value::Null);
        result.structured_content = Some(structured);
        let _dur = _t_start.elapsed().as_millis() as u64;
        self.metrics_tx.send(crate::metrics::MetricEvent {
            ts: crate::metrics::unix_ms(),
            tool: "analyze_directory",
            duration_ms: _dur,
            output_chars: final_text.chars().count(),
            param_path_depth: crate::metrics::path_component_count(&_param_path),
            max_depth: _max_depth_val,
            result: "ok",
            error_type: None,
            session_id: _sid,
            seq: Some(_seq),
        });
        Ok(result)
    }
689
690    #[instrument(skip(self, context))]
691    #[tool(
692        name = "analyze_file",
693        description = "Extract semantic structure from a single source file only; pass a directory to analyze_directory instead. Returns functions with signatures, types, and line ranges; class and method definitions with inheritance, fields, and imports. Supported languages: Rust, Go, Java, Python, TypeScript, TSX; unsupported file extensions return an error. Common mistake: passing a directory path returns an error; use analyze_directory for directories. Generated code with deeply nested ASTs may exceed 50K chars; use summary=true to get counts only. Supports pagination for large files via cursor/page_size. Use summary=true for compact output. Example queries: What functions are defined in src/lib.rs?; Show me the classes and their methods in src/analyzer.py",
694        output_schema = schema_for_type::<analyze::FileAnalysisOutput>(),
695        annotations(
696            title = "Analyze File",
697            read_only_hint = true,
698            destructive_hint = false,
699            idempotent_hint = true,
700            open_world_hint = false
701        )
702    )]
703    async fn analyze_file(
704        &self,
705        params: Parameters<AnalyzeFileParams>,
706        context: RequestContext<RoleServer>,
707    ) -> Result<CallToolResult, ErrorData> {
708        let params = params.0;
709        let _ct = context.ct.clone();
710        let _t_start = std::time::Instant::now();
711        let _param_path = params.path.clone();
712        let _seq = self
713            .session_call_seq
714            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
715        let _sid = self.session_id.lock().await.clone();
716
717        // Call handler for analysis and caching
718        let arc_output = match self.handle_file_details_mode(&params).await {
719            Ok(v) => v,
720            Err(e) => return Ok(err_to_tool_result(e)),
721        };
722
723        // Clone only the two fields that may be mutated per-request (formatted and
724        // next_cursor). The heavy SemanticAnalysis data is shared via Arc and never
725        // modified, so we borrow it directly from the cached pointer.
726        let mut formatted = arc_output.formatted.clone();
727        let line_count = arc_output.line_count;
728
729        // Apply summary/output size limiting logic
730        let use_summary = if params.output_control.force == Some(true) {
731            false
732        } else if params.output_control.summary == Some(true) {
733            true
734        } else if params.output_control.summary == Some(false) {
735            false
736        } else {
737            formatted.len() > SIZE_LIMIT
738        };
739
740        if use_summary {
741            formatted = format_file_details_summary(&arc_output.semantic, &params.path, line_count);
742        } else if formatted.len() > SIZE_LIMIT && params.output_control.force != Some(true) {
743            let estimated_tokens = formatted.len() / 4;
744            let message = format!(
745                "Output exceeds 50K chars ({} chars, ~{} tokens). Use one of:\n\
746                 - force=true to return full output\n\
747                 - Narrow your scope (smaller directory, specific file)\n\
748                 - Use analyze_symbol mode for targeted analysis\n\
749                 - Reduce max_depth parameter",
750                formatted.len(),
751                estimated_tokens
752            );
753            return Ok(err_to_tool_result(ErrorData::new(
754                rmcp::model::ErrorCode::INVALID_PARAMS,
755                message,
756                error_meta("validation", false, "use force=true or narrow scope"),
757            )));
758        }
759
760        // Decode pagination cursor if provided (analyze_file)
761        let page_size = params.pagination.page_size.unwrap_or(DEFAULT_PAGE_SIZE);
762        let offset = if let Some(ref cursor_str) = params.pagination.cursor {
763            let cursor_data = match decode_cursor(cursor_str).map_err(|e| {
764                ErrorData::new(
765                    rmcp::model::ErrorCode::INVALID_PARAMS,
766                    e.to_string(),
767                    error_meta("validation", false, "invalid cursor format"),
768                )
769            }) {
770                Ok(v) => v,
771                Err(e) => return Ok(err_to_tool_result(e)),
772            };
773            cursor_data.offset
774        } else {
775            0
776        };
777
778        // Paginate functions
779        let paginated = match paginate_slice(
780            &arc_output.semantic.functions,
781            offset,
782            page_size,
783            PaginationMode::Default,
784        ) {
785            Ok(v) => v,
786            Err(e) => {
787                return Ok(err_to_tool_result(ErrorData::new(
788                    rmcp::model::ErrorCode::INTERNAL_ERROR,
789                    e.to_string(),
790                    error_meta("transient", true, "retry the request"),
791                )));
792            }
793        };
794
795        // Regenerate formatted output using the paginated formatter (handles verbose and pagination correctly)
796        let verbose = params.output_control.verbose.unwrap_or(false);
797        if !use_summary {
798            formatted = format_file_details_paginated(
799                &paginated.items,
800                paginated.total,
801                &arc_output.semantic,
802                &params.path,
803                line_count,
804                offset,
805                verbose,
806            );
807        }
808
809        // Capture next_cursor from pagination result (unless using summary mode)
810        let next_cursor = if use_summary {
811            None
812        } else {
813            paginated.next_cursor.clone()
814        };
815
816        // Build final text output with pagination cursor if present (unless using summary mode)
817        let mut final_text = formatted.clone();
818        if !use_summary && let Some(ref cursor) = next_cursor {
819            final_text.push('\n');
820            final_text.push_str(&format!("NEXT_CURSOR: {}", cursor));
821        }
822
823        // Build the response output, sharing SemanticAnalysis from the Arc to avoid cloning it.
824        let response_output = analyze::FileAnalysisOutput {
825            formatted,
826            semantic: arc_output.semantic.clone(),
827            line_count,
828            next_cursor,
829        };
830
831        let mut result = CallToolResult::success(vec![Content::text(final_text.clone())])
832            .with_meta(Some(no_cache_meta()));
833        let structured = serde_json::to_value(&response_output).unwrap_or(Value::Null);
834        result.structured_content = Some(structured);
835        let _dur = _t_start.elapsed().as_millis() as u64;
836        self.metrics_tx.send(crate::metrics::MetricEvent {
837            ts: crate::metrics::unix_ms(),
838            tool: "analyze_file",
839            duration_ms: _dur,
840            output_chars: final_text.chars().count(),
841            param_path_depth: crate::metrics::path_component_count(&_param_path),
842            max_depth: None,
843            result: "ok",
844            error_type: None,
845            session_id: _sid,
846            seq: Some(_seq),
847        });
848        Ok(result)
849    }
850
851    #[instrument(skip(self, context))]
852    #[tool(
853        name = "analyze_symbol",
854        description = "Build call graph for a named function or method across all files in a directory to trace a specific function's usage. Returns direct callers and callees. Default symbol lookup is case-sensitive exact-match (match_mode=exact); myFunc and myfunc are different symbols. If exact match fails, retry with match_mode=insensitive for a case-insensitive search. To list candidates matching a prefix, use match_mode=prefix. To find symbols containing a substring, use match_mode=contains. When prefix or contains matches multiple symbols, an error is returned listing all candidates so you can refine to a single match. A symbol unknown to the graph (not defined and not referenced) returns an error; a symbol that is defined but has no callers or callees returns empty chains without error. follow_depth warning: each increment can multiply output size exponentially; use follow_depth=1 for production use; follow_depth=2+ only for targeted deep dives. Use cursor/page_size to paginate call chains when results exceed page_size. Example queries: Find all callers of the parse_config function; Trace the call chain for MyClass.process_request up to 2 levels deep",
855        output_schema = schema_for_type::<analyze::FocusedAnalysisOutput>(),
856        annotations(
857            title = "Analyze Symbol",
858            read_only_hint = true,
859            destructive_hint = false,
860            idempotent_hint = true,
861            open_world_hint = false
862        )
863    )]
864    async fn analyze_symbol(
865        &self,
866        params: Parameters<AnalyzeSymbolParams>,
867        context: RequestContext<RoleServer>,
868    ) -> Result<CallToolResult, ErrorData> {
869        let params = params.0;
870        let ct = context.ct.clone();
871        let _t_start = std::time::Instant::now();
872        let _param_path = params.path.clone();
873        let _max_depth_val = params.follow_depth;
874        let _seq = self
875            .session_call_seq
876            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
877        let _sid = self.session_id.lock().await.clone();
878
879        // Call handler for analysis and progress tracking
880        let mut output = match self.handle_focused_mode(&params, ct).await {
881            Ok(v) => v,
882            Err(e) => return Ok(err_to_tool_result(e)),
883        };
884
885        // Decode pagination cursor if provided (analyze_symbol)
886        let page_size = params.pagination.page_size.unwrap_or(DEFAULT_PAGE_SIZE);
887        let offset = if let Some(ref cursor_str) = params.pagination.cursor {
888            let cursor_data = match decode_cursor(cursor_str).map_err(|e| {
889                ErrorData::new(
890                    rmcp::model::ErrorCode::INVALID_PARAMS,
891                    e.to_string(),
892                    error_meta("validation", false, "invalid cursor format"),
893                )
894            }) {
895                Ok(v) => v,
896                Err(e) => return Ok(err_to_tool_result(e)),
897            };
898            cursor_data.offset
899        } else {
900            0
901        };
902
903        // SymbolFocus pagination: decode cursor mode to determine callers vs callees
904        let cursor_mode = if let Some(ref cursor_str) = params.pagination.cursor {
905            decode_cursor(cursor_str)
906                .map(|c| c.mode)
907                .unwrap_or(PaginationMode::Callers)
908        } else {
909            PaginationMode::Callers
910        };
911
912        let paginated_next_cursor = match cursor_mode {
913            PaginationMode::Callers => {
914                let (paginated_items, paginated_next) = match paginate_focus_chains(
915                    &output.prod_chains,
916                    PaginationMode::Callers,
917                    offset,
918                    page_size,
919                ) {
920                    Ok(v) => v,
921                    Err(e) => return Ok(err_to_tool_result(e)),
922                };
923
924                let verbose = params.output_control.verbose.unwrap_or(false);
925                if paginated_next.is_some() || offset > 0 || !verbose {
926                    let base_path = Path::new(&params.path);
927                    output.formatted = format_focused_paginated(
928                        &paginated_items,
929                        output.prod_chains.len(),
930                        PaginationMode::Callers,
931                        &params.symbol,
932                        &output.prod_chains,
933                        &output.test_chains,
934                        &output.outgoing_chains,
935                        output.def_count,
936                        offset,
937                        Some(base_path),
938                        verbose,
939                    );
940                    paginated_next
941                } else {
942                    None
943                }
944            }
945            PaginationMode::Callees => {
946                let (paginated_items, paginated_next) = match paginate_focus_chains(
947                    &output.outgoing_chains,
948                    PaginationMode::Callees,
949                    offset,
950                    page_size,
951                ) {
952                    Ok(v) => v,
953                    Err(e) => return Ok(err_to_tool_result(e)),
954                };
955
956                let verbose = params.output_control.verbose.unwrap_or(false);
957                if paginated_next.is_some() || offset > 0 || !verbose {
958                    let base_path = Path::new(&params.path);
959                    output.formatted = format_focused_paginated(
960                        &paginated_items,
961                        output.outgoing_chains.len(),
962                        PaginationMode::Callees,
963                        &params.symbol,
964                        &output.prod_chains,
965                        &output.test_chains,
966                        &output.outgoing_chains,
967                        output.def_count,
968                        offset,
969                        Some(base_path),
970                        verbose,
971                    );
972                    paginated_next
973                } else {
974                    None
975                }
976            }
977            PaginationMode::Default => {
978                unreachable!("SymbolFocus should only use Callers or Callees modes")
979            }
980        };
981
982        // Build final text output with pagination cursor if present
983        let mut final_text = output.formatted.clone();
984        if let Some(cursor) = paginated_next_cursor {
985            final_text.push('\n');
986            final_text.push_str(&format!("NEXT_CURSOR: {}", cursor));
987        }
988
989        let mut result = CallToolResult::success(vec![Content::text(final_text.clone())])
990            .with_meta(Some(no_cache_meta()));
991        let structured = serde_json::to_value(&output).unwrap_or(Value::Null);
992        result.structured_content = Some(structured);
993        let _dur = _t_start.elapsed().as_millis() as u64;
994        self.metrics_tx.send(crate::metrics::MetricEvent {
995            ts: crate::metrics::unix_ms(),
996            tool: "analyze_symbol",
997            duration_ms: _dur,
998            output_chars: final_text.chars().count(),
999            param_path_depth: crate::metrics::path_component_count(&_param_path),
1000            max_depth: _max_depth_val,
1001            result: "ok",
1002            error_type: None,
1003            session_id: _sid,
1004            seq: Some(_seq),
1005        });
1006        Ok(result)
1007    }
1008
1009    #[instrument(skip(self))]
1010    #[tool(
1011        name = "analyze_module",
1012        description = "Index functions and imports in a single source file with minimal token cost. Returns name, line_count, language, function names with line numbers, and import list only -- no signatures, no types, no call graphs, no references. ~75% smaller output than analyze_file. Use analyze_file when you need function signatures, types, or class details; use analyze_module when you only need a function/import index to orient in a file or survey many files in sequence. Use analyze_directory for multi-file overviews; use analyze_symbol to trace call graphs for a specific function. Supported languages: Rust, Go, Java, Python, TypeScript, TSX; unsupported extensions return an error. Example queries: What functions are defined in src/analyze.rs?; List all imports in src/lib.rs. Pagination, summary, force, and verbose parameters are not supported by this tool.",
1013        output_schema = schema_for_type::<types::ModuleInfo>(),
1014        annotations(
1015            title = "Analyze Module",
1016            read_only_hint = true,
1017            destructive_hint = false,
1018            idempotent_hint = true,
1019            open_world_hint = false
1020        )
1021    )]
1022    async fn analyze_module(
1023        &self,
1024        params: Parameters<AnalyzeModuleParams>,
1025        _context: RequestContext<RoleServer>,
1026    ) -> Result<CallToolResult, ErrorData> {
1027        let params = params.0;
1028        let _t_start = std::time::Instant::now();
1029        let _param_path = params.path.clone();
1030        let _seq = self
1031            .session_call_seq
1032            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
1033        let _sid = self.session_id.lock().await.clone();
1034
1035        // Issue 340: Guard against directory paths
1036        if std::fs::metadata(&params.path)
1037            .map(|m| m.is_dir())
1038            .unwrap_or(false)
1039        {
1040            let _dur = _t_start.elapsed().as_millis() as u64;
1041            self.metrics_tx.send(crate::metrics::MetricEvent {
1042                ts: crate::metrics::unix_ms(),
1043                tool: "analyze_module",
1044                duration_ms: _dur,
1045                output_chars: 0,
1046                param_path_depth: crate::metrics::path_component_count(&_param_path),
1047                max_depth: None,
1048                result: "error",
1049                error_type: Some("invalid_params".to_string()),
1050                session_id: _sid.clone(),
1051                seq: Some(_seq),
1052            });
1053            return Ok(err_to_tool_result(ErrorData::new(
1054                rmcp::model::ErrorCode::INVALID_PARAMS,
1055                format!(
1056                    "'{}' is a directory. Use analyze_directory to analyze a directory, or pass a specific file path to analyze_module.",
1057                    params.path
1058                ),
1059                error_meta("validation", false, "use analyze_directory for directories"),
1060            )));
1061        }
1062
1063        let module_info = match analyze::analyze_module_file(&params.path).map_err(|e| {
1064            ErrorData::new(
1065                rmcp::model::ErrorCode::INVALID_PARAMS,
1066                format!("Failed to analyze module: {}", e),
1067                error_meta(
1068                    "validation",
1069                    false,
1070                    "ensure file exists, is readable, and has a supported extension",
1071                ),
1072            )
1073        }) {
1074            Ok(v) => v,
1075            Err(e) => return Ok(err_to_tool_result(e)),
1076        };
1077
1078        let text = format_module_info(&module_info);
1079        let mut result = CallToolResult::success(vec![Content::text(text.clone())])
1080            .with_meta(Some(no_cache_meta()));
1081        let structured = match serde_json::to_value(&module_info).map_err(|e| {
1082            ErrorData::new(
1083                rmcp::model::ErrorCode::INTERNAL_ERROR,
1084                format!("serialization failed: {}", e),
1085                error_meta("internal", false, "report this as a bug"),
1086            )
1087        }) {
1088            Ok(v) => v,
1089            Err(e) => return Ok(err_to_tool_result(e)),
1090        };
1091        result.structured_content = Some(structured);
1092        let _dur = _t_start.elapsed().as_millis() as u64;
1093        self.metrics_tx.send(crate::metrics::MetricEvent {
1094            ts: crate::metrics::unix_ms(),
1095            tool: "analyze_module",
1096            duration_ms: _dur,
1097            output_chars: text.chars().count(),
1098            param_path_depth: crate::metrics::path_component_count(&_param_path),
1099            max_depth: None,
1100            result: "ok",
1101            error_type: None,
1102            session_id: _sid,
1103            seq: Some(_seq),
1104        });
1105        Ok(result)
1106    }
1107}
1108
#[tool_handler]
// ServerHandler wiring for CodeAnalyzer: advertises server capabilities and
// instructions, manages per-connection session state (peer handle, session id,
// log-forwarding task), and services completion / log-level requests.
impl ServerHandler for CodeAnalyzer {
    // Capability advertisement plus a step-by-step workflow prompt shown to
    // clients at initialize time.
    fn get_info(&self) -> InitializeResult {
        // Directory names excluded from traversal, surfaced in the instructions.
        let excluded = crate::formatter::EXCLUDED_DIRS.join(", ");
        let instructions = format!(
            "Recommended workflow for unknown repositories:\n\
            1. Start with analyze_directory(path=<repo_root>, max_depth=2, summary=true) to identify the source package directory \
            (typically the largest directory by file count; exclude {excluded}).\n\
            2. Re-run analyze_directory(path=<source_package>, max_depth=2, summary=true) for a module map with per-package class and function counts.\n\
            3. Use analyze_file on key files identified in step 2 (prefer files with high class counts for framework entry points).\n\
            4. Use analyze_symbol to trace call graphs for specific functions found in step 3.\n\
            Use analyze_module for a minimal schema (name, line count, functions, imports) when token budget is critical. \
            Prefer summary=true on large directories (1000+ files). Set max_depth=2 for the first call; increase only if packages are too large to differentiate. \
            Paginate with cursor/page_size. For subagents: DISABLE_PROMPT_CACHING=1."
        );
        // Logging + tools (with list-changed notifications) + argument completions.
        let capabilities = ServerCapabilities::builder()
            .enable_logging()
            .enable_tools()
            .enable_tool_list_changed()
            .enable_completions()
            .build();
        let server_info = Implementation::new("code-analyze-mcp", env!("CARGO_PKG_VERSION"))
            .with_title("Code Analyze MCP")
            .with_description("MCP server for code structure analysis using tree-sitter");
        InitializeResult::new(capabilities)
            .with_server_info(server_info)
            .with_instructions(&instructions)
    }

    // Post-initialize hook: store the peer handle, mint a session id, reset the
    // call counter, and start the background task that forwards buffered log
    // events to the client.
    async fn on_initialized(&self, context: NotificationContext<RoleServer>) {
        // Store the peer so progress emitters and the log-forwarding task can
        // reach the client; the lock is dropped immediately after the write.
        let mut peer_lock = self.peer.lock().await;
        *peer_lock = Some(context.peer.clone());
        drop(peer_lock);

        // Generate session_id in MILLIS-N format
        let millis = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_millis() as u64;
        let counter = GLOBAL_SESSION_COUNTER.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        let sid = format!("{}-{}", millis, counter);
        {
            let mut session_id_lock = self.session_id.lock().await;
            *session_id_lock = Some(sid);
        }
        // Metrics seq numbers restart at 0 for each new session.
        self.session_call_seq
            .store(0, std::sync::atomic::Ordering::Relaxed);

        // Spawn consumer task to drain log events from channel with batching.
        let peer = self.peer.clone();
        let event_rx = self.event_rx.clone();

        tokio::spawn(async move {
            // Take ownership of the receiver; event_rx holds it only until the
            // first initialize, so a second take yields None and the task exits.
            let rx = {
                let mut rx_lock = event_rx.lock().await;
                rx_lock.take()
            };

            if let Some(mut receiver) = rx {
                let mut buffer = Vec::with_capacity(64);
                loop {
                    // Drain up to 64 events from channel
                    receiver.recv_many(&mut buffer, 64).await;

                    if buffer.is_empty() {
                        // Channel closed, exit consumer task
                        break;
                    }

                    // Acquire peer lock once per batch
                    let peer_lock = peer.lock().await;
                    // NOTE(review): if the peer were None here, the batch would
                    // stay in `buffer` and a subsequently closed channel would
                    // never trigger the break above. Presumably unreachable
                    // because the peer is stored before this task is spawned —
                    // verify.
                    if let Some(peer) = peer_lock.as_ref() {
                        for log_event in buffer.drain(..) {
                            let notification = ServerNotification::LoggingMessageNotification(
                                Notification::new(LoggingMessageNotificationParam {
                                    level: log_event.level,
                                    logger: Some(log_event.logger),
                                    data: log_event.data,
                                }),
                            );
                            // Best-effort delivery: a failed send is logged,
                            // not retried.
                            if let Err(e) = peer.send_notification(notification).await {
                                warn!("Failed to send logging notification: {}", e);
                            }
                        }
                    }
                }
            }
        });
    }

    // Cancellation is logged only; the per-request CancellationToken carried in
    // the RequestContext does the actual interruption.
    #[instrument(skip(self, _context))]
    async fn on_cancelled(
        &self,
        notification: CancelledNotificationParam,
        _context: NotificationContext<RoleServer>,
    ) {
        tracing::info!(
            request_id = ?notification.request_id,
            reason = ?notification.reason,
            "Received cancellation notification"
        );
    }

    // Argument completion for tool parameters: file-system paths and symbol
    // names (the latter served from the analysis cache).
    #[instrument(skip(self, _context))]
    async fn complete(
        &self,
        request: CompleteRequestParams,
        _context: RequestContext<RoleServer>,
    ) -> Result<CompleteResult, ErrorData> {
        // Dispatch on argument name: "path" or "symbol"
        let argument_name = &request.argument.name;
        let argument_value = &request.argument.value;

        let completions = match argument_name.as_str() {
            "path" => {
                // Path completions: use current directory as root
                let root = Path::new(".");
                completion::path_completions(root, argument_value)
            }
            "symbol" => {
                // Symbol completions: need the path argument from context
                let path_arg = request
                    .context
                    .as_ref()
                    .and_then(|ctx| ctx.get_argument("path"));

                match path_arg {
                    Some(path_str) => {
                        let path = Path::new(path_str);
                        completion::symbol_completions(&self.cache, path, argument_value)
                    }
                    // Without a path there is nothing to complete against.
                    None => Vec::new(),
                }
            }
            // Unknown argument names yield no completions.
            _ => Vec::new(),
        };

        // Create CompletionInfo with has_more flag if >100 results.
        // total_count reflects the full candidate list even when truncated.
        let total_count = completions.len() as u32;
        let (values, has_more) = if completions.len() > 100 {
            (completions.into_iter().take(100).collect(), true)
        } else {
            (completions, false)
        };

        let completion_info =
            match CompletionInfo::with_pagination(values, Some(total_count), has_more) {
                Ok(info) => info,
                Err(_) => {
                    // Graceful degradation: return empty on error
                    CompletionInfo::with_all_values(Vec::new())
                        .unwrap_or_else(|_| CompletionInfo::new(Vec::new()).unwrap())
                }
            };

        Ok(CompleteResult::new(completion_info))
    }

    // Map MCP logging levels onto tracing filters. Notice collapses to INFO;
    // Critical/Alert/Emergency collapse to ERROR (tracing has no finer grades).
    async fn set_level(
        &self,
        params: SetLevelRequestParams,
        _context: RequestContext<RoleServer>,
    ) -> Result<(), ErrorData> {
        let level_filter = match params.level {
            LoggingLevel::Debug => LevelFilter::DEBUG,
            LoggingLevel::Info => LevelFilter::INFO,
            LoggingLevel::Notice => LevelFilter::INFO,
            LoggingLevel::Warning => LevelFilter::WARN,
            LoggingLevel::Error => LevelFilter::ERROR,
            LoggingLevel::Critical => LevelFilter::ERROR,
            LoggingLevel::Alert => LevelFilter::ERROR,
            LoggingLevel::Emergency => LevelFilter::ERROR,
        };

        // std Mutex (not tokio): held only for this synchronous assignment,
        // never across an await.
        let mut filter_lock = self.log_level_filter.lock().unwrap();
        *filter_lock = level_filter;
        Ok(())
    }
}
1288
#[cfg(test)]
mod tests {
    use super::*;

    // emit_progress with no connected peer must be a silent no-op.
    #[tokio::test]
    async fn test_emit_progress_none_peer_is_noop() {
        // Wire up an analyzer whose peer slot is empty and whose channels go
        // nowhere; the senders are kept alive for the duration of the test.
        let peer_slot = Arc::new(TokioMutex::new(None));
        let level_filter = Arc::new(Mutex::new(LevelFilter::INFO));
        let (_log_tx, log_rx) = tokio::sync::mpsc::unbounded_channel();
        let (metric_tx, _metric_rx) = tokio::sync::mpsc::unbounded_channel();
        let analyzer = CodeAnalyzer::new(
            peer_slot,
            level_filter,
            log_rx,
            crate::metrics::MetricsSender(metric_tx),
        );
        let progress_token = ProgressToken(NumberOrString::String("test".into()));
        // Completing without a panic is the entire assertion here.
        analyzer
            .emit_progress(None, &progress_token, 0.0, 10.0, "test".to_string())
            .await;
    }

    // Replicates analyze_directory's formatting path and checks that
    // verbose=true yields the full paginated listing, not the SUMMARY: block.
    #[tokio::test]
    async fn test_handle_overview_mode_verbose_no_summary_block() {
        use crate::pagination::{PaginationMode, paginate_slice};
        use crate::types::{AnalyzeDirectoryParams, OutputControlParams, PaginationParams};
        use tempfile::TempDir;

        // Minimal one-file project fixture.
        let workdir = TempDir::new().unwrap();
        std::fs::write(workdir.path().join("main.rs"), "fn main() {}").unwrap();

        let peer_slot = Arc::new(TokioMutex::new(None));
        let level_filter = Arc::new(Mutex::new(LevelFilter::INFO));
        let (_log_tx, log_rx) = tokio::sync::mpsc::unbounded_channel();
        let (metric_tx, _metric_rx) = tokio::sync::mpsc::unbounded_channel();
        let analyzer = CodeAnalyzer::new(
            peer_slot,
            level_filter,
            log_rx,
            crate::metrics::MetricsSender(metric_tx),
        );

        let params = AnalyzeDirectoryParams {
            path: workdir.path().to_str().unwrap().to_string(),
            max_depth: None,
            pagination: PaginationParams {
                cursor: None,
                page_size: None,
            },
            output_control: OutputControlParams {
                summary: None,
                force: None,
                verbose: Some(true),
            },
        };

        let cancel = tokio_util::sync::CancellationToken::new();
        let output = analyzer
            .handle_overview_mode(&params, cancel)
            .await
            .unwrap();

        // Replicate the handler's formatting path (the fix site): with
        // summary=None and force=None, summary mode only triggers on size.
        let use_summary = output.formatted.len() > SIZE_LIMIT;
        let page =
            paginate_slice(&output.files, 0, DEFAULT_PAGE_SIZE, PaginationMode::Default).unwrap();
        let formatted = if use_summary {
            output.formatted.clone()
        } else {
            format_structure_paginated(
                &page.items,
                page.total,
                params.max_depth,
                Some(std::path::Path::new(&params.path)),
                true, // verbose
            )
        };

        // After the fix: verbose=true must not emit the SUMMARY: block
        assert!(
            !formatted.contains("SUMMARY:"),
            "verbose=true must not emit SUMMARY: block; got: {}",
            &formatted[..formatted.len().min(300)]
        );
        assert!(
            formatted.contains("PAGINATED:"),
            "verbose=true must emit PAGINATED: header"
        );
        assert!(
            formatted.contains("FILES [LOC, FUNCTIONS, CLASSES]"),
            "verbose=true must emit FILES section header"
        );
    }
}