Skip to main content

code_analyze_mcp/
lib.rs

//! Rust MCP server for code structure analysis using tree-sitter.
//!
//! This crate exposes four MCP tools for multiple programming languages:
//!
//! - **analyze_directory**: Directory tree with file counts and structure
//! - **analyze_file**: Semantic extraction (functions, classes, assignments, references)
//! - **analyze_symbol**: Call graph analysis (callers and callees)
//! - **analyze_module**: Lightweight function and import index
//!
//! Key types:
//! - [`analyze::analyze_directory`]: Analyze entire directory tree
//! - [`analyze::analyze_file`]: Analyze single file
//! - [`parser::ElementExtractor`]: Parse language-specific elements
//!
//! Languages supported: Rust, Go, Java, Python, TypeScript, TSX, Fortran.

17pub mod analyze;
18pub mod cache;
19pub mod completion;
20pub mod formatter;
21pub mod graph;
22pub mod lang;
23pub mod languages;
24pub mod logging;
25pub mod metrics;
26pub mod pagination;
27pub mod parser;
28pub(crate) mod schema_helpers;
29pub mod test_detection;
30pub mod traversal;
31pub mod types;
32
/// Directory names skipped during analysis: dependency caches, VCS metadata,
/// build artifacts, and Python virtual environments.
// NOTE(review): no consumer is visible in this chunk — presumably used by the
// traversal module when walking directories; confirm before changing.
pub(crate) const EXCLUDED_DIRS: &[&str] = &[
    "node_modules",
    "vendor",
    ".git",
    "__pycache__",
    "target",
    "dist",
    "build",
    ".venv",
];
43
44use cache::AnalysisCache;
45use formatter::{
46    format_file_details_paginated, format_file_details_summary, format_focused_paginated,
47    format_module_info, format_structure_paginated, format_summary,
48};
49use logging::LogEvent;
50use pagination::{
51    CursorData, DEFAULT_PAGE_SIZE, PaginationMode, decode_cursor, encode_cursor, paginate_slice,
52};
53use rmcp::handler::server::tool::{ToolRouter, schema_for_type};
54use rmcp::handler::server::wrapper::Parameters;
55use rmcp::model::{
56    CallToolResult, CancelledNotificationParam, CompleteRequestParams, CompleteResult,
57    CompletionInfo, Content, ErrorData, Implementation, InitializeResult, LoggingLevel,
58    LoggingMessageNotificationParam, Meta, Notification, NumberOrString, ProgressNotificationParam,
59    ProgressToken, ServerCapabilities, ServerNotification, SetLevelRequestParams,
60};
61use rmcp::service::{NotificationContext, RequestContext};
62use rmcp::{Peer, RoleServer, ServerHandler, tool, tool_handler, tool_router};
63use serde_json::Value;
64use std::path::Path;
65use std::sync::{Arc, Mutex};
66use tokio::sync::{Mutex as TokioMutex, mpsc};
67use tracing::{instrument, warn};
68use tracing_subscriber::filter::LevelFilter;
69use traversal::walk_directory;
70use types::{
71    AnalysisMode, AnalyzeDirectoryParams, AnalyzeFileParams, AnalyzeModuleParams,
72    AnalyzeSymbolParams,
73};
74
/// Process-wide session counter.
// NOTE(review): not referenced anywhere in this portion of the file —
// presumably incremented where sessions are established; confirm it is used.
static GLOBAL_SESSION_COUNTER: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);

/// Maximum formatted output size in characters before tools auto-summarize or
/// reject the request (error messages describe this as "50K chars", roughly
/// ~12.5K tokens at the 4-chars-per-token estimate used below).
const SIZE_LIMIT: usize = 50_000;
78
/// Detects the invalid parameter combination of an explicit `summary=true`
/// together with a pagination `cursor`.
///
/// Summary mode and pagination are mutually exclusive, so callers reject the
/// request whenever this returns `true`. An unset summary (`None`) never
/// conflicts, which keeps auto-summarization compatible with cursors.
pub fn summary_cursor_conflict(summary: Option<bool>, cursor: Option<&str>) -> bool {
    matches!(summary, Some(true)) && cursor.is_some()
}
84
85fn error_meta(
86    category: &'static str,
87    is_retryable: bool,
88    suggested_action: &'static str,
89) -> Option<serde_json::Value> {
90    Some(serde_json::json!({
91        "errorCategory": category,
92        "isRetryable": is_retryable,
93        "suggestedAction": suggested_action,
94    }))
95}
96
97fn err_to_tool_result(e: ErrorData) -> CallToolResult {
98    CallToolResult::error(vec![Content::text(e.message)])
99}
100
101fn no_cache_meta() -> Meta {
102    let mut m = serde_json::Map::new();
103    m.insert(
104        "cache_hint".to_string(),
105        serde_json::Value::String("no-cache".to_string()),
106    );
107    Meta(m)
108}
109
110/// Helper function for paginating focus chains (callers or callees).
111/// Returns (items, re-encoded_cursor_option).
112fn paginate_focus_chains(
113    chains: &[graph::InternalCallChain],
114    mode: PaginationMode,
115    offset: usize,
116    page_size: usize,
117) -> Result<(Vec<graph::InternalCallChain>, Option<String>), ErrorData> {
118    let paginated = paginate_slice(chains, offset, page_size, mode).map_err(|e| {
119        ErrorData::new(
120            rmcp::model::ErrorCode::INTERNAL_ERROR,
121            e.to_string(),
122            error_meta("transient", true, "retry the request"),
123        )
124    })?;
125
126    if paginated.next_cursor.is_none() && offset == 0 {
127        return Ok((paginated.items, None));
128    }
129
130    let next = if let Some(raw_cursor) = paginated.next_cursor {
131        let decoded = decode_cursor(&raw_cursor).map_err(|e| {
132            ErrorData::new(
133                rmcp::model::ErrorCode::INVALID_PARAMS,
134                e.to_string(),
135                error_meta("validation", false, "invalid cursor format"),
136            )
137        })?;
138        Some(
139            encode_cursor(&CursorData {
140                mode,
141                offset: decoded.offset,
142            })
143            .map_err(|e| {
144                ErrorData::new(
145                    rmcp::model::ErrorCode::INVALID_PARAMS,
146                    e.to_string(),
147                    error_meta("validation", false, "invalid cursor format"),
148                )
149            })?,
150        )
151    } else {
152        None
153    };
154
155    Ok((paginated.items, next))
156}
157
158/// MCP server handler that wires the four analysis tools to the rmcp transport.
159///
160/// Holds shared state: tool router, analysis cache, peer connection, log-level filter,
161/// log event channel, metrics sender, and per-session sequence tracking.
#[derive(Clone)]
pub struct CodeAnalyzer {
    /// Router mapping MCP tool names to handler methods (built by `Self::tool_router()`).
    tool_router: ToolRouter<Self>,
    /// Bounded cache of file-analysis results, keyed by path + mtime + mode
    /// (see `handle_file_details_mode`); created with capacity 100.
    cache: AnalysisCache,
    /// Connected client peer, if any; used to push progress notifications.
    peer: Arc<TokioMutex<Option<Peer<RoleServer>>>>,
    /// Shared log-level filter controlling which log events are forwarded.
    log_level_filter: Arc<Mutex<LevelFilter>>,
    /// Receiver side of the log-event channel, wrapped in `Option` so it can
    /// be taken exactly once — presumably by a log-forwarding task; confirm.
    event_rx: Arc<TokioMutex<Option<mpsc::UnboundedReceiver<LogEvent>>>>,
    /// Sender for per-call metric events (see the `metrics_tx.send` calls in tool handlers).
    metrics_tx: crate::metrics::MetricsSender,
    /// Monotonic sequence number of tool calls within this session
    /// (fetch-added at the start of each tool handler).
    session_call_seq: Arc<std::sync::atomic::AtomicU32>,
    /// Session identifier, if one has been assigned — set elsewhere; confirm.
    session_id: Arc<TokioMutex<Option<String>>>,
}
173
174#[tool_router]
175impl CodeAnalyzer {
    /// Returns the full set of MCP tool definitions registered on this
    /// server's generated tool router.
    pub fn list_tools() -> Vec<rmcp::model::Tool> {
        Self::tool_router().list_all()
    }
179
180    pub fn new(
181        peer: Arc<TokioMutex<Option<Peer<RoleServer>>>>,
182        log_level_filter: Arc<Mutex<LevelFilter>>,
183        event_rx: mpsc::UnboundedReceiver<LogEvent>,
184        metrics_tx: crate::metrics::MetricsSender,
185    ) -> Self {
186        CodeAnalyzer {
187            tool_router: Self::tool_router(),
188            cache: AnalysisCache::new(100),
189            peer,
190            log_level_filter,
191            event_rx: Arc::new(TokioMutex::new(Some(event_rx))),
192            metrics_tx,
193            session_call_seq: Arc::new(std::sync::atomic::AtomicU32::new(0)),
194            session_id: Arc::new(TokioMutex::new(None)),
195        }
196    }
197
198    #[instrument(skip(self))]
199    async fn emit_progress(
200        &self,
201        peer: Option<Peer<RoleServer>>,
202        token: &ProgressToken,
203        progress: f64,
204        total: f64,
205        message: String,
206    ) {
207        if let Some(peer) = peer {
208            let notification = ServerNotification::ProgressNotification(Notification::new(
209                ProgressNotificationParam {
210                    progress_token: token.clone(),
211                    progress,
212                    total: Some(total),
213                    message: Some(message),
214                },
215            ));
216            if let Err(e) = peer.send_notification(notification).await {
217                warn!("Failed to send progress notification: {}", e);
218            }
219        }
220    }
221
    /// Private helper: Extract analysis logic for overview mode (analyze_directory).
    /// Returns the complete analysis output after spawning and monitoring progress.
    /// Cancels the blocking task when `ct` is triggered; returns an error on cancellation.
    #[instrument(skip(self, params, ct))]
    async fn handle_overview_mode(
        &self,
        params: &AnalyzeDirectoryParams,
        ct: tokio_util::sync::CancellationToken,
    ) -> Result<analyze::AnalysisOutput, ErrorData> {
        let path = Path::new(&params.path);
        // Shared progress counter: incremented by the blocking analysis task,
        // polled below to drive progress notifications.
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
        let counter_clone = counter.clone();
        let path_owned = path.to_path_buf();
        let max_depth = params.max_depth;
        let ct_clone = ct.clone();

        // Single unbounded walk; filter in-memory to respect max_depth for analysis.
        let all_entries = walk_directory(path, None).map_err(|e| {
            ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                format!("Failed to walk directory: {}", e),
                error_meta("resource", false, "check path permissions and availability"),
            )
        })?;

        // Compute subtree counts from the full entry set before filtering.
        // Only needed when a positive max_depth will hide deeper entries.
        let subtree_counts = if max_depth.is_some_and(|d| d > 0) {
            Some(traversal::subtree_counts_from_entries(path, &all_entries))
        } else {
            None
        };

        // Filter to depth-bounded subset for analysis.
        let entries: Vec<traversal::WalkEntry> = if let Some(depth) = max_depth
            && depth > 0
        {
            all_entries
                .into_iter()
                .filter(|e| e.depth <= depth as usize)
                .collect()
        } else {
            all_entries
        };

        // Get total file count for progress reporting
        let total_files = entries.iter().filter(|e| !e.is_dir).count();

        // Spawn blocking analysis with progress tracking
        let handle = tokio::task::spawn_blocking(move || {
            analyze::analyze_directory_with_progress(&path_owned, entries, counter_clone, ct_clone)
        });

        // Poll and emit progress every 100ms
        // Token uniqueness comes from the nanosecond timestamp; falls back to
        // "0" if the system clock reads earlier than the Unix epoch.
        let token = ProgressToken(NumberOrString::String(
            format!(
                "analyze-overview-{}",
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_nanos())
                    .unwrap_or(0)
            )
            .into(),
        ));
        let peer = self.peer.lock().await.clone();
        let mut last_progress = 0usize;
        let mut cancelled = false;
        loop {
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
            // Cancellation is observed both here and inside the blocking task
            // (via ct_clone); the task is expected to return
            // AnalyzeError::Cancelled, which is handled below.
            if ct.is_cancelled() {
                cancelled = true;
                break;
            }
            let current = counter.load(std::sync::atomic::Ordering::Relaxed);
            if current != last_progress && total_files > 0 {
                self.emit_progress(
                    peer.clone(),
                    &token,
                    current as f64,
                    total_files as f64,
                    format!("Analyzing {}/{} files", current, total_files),
                )
                .await;
                last_progress = current;
            }
            // Checked after the sleep, so completion detection lags by up to ~100ms.
            if handle.is_finished() {
                break;
            }
        }

        // Emit final 100% progress only if not cancelled
        if !cancelled && total_files > 0 {
            self.emit_progress(
                peer.clone(),
                &token,
                total_files as f64,
                total_files as f64,
                format!("Completed analyzing {} files", total_files),
            )
            .await;
        }

        match handle.await {
            Ok(Ok(mut output)) => {
                // Attach pre-filter subtree counts so summaries can report
                // totals beyond the analyzed depth.
                output.subtree_counts = subtree_counts;
                Ok(output)
            }
            Ok(Err(analyze::AnalyzeError::Cancelled)) => Err(ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                "Analysis cancelled".to_string(),
                error_meta("transient", true, "analysis was cancelled"),
            )),
            Ok(Err(e)) => Err(ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                format!("Error analyzing directory: {}", e),
                error_meta("resource", false, "check path and file permissions"),
            )),
            Err(e) => Err(ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                format!("Task join error: {}", e),
                error_meta("transient", true, "retry the request"),
            )),
        }
    }
345
    /// Private helper: Extract analysis logic for file details mode (analyze_file).
    /// Returns the cached or newly analyzed file output.
    #[instrument(skip(self, params))]
    async fn handle_file_details_mode(
        &self,
        params: &AnalyzeFileParams,
    ) -> Result<std::sync::Arc<analyze::FileAnalysisOutput>, ErrorData> {
        // Build cache key from file metadata
        // Keyed on path + mtime, so editing the file invalidates the entry.
        // If metadata or mtime is unavailable, caching is silently skipped.
        // NOTE(review): mtime granularity is filesystem-dependent; two edits
        // within the same tick could serve a stale cached result — confirm
        // this is acceptable.
        let cache_key = std::fs::metadata(&params.path).ok().and_then(|meta| {
            meta.modified().ok().map(|mtime| cache::CacheKey {
                path: std::path::PathBuf::from(&params.path),
                modified: mtime,
                mode: AnalysisMode::FileDetails,
            })
        });

        // Check cache first
        if let Some(ref key) = cache_key
            && let Some(cached) = self.cache.get(key)
        {
            return Ok(cached);
        }

        // Cache miss or no cache key, analyze and optionally store
        match analyze::analyze_file(&params.path, params.ast_recursion_limit) {
            Ok(output) => {
                // Arc lets cached results be shared across requests without
                // cloning the (potentially large) analysis data.
                let arc_output = std::sync::Arc::new(output);
                if let Some(ref key) = cache_key {
                    self.cache.put(key.clone(), arc_output.clone());
                }
                Ok(arc_output)
            }
            Err(e) => Err(ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                format!("Error analyzing file: {}", e),
                error_meta("resource", false, "check file path and permissions"),
            )),
        }
    }
385
    /// Private helper: Extract analysis logic for focused mode (analyze_symbol).
    /// Returns the complete focused analysis output after spawning and monitoring progress.
    /// Cancels the blocking task when `ct` is triggered; returns an error on cancellation.
    #[instrument(skip(self, params, ct))]
    async fn handle_focused_mode(
        &self,
        params: &AnalyzeSymbolParams,
        ct: tokio_util::sync::CancellationToken,
    ) -> Result<analyze::FocusedAnalysisOutput, ErrorData> {
        let follow_depth = params.follow_depth.unwrap_or(1);
        // Shared progress counter incremented by the blocking analysis task.
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
        let counter_clone = counter.clone();
        let path = Path::new(&params.path);
        let path_owned = path.to_path_buf();
        let max_depth = params.max_depth;
        let symbol_owned = params.symbol.clone();
        let match_mode = params.match_mode.clone().unwrap_or_default();
        let ast_recursion_limit = params.ast_recursion_limit;
        let ct_clone = ct.clone();

        // Compute use_summary before spawning: explicit params only
        // (auto-summarization for oversized output is handled after the fact below).
        let use_summary_for_task = params.output_control.force != Some(true)
            && params.output_control.summary == Some(true);

        // Get total file count for progress reporting
        // A walk failure here only disables progress reporting (total_files = 0);
        // the analysis itself will surface any real error.
        let total_files = match walk_directory(path, max_depth) {
            Ok(entries) => entries.iter().filter(|e| !e.is_dir).count(),
            Err(_) => 0,
        };

        // Spawn blocking analysis with progress tracking
        let handle = tokio::task::spawn_blocking(move || {
            analyze::analyze_focused_with_progress(
                &path_owned,
                &symbol_owned,
                match_mode,
                follow_depth,
                max_depth,
                ast_recursion_limit,
                counter_clone,
                ct_clone,
                use_summary_for_task,
            )
        });

        // Poll and emit progress every 100ms
        // Token uniqueness comes from the nanosecond timestamp; "0" fallback
        // if the clock reads earlier than the Unix epoch.
        let token = ProgressToken(NumberOrString::String(
            format!(
                "analyze-symbol-{}",
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_nanos())
                    .unwrap_or(0)
            )
            .into(),
        ));
        let peer = self.peer.lock().await.clone();
        let mut last_progress = 0usize;
        let mut cancelled = false;
        loop {
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
            if ct.is_cancelled() {
                cancelled = true;
                break;
            }
            let current = counter.load(std::sync::atomic::Ordering::Relaxed);
            if current != last_progress && total_files > 0 {
                self.emit_progress(
                    peer.clone(),
                    &token,
                    current as f64,
                    total_files as f64,
                    format!(
                        "Analyzing {}/{} files for symbol '{}'",
                        current, total_files, params.symbol
                    ),
                )
                .await;
                last_progress = current;
            }
            // Checked after the sleep, so completion detection lags by up to ~100ms.
            if handle.is_finished() {
                break;
            }
        }

        // Emit final 100% progress only if not cancelled
        if !cancelled && total_files > 0 {
            self.emit_progress(
                peer.clone(),
                &token,
                total_files as f64,
                total_files as f64,
                format!(
                    "Completed analyzing {} files for symbol '{}'",
                    total_files, params.symbol
                ),
            )
            .await;
        }

        let mut output = match handle.await {
            Ok(Ok(output)) => output,
            Ok(Err(analyze::AnalyzeError::Cancelled)) => {
                return Err(ErrorData::new(
                    rmcp::model::ErrorCode::INTERNAL_ERROR,
                    "Analysis cancelled".to_string(),
                    error_meta("transient", true, "analysis was cancelled"),
                ));
            }
            Ok(Err(e)) => {
                return Err(ErrorData::new(
                    rmcp::model::ErrorCode::INTERNAL_ERROR,
                    format!("Error analyzing symbol: {}", e),
                    error_meta("resource", false, "check symbol name and file"),
                ));
            }
            Err(e) => {
                return Err(ErrorData::new(
                    rmcp::model::ErrorCode::INTERNAL_ERROR,
                    format!("Task join error: {}", e),
                    error_meta("transient", true, "retry the request"),
                ));
            }
        };

        // Auto-detect: if no explicit summary param and output exceeds limit,
        // re-run analysis with use_summary=true
        // NOTE(review): this repeats the entire analysis to produce the
        // summary; acceptable as a fallback, but a candidate for reusing the
        // first pass's data if it shows up in profiles.
        if params.output_control.summary.is_none()
            && params.output_control.force != Some(true)
            && output.formatted.len() > SIZE_LIMIT
        {
            let path_owned2 = Path::new(&params.path).to_path_buf();
            let symbol_owned2 = params.symbol.clone();
            let match_mode2 = params.match_mode.clone().unwrap_or_default();
            let follow_depth2 = params.follow_depth.unwrap_or(1);
            let max_depth2 = params.max_depth;
            let ast_recursion_limit2 = params.ast_recursion_limit;
            let counter2 = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
            let ct2 = ct.clone();
            let summary_result = tokio::task::spawn_blocking(move || {
                analyze::analyze_focused_with_progress(
                    &path_owned2,
                    &symbol_owned2,
                    match_mode2,
                    follow_depth2,
                    max_depth2,
                    ast_recursion_limit2,
                    counter2,
                    ct2,
                    true, // use_summary=true
                )
            })
            .await;
            match summary_result {
                Ok(Ok(summary_output)) => {
                    // Keep the original output's other fields; only the
                    // formatted text is replaced by the compact summary.
                    output.formatted = summary_output.formatted;
                }
                _ => {
                    // Fallback: return error (summary generation failed)
                    // Rough token estimate: ~4 chars per token.
                    let estimated_tokens = output.formatted.len() / 4;
                    let message = format!(
                        "Output exceeds 50K chars ({} chars, ~{} tokens). Use summary=true or force=true.",
                        output.formatted.len(),
                        estimated_tokens
                    );
                    return Err(ErrorData::new(
                        rmcp::model::ErrorCode::INVALID_PARAMS,
                        message,
                        error_meta("validation", false, "use summary=true or force=true"),
                    ));
                }
            }
        } else if output.formatted.len() > SIZE_LIMIT
            && params.output_control.force != Some(true)
            && params.output_control.summary == Some(false)
        {
            // Explicit summary=false with large output: return error
            let estimated_tokens = output.formatted.len() / 4;
            let message = format!(
                "Output exceeds 50K chars ({} chars, ~{} tokens). Use one of:\n\
                 - force=true to return full output\n\
                 - summary=true to get compact summary\n\
                 - Narrow your scope (smaller directory, specific file)",
                output.formatted.len(),
                estimated_tokens
            );
            return Err(ErrorData::new(
                rmcp::model::ErrorCode::INVALID_PARAMS,
                message,
                error_meta(
                    "validation",
                    false,
                    "use force=true, summary=true, or narrow scope",
                ),
            ));
        }

        Ok(output)
    }
585
586    #[instrument(skip(self, context))]
587    #[tool(
588        name = "analyze_directory",
589        description = "Analyze directory structure and code metrics for multi-file overview. Use this tool for directories; use analyze_file for a single file. Returns a tree with LOC, function count, class count, and test file markers. Respects .gitignore (results may differ from raw filesystem listing because .gitignore rules are applied). For repos with 1000+ files, use max_depth=2-3 and summary=true to stay within token budgets. Note: max_depth controls what is analyzed (traversal depth), while page_size controls how results are returned (chunking); these are independent. Strategy comparison: prefer pagination (page_size=50) over force=true to reduce per-call token overhead; use summary=true when counts and structure are sufficient and no pagination is needed; force=true is an escape hatch for exceptional cases. Empty directories return an empty tree with zero counts. Output auto-summarizes at 50K chars; use summary=true to force compact output. Paginate large results with cursor and page_size. Example queries: Analyze the src/ directory to understand module structure; What files are in the tests/ directory and how large are they? summary=true and cursor are mutually exclusive; passing both returns an error.",
590        output_schema = schema_for_type::<analyze::AnalysisOutput>(),
591        annotations(
592            title = "Analyze Directory",
593            read_only_hint = true,
594            destructive_hint = false,
595            idempotent_hint = true,
596            open_world_hint = false
597        )
598    )]
599    async fn analyze_directory(
600        &self,
601        params: Parameters<AnalyzeDirectoryParams>,
602        context: RequestContext<RoleServer>,
603    ) -> Result<CallToolResult, ErrorData> {
604        let params = params.0;
605        let ct = context.ct.clone();
606        let _t_start = std::time::Instant::now();
607        let _param_path = params.path.clone();
608        let _max_depth_val = params.max_depth;
609        let _seq = self
610            .session_call_seq
611            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
612        let _sid = self.session_id.lock().await.clone();
613
614        // Call handler for analysis and progress tracking
615        let mut output = match self.handle_overview_mode(&params, ct).await {
616            Ok(v) => v,
617            Err(e) => return Ok(err_to_tool_result(e)),
618        };
619
620        // summary=true (explicit) and cursor are mutually exclusive.
621        // Auto-summarization (summary=None + large output) must NOT block cursor pagination.
622        if summary_cursor_conflict(
623            params.output_control.summary,
624            params.pagination.cursor.as_deref(),
625        ) {
626            return Ok(err_to_tool_result(ErrorData::new(
627                rmcp::model::ErrorCode::INVALID_PARAMS,
628                "summary=true is incompatible with a pagination cursor; use one or the other"
629                    .to_string(),
630                error_meta("validation", false, "remove cursor or set summary=false"),
631            )));
632        }
633
634        // Apply summary/output size limiting logic
635        let use_summary = if params.output_control.force == Some(true) {
636            false
637        } else if params.output_control.summary == Some(true) {
638            true
639        } else if params.output_control.summary == Some(false) {
640            false
641        } else {
642            output.formatted.len() > SIZE_LIMIT
643        };
644
645        if use_summary {
646            output.formatted = format_summary(
647                &output.entries,
648                &output.files,
649                params.max_depth,
650                Some(Path::new(&params.path)),
651                output.subtree_counts.as_deref(),
652            );
653        }
654
655        // Decode pagination cursor if provided
656        let page_size = params.pagination.page_size.unwrap_or(DEFAULT_PAGE_SIZE);
657        let offset = if let Some(ref cursor_str) = params.pagination.cursor {
658            let cursor_data = match decode_cursor(cursor_str).map_err(|e| {
659                ErrorData::new(
660                    rmcp::model::ErrorCode::INVALID_PARAMS,
661                    e.to_string(),
662                    error_meta("validation", false, "invalid cursor format"),
663                )
664            }) {
665                Ok(v) => v,
666                Err(e) => return Ok(err_to_tool_result(e)),
667            };
668            cursor_data.offset
669        } else {
670            0
671        };
672
673        // Apply pagination to files
674        let paginated =
675            match paginate_slice(&output.files, offset, page_size, PaginationMode::Default) {
676                Ok(v) => v,
677                Err(e) => {
678                    return Ok(err_to_tool_result(ErrorData::new(
679                        rmcp::model::ErrorCode::INTERNAL_ERROR,
680                        e.to_string(),
681                        error_meta("transient", true, "retry the request"),
682                    )));
683                }
684            };
685
686        let verbose = params.output_control.verbose.unwrap_or(false);
687        if !use_summary {
688            output.formatted = format_structure_paginated(
689                &paginated.items,
690                paginated.total,
691                params.max_depth,
692                Some(Path::new(&params.path)),
693                verbose,
694            );
695        }
696
697        // Update next_cursor in output after pagination (unless using summary mode)
698        if use_summary {
699            output.next_cursor = None;
700        } else {
701            output.next_cursor = paginated.next_cursor.clone();
702        }
703
704        // Build final text output with pagination cursor if present (unless using summary mode)
705        let mut final_text = output.formatted.clone();
706        if !use_summary && let Some(cursor) = paginated.next_cursor {
707            final_text.push('\n');
708            final_text.push_str(&format!("NEXT_CURSOR: {}", cursor));
709        }
710
711        let mut result = CallToolResult::success(vec![Content::text(final_text.clone())])
712            .with_meta(Some(no_cache_meta()));
713        let structured = serde_json::to_value(&output).unwrap_or(Value::Null);
714        result.structured_content = Some(structured);
715        let _dur = _t_start.elapsed().as_millis() as u64;
716        self.metrics_tx.send(crate::metrics::MetricEvent {
717            ts: crate::metrics::unix_ms(),
718            tool: "analyze_directory",
719            duration_ms: _dur,
720            output_chars: final_text.chars().count(),
721            param_path_depth: crate::metrics::path_component_count(&_param_path),
722            max_depth: _max_depth_val,
723            result: "ok",
724            error_type: None,
725            session_id: _sid,
726            seq: Some(_seq),
727        });
728        Ok(result)
729    }
730
    // MCP tool "analyze_file": semantic extraction for a single source file.
    // The heavy parse/analysis work (and its caching) lives in
    // `handle_file_details_mode`; this wrapper layers summary/size limiting,
    // cursor-based pagination, and metrics emission on top of the cached
    // analysis. Analysis errors are returned as tool results (via
    // `err_to_tool_result`) rather than as protocol-level `Err`s.
    #[instrument(skip(self, context))]
    #[tool(
        name = "analyze_file",
        description = "Extract semantic structure from a single source file only; pass a directory to analyze_directory instead. Returns functions with signatures, types, and line ranges; class and method definitions with inheritance, fields, and imports. Supported languages: Rust, Go, Java, Python, TypeScript, TSX, Fortran; unsupported file extensions return an error. Common mistake: passing a directory path returns an error; use analyze_directory for directories. Generated code with deeply nested ASTs may exceed 50K chars; use summary=true to get counts only. Supports pagination for large files via cursor/page_size. Use summary=true for compact output. Example queries: What functions are defined in src/lib.rs?; Show me the classes and their methods in src/analyzer.py",
        output_schema = schema_for_type::<analyze::FileAnalysisOutput>(),
        annotations(
            title = "Analyze File",
            read_only_hint = true,
            destructive_hint = false,
            idempotent_hint = true,
            open_world_hint = false
        )
    )]
    async fn analyze_file(
        &self,
        params: Parameters<AnalyzeFileParams>,
        context: RequestContext<RoleServer>,
    ) -> Result<CallToolResult, ErrorData> {
        let params = params.0;
        // NOTE(review): the cancellation token is captured but never consulted
        // in this handler (underscore marks it intentionally unused), unlike
        // analyze_symbol which passes its token into the analysis -- confirm
        // single-file analysis is meant to be non-cancellable.
        let _ct = context.ct.clone();
        // Telemetry bookkeeping: wall-clock start, request path, per-session
        // call sequence number, and a snapshot of the current session id.
        let _t_start = std::time::Instant::now();
        let _param_path = params.path.clone();
        let _seq = self
            .session_call_seq
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        let _sid = self.session_id.lock().await.clone();

        // Call handler for analysis and caching
        let arc_output = match self.handle_file_details_mode(&params).await {
            Ok(v) => v,
            Err(e) => return Ok(err_to_tool_result(e)),
        };

        // Clone only the two fields that may be mutated per-request (formatted and
        // next_cursor). The heavy SemanticAnalysis data is shared via Arc and never
        // modified, so we borrow it directly from the cached pointer.
        let mut formatted = arc_output.formatted.clone();
        let line_count = arc_output.line_count;

        // Summary resolution precedence: force=true always disables summary;
        // otherwise an explicit summary=true/false flag wins; with neither
        // set, fall back to auto-summary when the cached full output exceeds
        // SIZE_LIMIT.
        let use_summary = if params.output_control.force == Some(true) {
            false
        } else if params.output_control.summary == Some(true) {
            true
        } else if params.output_control.summary == Some(false) {
            false
        } else {
            formatted.len() > SIZE_LIMIT
        };

        if use_summary {
            formatted = format_file_details_summary(&arc_output.semantic, &params.path, line_count);
        } else if formatted.len() > SIZE_LIMIT && params.output_control.force != Some(true) {
            // Only reachable when summary=Some(false) was passed explicitly
            // and the output is still oversized: refuse with guidance rather
            // than flooding the client.
            let estimated_tokens = formatted.len() / 4; // rough ~4 chars/token heuristic
            let message = format!(
                "Output exceeds 50K chars ({} chars, ~{} tokens). Use one of:\n\
                 - force=true to return full output\n\
                 - Narrow your scope (smaller directory, specific file)\n\
                 - Use analyze_symbol mode for targeted analysis\n\
                 - Reduce max_depth parameter",
                formatted.len(),
                estimated_tokens
            );
            return Ok(err_to_tool_result(ErrorData::new(
                rmcp::model::ErrorCode::INVALID_PARAMS,
                message,
                error_meta("validation", false, "use force=true or narrow scope"),
            )));
        }

        // Decode pagination cursor if provided (analyze_file); a malformed
        // cursor is a validation error, an absent cursor means offset 0.
        let page_size = params.pagination.page_size.unwrap_or(DEFAULT_PAGE_SIZE);
        let offset = if let Some(ref cursor_str) = params.pagination.cursor {
            let cursor_data = match decode_cursor(cursor_str).map_err(|e| {
                ErrorData::new(
                    rmcp::model::ErrorCode::INVALID_PARAMS,
                    e.to_string(),
                    error_meta("validation", false, "invalid cursor format"),
                )
            }) {
                Ok(v) => v,
                Err(e) => return Ok(err_to_tool_result(e)),
            };
            cursor_data.offset
        } else {
            0
        };

        // Filter to top-level functions only (exclude methods) before pagination.
        // A function whose line span nests inside any class's line span is
        // treated as a method and excluded from the paginated list.
        let top_level_fns: Vec<crate::types::FunctionInfo> = arc_output
            .semantic
            .functions
            .iter()
            .filter(|func| {
                !arc_output
                    .semantic
                    .classes
                    .iter()
                    .any(|class| func.line >= class.line && func.end_line <= class.end_line)
            })
            .cloned()
            .collect();

        // Paginate top-level functions only; pagination failures are treated
        // as transient internal errors (the client may retry).
        let paginated =
            match paginate_slice(&top_level_fns, offset, page_size, PaginationMode::Default) {
                Ok(v) => v,
                Err(e) => {
                    return Ok(err_to_tool_result(ErrorData::new(
                        rmcp::model::ErrorCode::INTERNAL_ERROR,
                        e.to_string(),
                        error_meta("transient", true, "retry the request"),
                    )));
                }
            };

        // Regenerate formatted output using the paginated formatter (handles verbose and pagination correctly)
        let verbose = params.output_control.verbose.unwrap_or(false);
        if !use_summary {
            formatted = format_file_details_paginated(
                &paginated.items,
                paginated.total,
                &arc_output.semantic,
                &params.path,
                line_count,
                offset,
                verbose,
            );
        }

        // Capture next_cursor from pagination result (unless using summary mode;
        // summary output is never paginated, so no cursor is emitted).
        let next_cursor = if use_summary {
            None
        } else {
            paginated.next_cursor.clone()
        };

        // Build final text output with pagination cursor if present (unless using summary mode)
        let mut final_text = formatted.clone();
        if !use_summary && let Some(ref cursor) = next_cursor {
            final_text.push('\n');
            final_text.push_str(&format!("NEXT_CURSOR: {}", cursor));
        }

        // Build the response output, sharing SemanticAnalysis from the Arc to avoid cloning it.
        let response_output = analyze::FileAnalysisOutput {
            formatted,
            semantic: arc_output.semantic.clone(),
            line_count,
            next_cursor,
        };

        // NOTE(review): a serialization failure here silently degrades
        // structured_content to JSON null, whereas analyze_module surfaces the
        // same failure as an INTERNAL_ERROR -- confirm the asymmetry is
        // intentional.
        let mut result = CallToolResult::success(vec![Content::text(final_text.clone())])
            .with_meta(Some(no_cache_meta()));
        let structured = serde_json::to_value(&response_output).unwrap_or(Value::Null);
        result.structured_content = Some(structured);
        // Emit the success metric for this call (duration, output size,
        // request-path depth, session/seq identifiers).
        let _dur = _t_start.elapsed().as_millis() as u64;
        self.metrics_tx.send(crate::metrics::MetricEvent {
            ts: crate::metrics::unix_ms(),
            tool: "analyze_file",
            duration_ms: _dur,
            output_chars: final_text.chars().count(),
            param_path_depth: crate::metrics::path_component_count(&_param_path),
            max_depth: None,
            result: "ok",
            error_type: None,
            session_id: _sid,
            seq: Some(_seq),
        });
        Ok(result)
    }
902
    // MCP tool "analyze_symbol": call-graph analysis for one named symbol.
    // `handle_focused_mode` builds the caller/callee chains (cancellable via
    // the request's token); this wrapper paginates the chains -- callers
    // first, then callees via a mode-switching cursor -- and emits metrics.
    #[instrument(skip(self, context))]
    #[tool(
        name = "analyze_symbol",
        description = "Build call graph for a named function or method across all files in a directory to trace a specific function's usage. Returns direct callers and callees. Default symbol lookup is case-sensitive exact-match (match_mode=exact); myFunc and myfunc are different symbols. If exact match fails, retry with match_mode=insensitive for a case-insensitive search. To list candidates matching a prefix, use match_mode=prefix. To find symbols containing a substring, use match_mode=contains. When prefix or contains matches multiple symbols, an error is returned listing all candidates so you can refine to a single match. A symbol unknown to the graph (not defined and not referenced) returns an error; a symbol that is defined but has no callers or callees returns empty chains without error. follow_depth warning: each increment can multiply output size exponentially; use follow_depth=1 for production use; follow_depth=2+ only for targeted deep dives. Use cursor/page_size to paginate call chains when results exceed page_size. Example queries: Find all callers of the parse_config function; Trace the call chain for MyClass.process_request up to 2 levels deep",
        output_schema = schema_for_type::<analyze::FocusedAnalysisOutput>(),
        annotations(
            title = "Analyze Symbol",
            read_only_hint = true,
            destructive_hint = false,
            idempotent_hint = true,
            open_world_hint = false
        )
    )]
    async fn analyze_symbol(
        &self,
        params: Parameters<AnalyzeSymbolParams>,
        context: RequestContext<RoleServer>,
    ) -> Result<CallToolResult, ErrorData> {
        let params = params.0;
        // Unlike analyze_file, the cancellation token is actually forwarded
        // into the analysis below.
        let ct = context.ct.clone();
        // Telemetry bookkeeping: start time, request path, follow depth,
        // per-session call sequence number, and current session id.
        let _t_start = std::time::Instant::now();
        let _param_path = params.path.clone();
        let _max_depth_val = params.follow_depth;
        let _seq = self
            .session_call_seq
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        let _sid = self.session_id.lock().await.clone();

        // Call handler for analysis and progress tracking
        let mut output = match self.handle_focused_mode(&params, ct).await {
            Ok(v) => v,
            Err(e) => return Ok(err_to_tool_result(e)),
        };

        // Decode pagination cursor if provided (analyze_symbol); malformed
        // cursors are rejected as validation errors, absence means offset 0.
        let page_size = params.pagination.page_size.unwrap_or(DEFAULT_PAGE_SIZE);
        let offset = if let Some(ref cursor_str) = params.pagination.cursor {
            let cursor_data = match decode_cursor(cursor_str).map_err(|e| {
                ErrorData::new(
                    rmcp::model::ErrorCode::INVALID_PARAMS,
                    e.to_string(),
                    error_meta("validation", false, "invalid cursor format"),
                )
            }) {
                Ok(v) => v,
                Err(e) => return Ok(err_to_tool_result(e)),
            };
            cursor_data.offset
        } else {
            0
        };

        // SymbolFocus pagination: decode cursor mode to determine callers vs callees.
        // The cursor is decoded a second time here just for its mode; the
        // first decode above already returned on error, so the unwrap_or
        // fallback is defensive and should not trigger in practice. No cursor
        // means we start with the callers page.
        let cursor_mode = if let Some(ref cursor_str) = params.pagination.cursor {
            decode_cursor(cursor_str)
                .map(|c| c.mode)
                .unwrap_or(PaginationMode::Callers)
        } else {
            PaginationMode::Callers
        };

        let use_summary = params.output_control.summary == Some(true);
        let verbose = params.output_control.verbose.unwrap_or(false);

        let mut callee_cursor = match cursor_mode {
            PaginationMode::Callers => {
                // Page through the production caller chains.
                let (paginated_items, paginated_next) = match paginate_focus_chains(
                    &output.prod_chains,
                    PaginationMode::Callers,
                    offset,
                    page_size,
                ) {
                    Ok(v) => v,
                    Err(e) => return Ok(err_to_tool_result(e)),
                };

                // Regenerate the formatted text except for the one case where
                // the handler's own formatting is kept: a verbose, first-page,
                // no-further-pages request with no callee chains. Summary mode
                // never reformats (and never paginates).
                if !use_summary
                    && (paginated_next.is_some()
                        || offset > 0
                        || !verbose
                        || !output.outgoing_chains.is_empty())
                {
                    let base_path = Path::new(&params.path);
                    output.formatted = format_focused_paginated(
                        &paginated_items,
                        output.prod_chains.len(),
                        PaginationMode::Callers,
                        &params.symbol,
                        &output.prod_chains,
                        &output.test_chains,
                        &output.outgoing_chains,
                        output.def_count,
                        offset,
                        Some(base_path),
                        verbose,
                    );
                    paginated_next
                } else {
                    None
                }
            }
            PaginationMode::Callees => {
                // Page through the outgoing (callee) chains; reached only via
                // a cursor whose mode is Callees (see bootstrap below).
                let (paginated_items, paginated_next) = match paginate_focus_chains(
                    &output.outgoing_chains,
                    PaginationMode::Callees,
                    offset,
                    page_size,
                ) {
                    Ok(v) => v,
                    Err(e) => return Ok(err_to_tool_result(e)),
                };

                if paginated_next.is_some() || offset > 0 || !verbose {
                    let base_path = Path::new(&params.path);
                    output.formatted = format_focused_paginated(
                        &paginated_items,
                        output.outgoing_chains.len(),
                        PaginationMode::Callees,
                        &params.symbol,
                        &output.prod_chains,
                        &output.test_chains,
                        &output.outgoing_chains,
                        output.def_count,
                        offset,
                        Some(base_path),
                        verbose,
                    );
                    paginated_next
                } else {
                    None
                }
            }
            PaginationMode::Default => {
                unreachable!("SymbolFocus should only use Callers or Callees modes")
            }
        };

        // When callers are exhausted and callees exist, bootstrap callee pagination
        // by emitting a {mode:callees, offset:0} cursor. This makes PaginationMode::Callees
        // reachable; without it the branch was dead code. Suppressed in summary mode
        // because summary and pagination are mutually exclusive.
        if callee_cursor.is_none()
            && cursor_mode == PaginationMode::Callers
            && !output.outgoing_chains.is_empty()
            && !use_summary
            && let Ok(cursor) = encode_cursor(&CursorData {
                mode: PaginationMode::Callees,
                offset: 0,
            })
        {
            callee_cursor = Some(cursor);
        }

        // Update next_cursor in output
        output.next_cursor = callee_cursor.clone();

        // Build final text output with pagination cursor if present
        let mut final_text = output.formatted.clone();
        if let Some(cursor) = callee_cursor {
            final_text.push('\n');
            final_text.push_str(&format!("NEXT_CURSOR: {}", cursor));
        }

        // NOTE(review): serialization failures degrade structured_content to
        // JSON null here (unlike analyze_module, which reports them) --
        // confirm intended.
        let mut result = CallToolResult::success(vec![Content::text(final_text.clone())])
            .with_meta(Some(no_cache_meta()));
        let structured = serde_json::to_value(&output).unwrap_or(Value::Null);
        result.structured_content = Some(structured);
        // Emit the success metric for this call.
        let _dur = _t_start.elapsed().as_millis() as u64;
        self.metrics_tx.send(crate::metrics::MetricEvent {
            ts: crate::metrics::unix_ms(),
            tool: "analyze_symbol",
            duration_ms: _dur,
            output_chars: final_text.chars().count(),
            param_path_depth: crate::metrics::path_component_count(&_param_path),
            max_depth: _max_depth_val,
            result: "ok",
            error_type: None,
            session_id: _sid,
            seq: Some(_seq),
        });
        Ok(result)
    }
1085
1086    #[instrument(skip(self))]
1087    #[tool(
1088        name = "analyze_module",
1089        description = "Index functions and imports in a single source file with minimal token cost. Returns name, line_count, language, function names with line numbers, and import list only -- no signatures, no types, no call graphs, no references. ~75% smaller output than analyze_file. Use analyze_file when you need function signatures, types, or class details; use analyze_module when you only need a function/import index to orient in a file or survey many files in sequence. Use analyze_directory for multi-file overviews; use analyze_symbol to trace call graphs for a specific function. Supported languages: Rust, Go, Java, Python, TypeScript, TSX, Fortran; unsupported extensions return an error. Example queries: What functions are defined in src/analyze.rs?; List all imports in src/lib.rs. Pagination, summary, force, and verbose parameters are not supported by this tool.",
1090        output_schema = schema_for_type::<types::ModuleInfo>(),
1091        annotations(
1092            title = "Analyze Module",
1093            read_only_hint = true,
1094            destructive_hint = false,
1095            idempotent_hint = true,
1096            open_world_hint = false
1097        )
1098    )]
1099    async fn analyze_module(
1100        &self,
1101        params: Parameters<AnalyzeModuleParams>,
1102        _context: RequestContext<RoleServer>,
1103    ) -> Result<CallToolResult, ErrorData> {
1104        let params = params.0;
1105        let _t_start = std::time::Instant::now();
1106        let _param_path = params.path.clone();
1107        let _seq = self
1108            .session_call_seq
1109            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
1110        let _sid = self.session_id.lock().await.clone();
1111
1112        // Issue 340: Guard against directory paths
1113        if std::fs::metadata(&params.path)
1114            .map(|m| m.is_dir())
1115            .unwrap_or(false)
1116        {
1117            let _dur = _t_start.elapsed().as_millis() as u64;
1118            self.metrics_tx.send(crate::metrics::MetricEvent {
1119                ts: crate::metrics::unix_ms(),
1120                tool: "analyze_module",
1121                duration_ms: _dur,
1122                output_chars: 0,
1123                param_path_depth: crate::metrics::path_component_count(&_param_path),
1124                max_depth: None,
1125                result: "error",
1126                error_type: Some("invalid_params".to_string()),
1127                session_id: _sid.clone(),
1128                seq: Some(_seq),
1129            });
1130            return Ok(err_to_tool_result(ErrorData::new(
1131                rmcp::model::ErrorCode::INVALID_PARAMS,
1132                format!(
1133                    "'{}' is a directory. Use analyze_directory to analyze a directory, or pass a specific file path to analyze_module.",
1134                    params.path
1135                ),
1136                error_meta("validation", false, "use analyze_directory for directories"),
1137            )));
1138        }
1139
1140        let module_info = match analyze::analyze_module_file(&params.path).map_err(|e| {
1141            ErrorData::new(
1142                rmcp::model::ErrorCode::INVALID_PARAMS,
1143                format!("Failed to analyze module: {}", e),
1144                error_meta(
1145                    "validation",
1146                    false,
1147                    "ensure file exists, is readable, and has a supported extension",
1148                ),
1149            )
1150        }) {
1151            Ok(v) => v,
1152            Err(e) => return Ok(err_to_tool_result(e)),
1153        };
1154
1155        let text = format_module_info(&module_info);
1156        let mut result = CallToolResult::success(vec![Content::text(text.clone())])
1157            .with_meta(Some(no_cache_meta()));
1158        let structured = match serde_json::to_value(&module_info).map_err(|e| {
1159            ErrorData::new(
1160                rmcp::model::ErrorCode::INTERNAL_ERROR,
1161                format!("serialization failed: {}", e),
1162                error_meta("internal", false, "report this as a bug"),
1163            )
1164        }) {
1165            Ok(v) => v,
1166            Err(e) => return Ok(err_to_tool_result(e)),
1167        };
1168        result.structured_content = Some(structured);
1169        let _dur = _t_start.elapsed().as_millis() as u64;
1170        self.metrics_tx.send(crate::metrics::MetricEvent {
1171            ts: crate::metrics::unix_ms(),
1172            tool: "analyze_module",
1173            duration_ms: _dur,
1174            output_chars: text.chars().count(),
1175            param_path_depth: crate::metrics::path_component_count(&_param_path),
1176            max_depth: None,
1177            result: "ok",
1178            error_type: None,
1179            session_id: _sid,
1180            seq: Some(_seq),
1181        });
1182        Ok(result)
1183    }
1184}
1185
// MCP protocol surface for the analyzer: server info/instructions, session
// bootstrap (including the log-drain task), cancellation logging, argument
// completion, and log-level control. `#[tool_handler]` wires in the tool
// router generated from the `#[tool]` methods above.
#[tool_handler]
impl ServerHandler for CodeAnalyzer {
    fn get_info(&self) -> InitializeResult {
        // Client-facing usage instructions; the excluded-directory list is
        // interpolated from the crate-wide EXCLUDED_DIRS constant so the text
        // stays in sync with actual traversal behavior.
        let excluded = crate::EXCLUDED_DIRS.join(", ");
        let instructions = format!(
            "Recommended workflow for unknown repositories:\n\
            1. Start with analyze_directory(path=<repo_root>, max_depth=2, summary=true) to identify the source package directory \
            (typically the largest directory by file count; exclude {excluded}).\n\
            2. Re-run analyze_directory(path=<source_package>, max_depth=2, summary=true) for a module map with per-package class and function counts.\n\
            3. Use analyze_file on key files identified in step 2 (prefer files with high class counts for framework entry points).\n\
            4. Use analyze_symbol to trace call graphs for specific functions found in step 3.\n\
            Use analyze_module for a minimal schema (name, line count, functions, imports) when token budget is critical. \
            Prefer summary=true on large directories (1000+ files). Set max_depth=2 for the first call; increase only if packages are too large to differentiate. \
            Paginate with cursor/page_size. For subagents: DISABLE_PROMPT_CACHING=1."
        );
        // Advertise logging, tools (with list-changed), and completions.
        let capabilities = ServerCapabilities::builder()
            .enable_logging()
            .enable_tools()
            .enable_tool_list_changed()
            .enable_completions()
            .build();
        let server_info = Implementation::new("code-analyze-mcp", env!("CARGO_PKG_VERSION"))
            .with_title("Code Analyze MCP")
            .with_description("MCP server for code structure analysis using tree-sitter");
        InitializeResult::new(capabilities)
            .with_server_info(server_info)
            .with_instructions(&instructions)
    }

    // Session bootstrap: store the peer handle, mint a fresh session id,
    // reset the per-session call counter, and spawn the batched log-drain
    // task that forwards buffered log events to the client.
    async fn on_initialized(&self, context: NotificationContext<RoleServer>) {
        // Store the peer first so the spawned drain task (below) observes it.
        let mut peer_lock = self.peer.lock().await;
        *peer_lock = Some(context.peer.clone());
        drop(peer_lock);

        // Generate session_id in MILLIS-N format (epoch millis plus a global
        // monotonically increasing counter to disambiguate same-millisecond
        // sessions).
        let millis = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_millis() as u64;
        let counter = GLOBAL_SESSION_COUNTER.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        let sid = format!("{}-{}", millis, counter);
        {
            let mut session_id_lock = self.session_id.lock().await;
            *session_id_lock = Some(sid);
        }
        // Call sequence numbers restart at 0 for each session.
        self.session_call_seq
            .store(0, std::sync::atomic::Ordering::Relaxed);

        // Spawn consumer task to drain log events from channel with batching.
        let peer = self.peer.clone();
        let event_rx = self.event_rx.clone();

        tokio::spawn(async move {
            // Take exclusive ownership of the receiver; if another task
            // already took it, this task exits immediately.
            let rx = {
                let mut rx_lock = event_rx.lock().await;
                rx_lock.take()
            };

            if let Some(mut receiver) = rx {
                let mut buffer = Vec::with_capacity(64);
                loop {
                    // Drain up to 64 events from channel
                    receiver.recv_many(&mut buffer, 64).await;

                    if buffer.is_empty() {
                        // Channel closed, exit consumer task
                        break;
                    }

                    // Acquire peer lock once per batch
                    // NOTE(review): events are drained from `buffer` only when
                    // a peer is present; if the peer were None they would
                    // accumulate and the empty-buffer exit check above could
                    // never fire after the channel closes. The peer is set
                    // just before this task is spawned, so in practice it is
                    // Some here -- confirm that invariant holds.
                    let peer_lock = peer.lock().await;
                    if let Some(peer) = peer_lock.as_ref() {
                        for log_event in buffer.drain(..) {
                            let notification = ServerNotification::LoggingMessageNotification(
                                Notification::new(LoggingMessageNotificationParam {
                                    level: log_event.level,
                                    logger: Some(log_event.logger),
                                    data: log_event.data,
                                }),
                            );
                            // Failed sends are logged and skipped; one bad
                            // notification does not stop the drain loop.
                            if let Err(e) = peer.send_notification(notification).await {
                                warn!("Failed to send logging notification: {}", e);
                            }
                        }
                    }
                }
            }
        });
    }

    // Cancellation notifications are logged only; per-request cancellation is
    // handled via the request context's token where supported.
    #[instrument(skip(self, _context))]
    async fn on_cancelled(
        &self,
        notification: CancelledNotificationParam,
        _context: NotificationContext<RoleServer>,
    ) {
        tracing::info!(
            request_id = ?notification.request_id,
            reason = ?notification.reason,
            "Received cancellation notification"
        );
    }

    // Argument completion for tool parameters. Supports "path" (filesystem
    // completion rooted at the server's working directory) and "symbol"
    // (symbol names from the analysis cache, which require the companion
    // "path" argument from the completion context). Unknown argument names
    // complete to nothing.
    #[instrument(skip(self, _context))]
    async fn complete(
        &self,
        request: CompleteRequestParams,
        _context: RequestContext<RoleServer>,
    ) -> Result<CompleteResult, ErrorData> {
        // Dispatch on argument name: "path" or "symbol"
        let argument_name = &request.argument.name;
        let argument_value = &request.argument.value;

        let completions = match argument_name.as_str() {
            "path" => {
                // Path completions: use current directory as root
                let root = Path::new(".");
                completion::path_completions(root, argument_value)
            }
            "symbol" => {
                // Symbol completions: need the path argument from context
                let path_arg = request
                    .context
                    .as_ref()
                    .and_then(|ctx| ctx.get_argument("path"));

                match path_arg {
                    Some(path_str) => {
                        let path = Path::new(path_str);
                        completion::symbol_completions(&self.cache, path, argument_value)
                    }
                    // Without a path there is nothing to look symbols up in.
                    None => Vec::new(),
                }
            }
            _ => Vec::new(),
        };

        // Create CompletionInfo with has_more flag if >100 results.
        // total_count is captured before truncation so it reflects the full
        // candidate count, not the capped page.
        let total_count = completions.len() as u32;
        let (values, has_more) = if completions.len() > 100 {
            (completions.into_iter().take(100).collect(), true)
        } else {
            (completions, false)
        };

        let completion_info =
            match CompletionInfo::with_pagination(values, Some(total_count), has_more) {
                Ok(info) => info,
                Err(_) => {
                    // Graceful degradation: return empty on error
                    CompletionInfo::with_all_values(Vec::new())
                        .unwrap_or_else(|_| CompletionInfo::new(Vec::new()).unwrap())
                }
            };

        Ok(CompleteResult::new(completion_info))
    }

    // Map the MCP (syslog-style) logging level onto tracing's coarser
    // LevelFilter: Notice has no tracing equivalent and maps to INFO;
    // Critical/Alert/Emergency all collapse to ERROR.
    async fn set_level(
        &self,
        params: SetLevelRequestParams,
        _context: RequestContext<RoleServer>,
    ) -> Result<(), ErrorData> {
        let level_filter = match params.level {
            LoggingLevel::Debug => LevelFilter::DEBUG,
            LoggingLevel::Info => LevelFilter::INFO,
            LoggingLevel::Notice => LevelFilter::INFO,
            LoggingLevel::Warning => LevelFilter::WARN,
            LoggingLevel::Error => LevelFilter::ERROR,
            LoggingLevel::Critical => LevelFilter::ERROR,
            LoggingLevel::Alert => LevelFilter::ERROR,
            LoggingLevel::Emergency => LevelFilter::ERROR,
        };

        // NOTE(review): `.unwrap()` on the std Mutex will panic if the filter
        // lock was poisoned by a panicking holder -- confirm acceptable.
        let mut filter_lock = self.log_level_filter.lock().unwrap();
        *filter_lock = level_filter;
        Ok(())
    }
}
1365
#[cfg(test)]
mod tests {
    use super::*;

    // emit_progress must be a no-op (no panic, no hang) when no peer has been
    // attached yet -- e.g. a tool call arriving before on_initialized stores
    // the peer handle.
    #[tokio::test]
    async fn test_emit_progress_none_peer_is_noop() {
        // Channel halves are bound to locals (`_tx`, `_metrics_rx`) so they
        // stay alive for the duration of the test.
        let peer = Arc::new(TokioMutex::new(None));
        let log_level_filter = Arc::new(Mutex::new(LevelFilter::INFO));
        let (_tx, rx) = tokio::sync::mpsc::unbounded_channel();
        let (metrics_tx, _metrics_rx) = tokio::sync::mpsc::unbounded_channel();
        let analyzer = CodeAnalyzer::new(
            peer,
            log_level_filter,
            rx,
            crate::metrics::MetricsSender(metrics_tx),
        );
        let token = ProgressToken(NumberOrString::String("test".into()));
        // Should complete without panic
        analyzer
            .emit_progress(None, &token, 0.0, 10.0, "test".to_string())
            .await;
    }

    // Regression test: with verbose=true (and summary/force unset) on a small
    // directory, the formatted overview must use the paginated FILES layout
    // and must NOT contain the compact SUMMARY: block.
    #[tokio::test]
    async fn test_handle_overview_mode_verbose_no_summary_block() {
        use crate::pagination::{PaginationMode, paginate_slice};
        use crate::types::{AnalyzeDirectoryParams, OutputControlParams, PaginationParams};
        use tempfile::TempDir;

        // One-file temp directory keeps the analysis output well under
        // SIZE_LIMIT so the auto-summary path cannot trigger.
        let tmp = TempDir::new().unwrap();
        std::fs::write(tmp.path().join("main.rs"), "fn main() {}").unwrap();

        let peer = Arc::new(TokioMutex::new(None));
        let log_level_filter = Arc::new(Mutex::new(LevelFilter::INFO));
        let (_tx, rx) = tokio::sync::mpsc::unbounded_channel();
        let (metrics_tx, _metrics_rx) = tokio::sync::mpsc::unbounded_channel();
        let analyzer = CodeAnalyzer::new(
            peer,
            log_level_filter,
            rx,
            crate::metrics::MetricsSender(metrics_tx),
        );

        // verbose=true with summary/force unset exercises the fix site.
        let params = AnalyzeDirectoryParams {
            path: tmp.path().to_str().unwrap().to_string(),
            max_depth: None,
            pagination: PaginationParams {
                cursor: None,
                page_size: None,
            },
            output_control: OutputControlParams {
                summary: None,
                force: None,
                verbose: Some(true),
            },
        };

        let ct = tokio_util::sync::CancellationToken::new();
        let output = analyzer.handle_overview_mode(&params, ct).await.unwrap();

        // Replicate the handler's formatting path (the fix site)
        let use_summary = output.formatted.len() > SIZE_LIMIT; // summary=None, force=None, small output
        let paginated =
            paginate_slice(&output.files, 0, DEFAULT_PAGE_SIZE, PaginationMode::Default).unwrap();
        let verbose = true;
        let formatted = if !use_summary {
            format_structure_paginated(
                &paginated.items,
                paginated.total,
                params.max_depth,
                Some(std::path::Path::new(&params.path)),
                verbose,
            )
        } else {
            output.formatted.clone()
        };

        // After the fix: verbose=true must not emit the SUMMARY: block
        assert!(
            !formatted.contains("SUMMARY:"),
            "verbose=true must not emit SUMMARY: block; got: {}",
            &formatted[..formatted.len().min(300)]
        );
        assert!(
            formatted.contains("PAGINATED:"),
            "verbose=true must emit PAGINATED: header"
        );
        assert!(
            formatted.contains("FILES [LOC, FUNCTIONS, CLASSES]"),
            "verbose=true must emit FILES section header"
        );
    }
}