1pub mod analyze;
17pub mod cache;
18pub mod completion;
19pub mod formatter;
20pub mod graph;
21pub mod lang;
22pub mod languages;
23pub mod logging;
24pub mod pagination;
25pub mod parser;
26pub mod test_detection;
27pub mod traversal;
28pub mod types;
29
30use cache::AnalysisCache;
31use formatter::{
32 format_file_details_paginated, format_file_details_summary, format_focused_paginated,
33 format_structure_paginated, format_summary,
34};
35use logging::LogEvent;
36use pagination::{
37 CursorData, DEFAULT_PAGE_SIZE, PaginationMode, decode_cursor, encode_cursor, paginate_slice,
38};
39use rmcp::handler::server::tool::{ToolRouter, schema_for_type};
40use rmcp::handler::server::wrapper::Parameters;
41use rmcp::model::{
42 CallToolResult, CancelledNotificationParam, CompleteRequestParams, CompleteResult,
43 CompletionInfo, Content, ErrorData, Implementation, InitializeResult, LoggingLevel,
44 LoggingMessageNotificationParam, Notification, NumberOrString, ProgressNotificationParam,
45 ProgressToken, ServerCapabilities, ServerNotification, SetLevelRequestParams,
46};
47use rmcp::service::{NotificationContext, RequestContext};
48use rmcp::{Peer, RoleServer, ServerHandler, tool, tool_handler, tool_router};
49use serde_json::Value;
50use std::path::Path;
51use std::sync::{Arc, Mutex};
52use tokio::sync::{Mutex as TokioMutex, mpsc};
53use tracing::{instrument, warn};
54use tracing_subscriber::filter::LevelFilter;
55use traversal::walk_directory;
56use types::{AnalysisMode, AnalyzeDirectoryParams, AnalyzeFileParams, AnalyzeSymbolParams};
57
/// Character cap on formatted tool output; above this the handlers either
/// auto-summarize or require summary=true / force=true from the caller.
const SIZE_LIMIT: usize = 50_000;
59
60fn paginate_focus_chains(
63 chains: &[graph::CallChain],
64 mode: PaginationMode,
65 offset: usize,
66 page_size: usize,
67) -> Result<(Vec<graph::CallChain>, Option<String>), ErrorData> {
68 let paginated = paginate_slice(chains, offset, page_size, mode)
69 .map_err(|e| ErrorData::new(rmcp::model::ErrorCode::INTERNAL_ERROR, e.to_string(), None))?;
70
71 if paginated.next_cursor.is_none() && offset == 0 {
72 return Ok((paginated.items, None));
73 }
74
75 let next = if let Some(raw_cursor) = paginated.next_cursor {
76 let decoded = decode_cursor(&raw_cursor).map_err(|e| {
77 ErrorData::new(rmcp::model::ErrorCode::INTERNAL_ERROR, e.to_string(), None)
78 })?;
79 Some(
80 encode_cursor(&CursorData {
81 mode,
82 offset: decoded.offset,
83 })
84 .map_err(|e| {
85 ErrorData::new(rmcp::model::ErrorCode::INTERNAL_ERROR, e.to_string(), None)
86 })?,
87 )
88 } else {
89 None
90 };
91
92 Ok((paginated.items, next))
93}
94
/// MCP server handler exposing the code-analysis tools; all shared state is
/// behind `Arc`s, so clones observe the same cache, peer, and log filter.
#[derive(Clone)]
pub struct CodeAnalyzer {
    // Dispatch table generated by the #[tool_router] macro.
    tool_router: ToolRouter<Self>,
    // Cache of per-file analysis results (capacity 100, see `new`).
    cache: AnalysisCache,
    // Connected client peer; None until on_initialized stores it.
    peer: Arc<TokioMutex<Option<Peer<RoleServer>>>>,
    // Log level set via logging/setLevel, shared with the tracing setup.
    log_level_filter: Arc<Mutex<LevelFilter>>,
    // Log-event receiver; Option so the forwarding task can take() it once.
    event_rx: Arc<TokioMutex<Option<mpsc::UnboundedReceiver<LogEvent>>>>,
}
103
104#[tool_router]
105impl CodeAnalyzer {
106 pub fn new(
107 peer: Arc<TokioMutex<Option<Peer<RoleServer>>>>,
108 log_level_filter: Arc<Mutex<LevelFilter>>,
109 event_rx: mpsc::UnboundedReceiver<LogEvent>,
110 ) -> Self {
111 CodeAnalyzer {
112 tool_router: Self::tool_router(),
113 cache: AnalysisCache::new(100),
114 peer,
115 log_level_filter,
116 event_rx: Arc::new(TokioMutex::new(Some(event_rx))),
117 }
118 }
119
120 #[instrument(skip(self))]
121 async fn emit_progress(
122 &self,
123 peer: Option<Peer<RoleServer>>,
124 token: &ProgressToken,
125 progress: f64,
126 total: f64,
127 message: String,
128 ) {
129 if let Some(peer) = peer {
130 let notification = ServerNotification::ProgressNotification(Notification::new(
131 ProgressNotificationParam {
132 progress_token: token.clone(),
133 progress,
134 total: Some(total),
135 message: Some(message),
136 },
137 ));
138 if let Err(e) = peer.send_notification(notification).await {
139 warn!("Failed to send progress notification: {}", e);
140 }
141 }
142 }
143
    /// Runs a full directory analysis (overview mode) on a blocking task
    /// while polling a shared counter to stream progress notifications.
    ///
    /// Cancellation is cooperative: `ct` is cloned into the worker, and the
    /// polling loop below also watches it so we stop emitting progress as
    /// soon as the request is cancelled.
    #[instrument(skip(self, params, ct))]
    async fn handle_overview_mode(
        &self,
        params: &AnalyzeDirectoryParams,
        ct: tokio_util::sync::CancellationToken,
    ) -> Result<analyze::AnalysisOutput, ErrorData> {
        let path = Path::new(&params.path);
        // Counter the worker bumps per analyzed file; polled for progress.
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
        let counter_clone = counter.clone();
        let path_owned = path.to_path_buf();
        let max_depth = params.max_depth;
        let ct_clone = ct.clone();

        let entries = walk_directory(path, max_depth).map_err(|e| {
            ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                format!("Failed to walk directory: {}", e),
                None,
            )
        })?;

        // Progress denominator counts files only, not directories.
        let total_files = entries.iter().filter(|e| !e.is_dir).count();

        // Parsing is CPU-bound, so it runs off the async runtime.
        let handle = tokio::task::spawn_blocking(move || {
            analyze::analyze_directory_with_progress(&path_owned, entries, counter_clone, ct_clone)
        });

        // Progress token unique per invocation (nanosecond timestamp suffix;
        // falls back to 0 if the clock is before the epoch).
        let token = ProgressToken(NumberOrString::String(
            format!(
                "analyze-overview-{}",
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_nanos())
                    .unwrap_or(0)
            )
            .into(),
        ));
        let peer = self.peer.lock().await.clone();
        let mut last_progress = 0usize;
        let mut cancelled = false;
        // Poll every 100ms; notify only when the count actually changed, and
        // exit once the worker finishes or the request is cancelled.
        loop {
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
            if ct.is_cancelled() {
                cancelled = true;
                break;
            }
            let current = counter.load(std::sync::atomic::Ordering::Relaxed);
            if current != last_progress && total_files > 0 {
                self.emit_progress(
                    peer.clone(),
                    &token,
                    current as f64,
                    total_files as f64,
                    format!("Analyzing {}/{} files", current, total_files),
                )
                .await;
                last_progress = current;
            }
            if handle.is_finished() {
                break;
            }
        }

        // Final 100% notification — skipped on cancellation or empty dirs.
        if !cancelled && total_files > 0 {
            self.emit_progress(
                peer.clone(),
                &token,
                total_files as f64,
                total_files as f64,
                format!("Completed analyzing {} files", total_files),
            )
            .await;
        }

        // Unpack the worker result: success, cooperative cancellation,
        // analysis failure, or task panic/join failure.
        match handle.await {
            Ok(Ok(output)) => Ok(output),
            Ok(Err(analyze::AnalyzeError::Cancelled)) => Err(ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                "Analysis cancelled".to_string(),
                None,
            )),
            Ok(Err(e)) => Err(ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                format!("Error analyzing directory: {}", e),
                None,
            )),
            Err(e) => Err(ErrorData::new(
                rmcp::model::ErrorCode::INTERNAL_ERROR,
                format!("Task join error: {}", e),
                None,
            )),
        }
    }
245
246 #[instrument(skip(self, params))]
249 async fn handle_file_details_mode(
250 &self,
251 params: &AnalyzeFileParams,
252 ) -> Result<std::sync::Arc<analyze::FileAnalysisOutput>, ErrorData> {
253 let cache_key = std::fs::metadata(¶ms.path).ok().and_then(|meta| {
255 meta.modified().ok().map(|mtime| cache::CacheKey {
256 path: std::path::PathBuf::from(¶ms.path),
257 modified: mtime,
258 mode: AnalysisMode::FileDetails,
259 })
260 });
261
262 if let Some(ref key) = cache_key
264 && let Some(cached) = self.cache.get(key)
265 {
266 return Ok(cached);
267 }
268
269 match analyze::analyze_file(¶ms.path, params.ast_recursion_limit) {
271 Ok(output) => {
272 let arc_output = std::sync::Arc::new(output);
273 if let Some(ref key) = cache_key {
274 self.cache.put(key.clone(), arc_output.clone());
275 }
276 Ok(arc_output)
277 }
278 Err(e) => Err(ErrorData::new(
279 rmcp::model::ErrorCode::INTERNAL_ERROR,
280 format!("Error analyzing file: {}", e),
281 None,
282 )),
283 }
284 }
285
    /// Runs a symbol-focused (call graph) analysis on a blocking task with
    /// progress reporting, then enforces the output size policy.
    ///
    /// Size policy after the analysis completes:
    /// - summary unset + not forced + output over SIZE_LIMIT: re-run the
    ///   whole analysis in summary mode and substitute its formatted text;
    ///   if that retry fails, return an INVALID_REQUEST error instead.
    /// - summary explicitly false + not forced + over SIZE_LIMIT: reject
    ///   with guidance (force=true / summary=true / narrower scope).
    #[instrument(skip(self, params, ct))]
    async fn handle_focused_mode(
        &self,
        params: &AnalyzeSymbolParams,
        ct: tokio_util::sync::CancellationToken,
    ) -> Result<analyze::FocusedAnalysisOutput, ErrorData> {
        // Default to tracing one level of callers/callees.
        let follow_depth = params.follow_depth.unwrap_or(1);
        // Counter the worker bumps per analyzed file; polled for progress.
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
        let counter_clone = counter.clone();
        let path = Path::new(&params.path);
        let path_owned = path.to_path_buf();
        let max_depth = params.max_depth;
        let symbol_owned = params.symbol.clone();
        let ast_recursion_limit = params.ast_recursion_limit;
        let ct_clone = ct.clone();

        // Summary is honored at analysis time only when explicitly requested
        // and not overridden by force=true.
        let use_summary_for_task = params.output_control.force != Some(true)
            && params.output_control.summary == Some(true);

        // Best-effort file count for the progress denominator; a walk error
        // just disables progress reporting (total_files == 0).
        let total_files = match walk_directory(path, max_depth) {
            Ok(entries) => entries.iter().filter(|e| !e.is_dir).count(),
            Err(_) => 0,
        };

        // CPU-bound parsing runs off the async runtime.
        let handle = tokio::task::spawn_blocking(move || {
            analyze::analyze_focused_with_progress(
                &path_owned,
                &symbol_owned,
                follow_depth,
                max_depth,
                ast_recursion_limit,
                counter_clone,
                ct_clone,
                use_summary_for_task,
            )
        });

        // Progress token unique per invocation (nanosecond timestamp suffix).
        let token = ProgressToken(NumberOrString::String(
            format!(
                "analyze-symbol-{}",
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_nanos())
                    .unwrap_or(0)
            )
            .into(),
        ));
        let peer = self.peer.lock().await.clone();
        let mut last_progress = 0usize;
        let mut cancelled = false;
        // Poll every 100ms; notify only when the count changed, until the
        // worker finishes or the request is cancelled.
        loop {
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
            if ct.is_cancelled() {
                cancelled = true;
                break;
            }
            let current = counter.load(std::sync::atomic::Ordering::Relaxed);
            if current != last_progress && total_files > 0 {
                self.emit_progress(
                    peer.clone(),
                    &token,
                    current as f64,
                    total_files as f64,
                    format!(
                        "Analyzing {}/{} files for symbol '{}'",
                        current, total_files, params.symbol
                    ),
                )
                .await;
                last_progress = current;
            }
            if handle.is_finished() {
                break;
            }
        }

        // Final 100% notification — skipped on cancellation or empty walks.
        if !cancelled && total_files > 0 {
            self.emit_progress(
                peer.clone(),
                &token,
                total_files as f64,
                total_files as f64,
                format!(
                    "Completed analyzing {} files for symbol '{}'",
                    total_files, params.symbol
                ),
            )
            .await;
        }

        // Unpack: success, cooperative cancellation, analysis failure, or
        // task panic/join failure.
        let mut output = match handle.await {
            Ok(Ok(output)) => output,
            Ok(Err(analyze::AnalyzeError::Cancelled)) => {
                return Err(ErrorData::new(
                    rmcp::model::ErrorCode::INTERNAL_ERROR,
                    "Analysis cancelled".to_string(),
                    None,
                ));
            }
            Ok(Err(e)) => {
                return Err(ErrorData::new(
                    rmcp::model::ErrorCode::INTERNAL_ERROR,
                    format!("Error analyzing symbol: {}", e),
                    None,
                ));
            }
            Err(e) => {
                return Err(ErrorData::new(
                    rmcp::model::ErrorCode::INTERNAL_ERROR,
                    format!("Task join error: {}", e),
                    None,
                ));
            }
        };

        // Auto-summarize: caller expressed no preference, output is too big,
        // and force=true was not set — re-run the analysis in summary mode.
        // NOTE(review): this repeats the full analysis rather than reusing
        // the computed chains; presumably summary formatting needs the
        // analysis pass itself — confirm before optimizing.
        if params.output_control.summary.is_none()
            && params.output_control.force != Some(true)
            && output.formatted.len() > SIZE_LIMIT
        {
            let path_owned2 = Path::new(&params.path).to_path_buf();
            let symbol_owned2 = params.symbol.clone();
            let follow_depth2 = params.follow_depth.unwrap_or(1);
            let max_depth2 = params.max_depth;
            let ast_recursion_limit2 = params.ast_recursion_limit;
            let counter2 = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
            let ct2 = ct.clone();
            let summary_result = tokio::task::spawn_blocking(move || {
                analyze::analyze_focused_with_progress(
                    &path_owned2,
                    &symbol_owned2,
                    follow_depth2,
                    max_depth2,
                    ast_recursion_limit2,
                    counter2,
                    ct2,
                    true,
                )
            })
            .await;
            match summary_result {
                Ok(Ok(summary_output)) => {
                    // Keep the full analysis data, swap in the compact text.
                    output.formatted = summary_output.formatted;
                }
                _ => {
                    // Summary retry failed: surface the size problem instead.
                    // Rough heuristic: ~4 chars per token.
                    let estimated_tokens = output.formatted.len() / 4;
                    let message = format!(
                        "Output exceeds 50K chars ({} chars, ~{} tokens). Use summary=true or force=true.",
                        output.formatted.len(),
                        estimated_tokens
                    );
                    return Err(ErrorData::new(
                        rmcp::model::ErrorCode::INVALID_REQUEST,
                        message,
                        None,
                    ));
                }
            }
        } else if output.formatted.len() > SIZE_LIMIT
            && params.output_control.force != Some(true)
            && params.output_control.summary == Some(false)
        {
            // Caller explicitly declined summary but the output is too big.
            let estimated_tokens = output.formatted.len() / 4;
            let message = format!(
                "Output exceeds 50K chars ({} chars, ~{} tokens). Use one of:\n\
                 - force=true to return full output\n\
                 - summary=true to get compact summary\n\
                 - Narrow your scope (smaller directory, specific file)",
                output.formatted.len(),
                estimated_tokens
            );
            return Err(ErrorData::new(
                rmcp::model::ErrorCode::INVALID_REQUEST,
                message,
                None,
            ));
        }

        Ok(output)
    }
477
    /// MCP tool: analyze a directory tree and return its structure/metrics.
    ///
    /// Pipeline: run the overview analysis; optionally substitute a compact
    /// summary (explicit summary=true, or auto when over SIZE_LIMIT unless
    /// force=true); then paginate the file list when a cursor or page
    /// boundary applies, surfacing the continuation cursor both in the
    /// structured output and as a NEXT_CURSOR trailer in the text.
    #[instrument(skip(self, context))]
    #[tool(
        name = "analyze_directory",
        description = "Analyze directory structure and code metrics. Returns a tree with LOC, function count, class count, and test file markers. Respects .gitignore. Use max_depth to limit traversal depth (recommended 2-3 for large monorepos). Output auto-summarizes at 50K chars; use summary=true to force compact output. Paginate large results with cursor and page_size.",
        output_schema = schema_for_type::<analyze::AnalysisOutput>(),
        annotations(
            title = "Analyze Directory",
            read_only_hint = true,
            destructive_hint = false,
            idempotent_hint = true,
            open_world_hint = false
        )
    )]
    async fn analyze_directory(
        &self,
        params: Parameters<AnalyzeDirectoryParams>,
        context: RequestContext<RoleServer>,
    ) -> Result<CallToolResult, ErrorData> {
        let params = params.0;
        let ct = context.ct.clone();

        let mut output = self.handle_overview_mode(&params, ct).await?;

        // Precedence: force > explicit summary flag > size heuristic.
        let use_summary = if params.output_control.force == Some(true) {
            false
        } else if params.output_control.summary == Some(true) {
            true
        } else if params.output_control.summary == Some(false) {
            false
        } else {
            output.formatted.len() > SIZE_LIMIT
        };

        if use_summary {
            output.formatted = format_summary(
                &output.entries,
                &output.files,
                params.max_depth,
                Some(Path::new(&params.path)),
            );
        }

        // The request cursor, when present, carries the resume offset; a bad
        // cursor is the client's fault (INVALID_PARAMS).
        let page_size = params.pagination.page_size.unwrap_or(DEFAULT_PAGE_SIZE);
        let offset = if let Some(ref cursor_str) = params.pagination.cursor {
            let cursor_data = decode_cursor(cursor_str).map_err(|e| {
                ErrorData::new(rmcp::model::ErrorCode::INVALID_PARAMS, e.to_string(), None)
            })?;
            cursor_data.offset
        } else {
            0
        };

        let paginated = paginate_slice(&output.files, offset, page_size, PaginationMode::Default)
            .map_err(|e| {
                ErrorData::new(rmcp::model::ErrorCode::INTERNAL_ERROR, e.to_string(), None)
            })?;

        // Re-render the text only when pagination is actually in effect
        // (more pages follow, or this is a continuation page). This replaces
        // any summary text produced above.
        if paginated.next_cursor.is_some() || offset > 0 {
            output.formatted = format_structure_paginated(
                &paginated.items,
                paginated.total,
                params.max_depth,
                Some(Path::new(&params.path)),
            );
        }

        output.next_cursor = paginated.next_cursor.clone();

        // Mirror the cursor into the plain-text payload for clients that
        // ignore structured content.
        let mut final_text = output.formatted.clone();
        if let Some(cursor) = paginated.next_cursor {
            final_text.push('\n');
            final_text.push_str(&format!("NEXT_CURSOR: {}", cursor));
        }

        let mut result = CallToolResult::success(vec![Content::text(final_text)]);
        let structured = serde_json::to_value(&output).unwrap_or(Value::Null);
        result.structured_content = Some(structured);
        Ok(result)
    }
563
    /// MCP tool: extract the semantic structure of a single source file.
    ///
    /// Uses the mtime-keyed cache via `handle_file_details_mode`, applies
    /// the summary/force size policy to the formatted text, then paginates
    /// the function list when a cursor or page boundary applies.
    #[instrument(skip(self, context))]
    #[tool(
        name = "analyze_file",
        description = "Extract semantic structure from a single source file. Returns functions with signatures, types, and line ranges; class and method definitions with inheritance, fields, and imports. Supports pagination for large files via cursor/page_size. Use summary=true for compact output.",
        output_schema = schema_for_type::<analyze::FileAnalysisOutput>(),
        annotations(
            title = "Analyze File",
            read_only_hint = true,
            destructive_hint = false,
            idempotent_hint = true,
            open_world_hint = false
        )
    )]
    async fn analyze_file(
        &self,
        params: Parameters<AnalyzeFileParams>,
        context: RequestContext<RoleServer>,
    ) -> Result<CallToolResult, ErrorData> {
        let params = params.0;
        // Cancellation token currently unused for single-file analysis.
        let _ct = context.ct.clone();

        let arc_output = self.handle_file_details_mode(&params).await?;

        // Work on a copy of the formatted text; the cached Arc stays intact.
        let mut formatted = arc_output.formatted.clone();
        let line_count = arc_output.line_count;

        // Precedence: force > explicit summary flag > size heuristic.
        let use_summary = if params.output_control.force == Some(true) {
            false
        } else if params.output_control.summary == Some(true) {
            true
        } else if params.output_control.summary == Some(false) {
            false
        } else {
            formatted.len() > SIZE_LIMIT
        };

        if use_summary {
            formatted = format_file_details_summary(&arc_output.semantic, &params.path, line_count);
        } else if formatted.len() > SIZE_LIMIT && params.output_control.force != Some(true) {
            // summary == Some(false) and still too large: reject with
            // guidance. Rough heuristic: ~4 chars per token.
            let estimated_tokens = formatted.len() / 4;
            let message = format!(
                "Output exceeds 50K chars ({} chars, ~{} tokens). Use one of:\n\
                 - force=true to return full output\n\
                 - Narrow your scope (smaller directory, specific file)\n\
                 - Use analyze_symbol mode for targeted analysis\n\
                 - Reduce max_depth parameter",
                formatted.len(),
                estimated_tokens
            );
            return Err(ErrorData::new(
                rmcp::model::ErrorCode::INVALID_REQUEST,
                message,
                None,
            ));
        }

        // The request cursor, when present, carries the resume offset.
        let page_size = params.pagination.page_size.unwrap_or(DEFAULT_PAGE_SIZE);
        let offset = if let Some(ref cursor_str) = params.pagination.cursor {
            let cursor_data = decode_cursor(cursor_str).map_err(|e| {
                ErrorData::new(rmcp::model::ErrorCode::INVALID_PARAMS, e.to_string(), None)
            })?;
            cursor_data.offset
        } else {
            0
        };

        // Pagination slices the function list, not the raw text.
        let paginated = paginate_slice(
            &arc_output.semantic.functions,
            offset,
            page_size,
            PaginationMode::Default,
        )
        .map_err(|e| ErrorData::new(rmcp::model::ErrorCode::INTERNAL_ERROR, e.to_string(), None))?;

        // Re-render only when pagination is actually in effect; replaces any
        // summary text produced above.
        if paginated.next_cursor.is_some() || offset > 0 {
            formatted = format_file_details_paginated(
                &paginated.items,
                paginated.total,
                &arc_output.semantic,
                &params.path,
                line_count,
                offset,
            );
        }

        let next_cursor = paginated.next_cursor.clone();

        // Mirror the cursor into the plain-text payload.
        let mut final_text = formatted.clone();
        if let Some(ref cursor) = next_cursor {
            final_text.push('\n');
            final_text.push_str(&format!("NEXT_CURSOR: {}", cursor));
        }

        // Structured response re-uses the cached semantic data but carries
        // the (possibly summarized/paginated) formatted text.
        let response_output = analyze::FileAnalysisOutput {
            formatted,
            semantic: arc_output.semantic.clone(),
            line_count,
            next_cursor,
        };

        let mut result = CallToolResult::success(vec![Content::text(final_text)]);
        let structured = serde_json::to_value(&response_output).unwrap_or(Value::Null);
        result.structured_content = Some(structured);
        Ok(result)
    }
680
681 #[instrument(skip(self, context))]
682 #[tool(
683 name = "analyze_symbol",
684 description = "Build call graph for a named function or method across all files in a directory. Returns direct callers and callees. Symbol lookup is case-sensitive exact-match. Use follow_depth to trace deeper chains. Use cursor/page_size to paginate call chains when results exceed page_size.",
685 output_schema = schema_for_type::<analyze::FocusedAnalysisOutput>(),
686 annotations(
687 title = "Analyze Symbol",
688 read_only_hint = true,
689 destructive_hint = false,
690 idempotent_hint = true,
691 open_world_hint = false
692 )
693 )]
694 async fn analyze_symbol(
695 &self,
696 params: Parameters<AnalyzeSymbolParams>,
697 context: RequestContext<RoleServer>,
698 ) -> Result<CallToolResult, ErrorData> {
699 let params = params.0;
700 let ct = context.ct.clone();
701
702 let mut output = self.handle_focused_mode(¶ms, ct).await?;
704
705 let page_size = params.pagination.page_size.unwrap_or(DEFAULT_PAGE_SIZE);
707 let offset = if let Some(ref cursor_str) = params.pagination.cursor {
708 let cursor_data = decode_cursor(cursor_str).map_err(|e| {
709 ErrorData::new(rmcp::model::ErrorCode::INVALID_PARAMS, e.to_string(), None)
710 })?;
711 cursor_data.offset
712 } else {
713 0
714 };
715
716 let cursor_mode = if let Some(ref cursor_str) = params.pagination.cursor {
718 decode_cursor(cursor_str)
719 .map(|c| c.mode)
720 .unwrap_or(PaginationMode::Callers)
721 } else {
722 PaginationMode::Callers
723 };
724
725 let paginated_next_cursor = match cursor_mode {
726 PaginationMode::Callers => {
727 let (paginated_items, paginated_next) = paginate_focus_chains(
728 &output.prod_chains,
729 PaginationMode::Callers,
730 offset,
731 page_size,
732 )?;
733
734 if paginated_next.is_some() || offset > 0 {
735 let base_path = Path::new(¶ms.path);
736 output.formatted = format_focused_paginated(
737 &paginated_items,
738 output.prod_chains.len(),
739 PaginationMode::Callers,
740 ¶ms.symbol,
741 &output.prod_chains,
742 &output.test_chains,
743 &output.outgoing_chains,
744 output.def_count,
745 offset,
746 Some(base_path),
747 );
748 paginated_next
749 } else {
750 None
751 }
752 }
753 PaginationMode::Callees => {
754 let (paginated_items, paginated_next) = paginate_focus_chains(
755 &output.outgoing_chains,
756 PaginationMode::Callees,
757 offset,
758 page_size,
759 )?;
760
761 if paginated_next.is_some() || offset > 0 {
762 let base_path = Path::new(¶ms.path);
763 output.formatted = format_focused_paginated(
764 &paginated_items,
765 output.outgoing_chains.len(),
766 PaginationMode::Callees,
767 ¶ms.symbol,
768 &output.prod_chains,
769 &output.test_chains,
770 &output.outgoing_chains,
771 output.def_count,
772 offset,
773 Some(base_path),
774 );
775 paginated_next
776 } else {
777 None
778 }
779 }
780 PaginationMode::Default => {
781 unreachable!("SymbolFocus should only use Callers or Callees modes")
782 }
783 };
784
785 let mut final_text = output.formatted.clone();
787 if let Some(cursor) = paginated_next_cursor {
788 final_text.push('\n');
789 final_text.push_str(&format!("NEXT_CURSOR: {}", cursor));
790 }
791
792 let mut result = CallToolResult::success(vec![Content::text(final_text)]);
793 let structured = serde_json::to_value(&output).unwrap_or(Value::Null);
794 result.structured_content = Some(structured);
795 Ok(result)
796 }
797}
798
799#[tool_handler]
800impl ServerHandler for CodeAnalyzer {
801 fn get_info(&self) -> InitializeResult {
802 let capabilities = ServerCapabilities::builder()
803 .enable_logging()
804 .enable_tools()
805 .enable_tool_list_changed()
806 .enable_completions()
807 .build();
808 let server_info = Implementation::new("code-analyze-mcp", env!("CARGO_PKG_VERSION"))
809 .with_title("Code Analyze MCP")
810 .with_description("MCP server for code structure analysis using tree-sitter");
811 InitializeResult::new(capabilities)
812 .with_server_info(server_info)
813 .with_instructions("Use analyze_directory to map a codebase (pass a directory). Use analyze_file to extract functions, classes, and imports from a specific file (pass a file path). Use analyze_symbol to trace call graphs for a named function or class (pass a directory and set symbol to the function name, case-sensitive). Prefer summary=true on large directories to reduce output size. When the response includes next_cursor, pass it back as cursor to retrieve the next page.")
814 }
815
    /// Called after the client completes the MCP handshake.
    ///
    /// Stores the connected peer (so tools can send notifications) and
    /// spawns a background task that batches queued log events and forwards
    /// them to the client as logging notifications.
    async fn on_initialized(&self, context: NotificationContext<RoleServer>) {
        let mut peer_lock = self.peer.lock().await;
        *peer_lock = Some(context.peer.clone());
        drop(peer_lock);

        let peer = self.peer.clone();
        let event_rx = self.event_rx.clone();

        tokio::spawn(async move {
            // The receiver lives in an Option so it can be moved out exactly
            // once; a repeat initialization finds None and the task idles out.
            let rx = {
                let mut rx_lock = event_rx.lock().await;
                rx_lock.take()
            };

            if let Some(mut receiver) = rx {
                let mut buffer = Vec::with_capacity(64);
                loop {
                    // Batch up to 64 events per wakeup.
                    receiver.recv_many(&mut buffer, 64).await;

                    // An empty buffer after recv_many means the channel is
                    // closed and drained — exit the forwarding loop.
                    // NOTE(review): if the peer were ever None below, the
                    // undrained buffer would keep this loop from terminating
                    // after channel close; currently the peer is stored
                    // before this task is spawned, so the drain always runs.
                    if buffer.is_empty() {
                        break;
                    }

                    let peer_lock = peer.lock().await;
                    if let Some(peer) = peer_lock.as_ref() {
                        for log_event in buffer.drain(..) {
                            let notification = ServerNotification::LoggingMessageNotification(
                                Notification::new(LoggingMessageNotificationParam {
                                    level: log_event.level,
                                    logger: Some(log_event.logger),
                                    data: log_event.data,
                                }),
                            );
                            // Best-effort delivery; failures are only logged.
                            if let Err(e) = peer.send_notification(notification).await {
                                warn!("Failed to send logging notification: {}", e);
                            }
                        }
                    }
                }
            }
        });
    }
862
863 #[instrument(skip(self, _context))]
864 async fn on_cancelled(
865 &self,
866 notification: CancelledNotificationParam,
867 _context: NotificationContext<RoleServer>,
868 ) {
869 tracing::info!(
870 request_id = ?notification.request_id,
871 reason = ?notification.reason,
872 "Received cancellation notification"
873 );
874 }
875
    /// Handles completion/complete requests for tool arguments.
    ///
    /// Two arguments are supported: "path" (filesystem completion rooted at
    /// the server's working directory) and "symbol" (names drawn from the
    /// analysis cache, scoped by the "path" context argument when present).
    /// Results are truncated to 100 values, reporting the true total and a
    /// has-more flag.
    #[instrument(skip(self, _context))]
    async fn complete(
        &self,
        request: CompleteRequestParams,
        _context: RequestContext<RoleServer>,
    ) -> Result<CompleteResult, ErrorData> {
        let argument_name = &request.argument.name;
        let argument_value = &request.argument.value;

        let completions = match argument_name.as_str() {
            "path" => {
                // Path completion is relative to the server process cwd.
                let root = Path::new(".");
                completion::path_completions(root, argument_value)
            }
            "symbol" => {
                // Symbol completion needs the "path" context argument to
                // know which cached analysis to search.
                let path_arg = request
                    .context
                    .as_ref()
                    .and_then(|ctx| ctx.get_argument("path"));

                match path_arg {
                    Some(path_str) => {
                        let path = Path::new(path_str);
                        completion::symbol_completions(&self.cache, path, argument_value)
                    }
                    None => Vec::new(),
                }
            }
            // Unknown argument names get no suggestions.
            _ => Vec::new(),
        };

        // Record the pre-truncation total so the client can tell more
        // completions exist beyond the 100-value cap.
        let total_count = completions.len() as u32;
        let (values, has_more) = if completions.len() > 100 {
            (completions.into_iter().take(100).collect(), true)
        } else {
            (completions, false)
        };

        let completion_info =
            match CompletionInfo::with_pagination(values, Some(total_count), has_more) {
                Ok(info) => info,
                Err(_) => {
                    // Construction failure degrades to an empty completion
                    // set rather than an error response.
                    CompletionInfo::with_all_values(Vec::new())
                        .unwrap_or_else(|_| CompletionInfo::new(Vec::new()).unwrap())
                }
            };

        Ok(CompleteResult::new(completion_info))
    }
930
931 async fn set_level(
932 &self,
933 params: SetLevelRequestParams,
934 _context: RequestContext<RoleServer>,
935 ) -> Result<(), ErrorData> {
936 let level_filter = match params.level {
937 LoggingLevel::Debug => LevelFilter::DEBUG,
938 LoggingLevel::Info => LevelFilter::INFO,
939 LoggingLevel::Notice => LevelFilter::INFO,
940 LoggingLevel::Warning => LevelFilter::WARN,
941 LoggingLevel::Error => LevelFilter::ERROR,
942 LoggingLevel::Critical => LevelFilter::ERROR,
943 LoggingLevel::Alert => LevelFilter::ERROR,
944 LoggingLevel::Emergency => LevelFilter::ERROR,
945 };
946
947 let mut filter_lock = self.log_level_filter.lock().unwrap();
948 *filter_lock = level_filter;
949 Ok(())
950 }
951}
952
#[cfg(test)]
mod tests {
    use super::*;

    // emit_progress with no connected peer must be a silent no-op: no panic,
    // no notification attempt.
    #[tokio::test]
    async fn test_emit_progress_none_peer_is_noop() {
        let peer = Arc::new(TokioMutex::new(None));
        let log_level_filter = Arc::new(Mutex::new(LevelFilter::INFO));
        let (_tx, rx) = tokio::sync::mpsc::unbounded_channel();
        let analyzer = CodeAnalyzer::new(peer, log_level_filter, rx);
        let token = ProgressToken(NumberOrString::String("test".into()));
        analyzer
            .emit_progress(None, &token, 0.0, 10.0, "test".to_string())
            .await;
    }
}
969}