use crate::Config;
use rmcp::{
    handler::server::{router::tool::ToolRouter, tool::Parameters},
    model::*,
    service::RequestContext,
    tool, tool_handler, tool_router, ErrorData as McpError, RoleServer, ServerHandler, ServiceExt,
};
use serde::Deserialize;
use tracing::{debug, info, warn};

use codeprism_analysis::CodeAnalyzer;
use codeprism_core::graph::DependencyType;
use codeprism_core::{
    ContentSearchManager, GraphQuery, GraphStore, InheritanceFilter, LanguageRegistry,
    NoOpProgressReporter, NodeKind, RepositoryConfig, RepositoryManager, RepositoryScanner,
    SearchQueryBuilder,
};
use std::path::PathBuf;
use std::sync::Arc;

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct TracePathParams {
    pub source: String,
    pub target: String,
    pub max_depth: Option<u32>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct FindDependenciesParams {
    pub target: String,
    pub dependency_type: Option<String>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct FindReferencesParams {
    pub symbol_id: String,
    pub include_definitions: Option<bool>,
    pub context_lines: Option<u32>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct ExplainSymbolParams {
    pub symbol_id: String,
    pub include_dependencies: Option<bool>,
    pub include_usages: Option<bool>,
    pub context_lines: Option<u32>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct SearchSymbolsParams {
    pub pattern: String,
    pub symbol_types: Option<Vec<String>>,
    pub inheritance_filters: Option<Vec<String>>,
    pub limit: Option<u32>,
    pub context_lines: Option<u32>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct SearchContentParams {
    pub query: String,
    pub file_types: Option<Vec<String>>,
    pub case_sensitive: Option<bool>,
    pub regex: Option<bool>,
    pub limit: Option<u32>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct FindPatternsParams {
    pub pattern: String,
    pub pattern_type: Option<String>,
    pub file_types: Option<Vec<String>>,
    pub limit: Option<u32>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct SearchByTypeParams {
    pub symbol_types: Vec<String>,
    pub include_inherited: Option<bool>,
    pub file_patterns: Option<Vec<String>>,
    pub limit: Option<usize>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct SemanticSearchParams {
    pub concept: String,
    pub context: Option<String>,
    pub relevance_threshold: Option<f32>,
    pub include_similar: Option<bool>,
    pub limit: Option<usize>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct AdvancedSearchParams {
    pub query: String,
    pub file_types: Option<Vec<String>>,
    pub symbol_types: Option<Vec<String>>,
    pub date_range: Option<String>,
    pub size_range: Option<String>,
    pub complexity_filter: Option<String>,
    pub exclude_patterns: Option<Vec<String>>,
    pub include_tests: Option<bool>,
    pub include_dependencies: Option<bool>,
    pub limit: Option<usize>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct ProvideGuidanceParams {
    pub target: String,
    pub guidance_type: Option<String>,
    pub include_examples: Option<bool>,
    pub priority_level: Option<String>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct OptimizeCodeParams {
    pub target: String,
    pub optimization_types: Option<Vec<String>>,
    pub aggressive_mode: Option<bool>,
    pub max_suggestions: Option<usize>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct BatchProcessParams {
    pub operation: String,
    pub targets: Vec<String>,
    pub parameters: Option<serde_json::Value>,
    pub max_concurrent: Option<usize>,
    pub fail_fast: Option<bool>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct WorkflowAutomationParams {
    pub workflow_type: String,
    pub target_scope: Option<String>,
    pub automation_level: Option<String>,
    pub dry_run: Option<bool>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct AnalyzeComplexityParams {
    pub target: String,
    pub metrics: Option<Vec<String>>,
    pub threshold_warnings: Option<bool>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct AnalyzePerformanceParams {
    pub target: String,
    pub analysis_types: Option<Vec<String>>,
    pub complexity_threshold: Option<String>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct AnalyzeSecurityParams {
    pub target: String,
    pub vulnerability_types: Option<Vec<String>>,
    pub severity_threshold: Option<String>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct AnalyzeDependenciesParams {
    pub target: Option<String>,
    pub dependency_type: Option<String>,
    pub max_depth: Option<u32>,
    pub include_transitive: Option<bool>,
}

#[derive(Debug, Deserialize, schemars::JsonSchema)]
pub struct AnalyzeControlFlowParams {
    pub target: String,
    pub analysis_types: Option<Vec<String>>,
    pub max_depth: Option<u32>,
    pub include_paths: Option<bool>,
}

#[derive(Debug, Clone, Deserialize, schemars::JsonSchema)]
pub struct AnalyzeCodeQualityParams {
    pub target: String,
    pub quality_types: Option<Vec<String>>,
    pub severity_threshold: Option<String>,
    pub include_recommendations: Option<bool>,
    pub detailed_analysis: Option<bool>,
}

#[derive(Debug, Clone, Deserialize, schemars::JsonSchema)]
pub struct AnalyzeJavaScriptParams {
    pub target: String,
    pub analysis_types: Option<Vec<String>>,
    pub es_target: Option<String>,
    pub framework_hints: Option<Vec<String>>,
    pub include_recommendations: Option<bool>,
    pub detailed_analysis: Option<bool>,
}

#[derive(Debug, Clone, Deserialize, schemars::JsonSchema)]
pub struct SpecializedAnalysisParams {
    pub target: String,
    pub analysis_domains: Option<Vec<String>>,
    pub domain_options: Option<serde_json::Value>,
    pub rule_sets: Option<Vec<String>>,
    pub severity_threshold: Option<String>,
    pub include_recommendations: Option<bool>,
    pub detailed_analysis: Option<bool>,
}

#[derive(Clone)]
#[allow(dead_code)]
pub struct CodePrismMcpServer {
    config: Config,
    tool_router: ToolRouter<CodePrismMcpServer>,
    graph_store: Arc<GraphStore>,
    graph_query: Arc<GraphQuery>,
    repository_scanner: Arc<RepositoryScanner>,
    content_search: Arc<ContentSearchManager>,
    repository_manager: Arc<RepositoryManager>,
    repository_path: Option<PathBuf>,
    code_analyzer: Arc<CodeAnalyzer>,
}

#[tool_router]
impl CodePrismMcpServer {
    pub async fn new(config: Config) -> std::result::Result<Self, crate::Error> {
        info!("Initializing CodePrism MCP Server");

        config.validate()?;

        debug!("Server configuration validated successfully");

        let graph_store = Arc::new(GraphStore::new());
        let graph_query = Arc::new(GraphQuery::new(Arc::clone(&graph_store)));
        let repository_scanner = Arc::new(RepositoryScanner::new());
        let content_search = Arc::new(ContentSearchManager::new());

        let language_registry = Arc::new(LanguageRegistry::new());
        let repository_manager = Arc::new(RepositoryManager::new(language_registry));

        let code_analyzer = Arc::new(CodeAnalyzer::new());

        Ok(Self {
            config,
            tool_router: Self::tool_router(),
            graph_store,
            graph_query,
            repository_scanner,
            content_search,
            repository_manager,
            repository_path: None,
            code_analyzer,
        })
    }
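
    // Illustrative construction sketch (not part of the server): how a binary might
    // build and run this handler. `Config::default()` and the exact transport helper
    // are assumptions here, not confirmed APIs; see the crate's `Config` type and the
    // rmcp transport documentation for the real entry points.
    //
    //     let config = Config::default();
    //     let server = CodePrismMcpServer::new(config).await?;
    //     // e.g. served over stdio via `ServiceExt::serve(...)` from rmcp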
272
273 #[tool(description = "Simple ping tool that responds with pong")]
275 fn ping(&self) -> std::result::Result<CallToolResult, McpError> {
276 info!("Ping tool called");
277
278 let response_data = serde_json::json!({
279 "status": "success",
280 "message": "pong",
281 "timestamp": chrono::Utc::now().to_rfc3339(),
282 "server": "codeprism-mcp-server"
283 });
284
285 Ok(crate::response::create_dual_response(&response_data))
286 }
287
288 #[tool(description = "Get server version and configuration information")]
290 fn version(&self) -> std::result::Result<CallToolResult, McpError> {
291 info!("Version tool called");
292
293 let version_info = serde_json::json!({
294 "server_name": self.config.server().name,
295 "server_version": self.config.server().version,
296 "mcp_protocol_version": crate::MCP_VERSION,
297 "tools_enabled": {
298 "core": self.config.tools().enable_core,
299 "search": self.config.tools().enable_search,
300 "analysis": self.config.tools().enable_analysis,
301 "workflow": self.config.tools().enable_workflow
302 }
303 });
304
305 Ok(crate::response::create_dual_response(&version_info))
306 }
307
308 #[tool(description = "Get system information including OS, memory, and environment")]
310 fn system_info(&self) -> std::result::Result<CallToolResult, McpError> {
311 info!("System info tool called");
312
313 let current_time = chrono::Utc::now();
314 let system_info = serde_json::json!({
315 "status": "success",
316 "timestamp": current_time.to_rfc3339(),
317 "system": {
318 "os": std::env::consts::OS,
319 "arch": std::env::consts::ARCH,
320 "family": std::env::consts::FAMILY,
321 "rust_version": env!("CARGO_PKG_VERSION")
322 },
323 "server_config": {
324 "name": self.config.server().name,
325 "version": self.config.server().version,
326 "max_concurrent_tools": self.config.server().max_concurrent_tools,
327 "request_timeout_secs": self.config.server().request_timeout_secs
328 }
329 });
330
331 Ok(crate::response::create_dual_response(&system_info))
332 }
333
334 #[tool(description = "Perform health check on server components")]
336 fn health_check(&self) -> std::result::Result<CallToolResult, McpError> {
337 info!("Health check tool called");
338
339 let health_status = serde_json::json!({
340 "status": "healthy",
341 "timestamp": chrono::Utc::now().to_rfc3339(),
342 "components": {
343 "server": "operational",
344 "tools": "available",
345 "config": "valid",
346 "graph_store": "operational",
347 "content_search": "operational",
348 "repository_manager": "operational"
349 },
350 "uptime_seconds": std::time::SystemTime::now()
351 .duration_since(std::time::UNIX_EPOCH)
352 .unwrap_or_default()
353 .as_secs(),
354 "checks_performed": 6,
355 "all_systems_operational": true
356 });
357
358 Ok(crate::response::create_dual_response(&health_status))
359 }
360
361 #[tool(description = "Find the shortest path between two code symbols")]
365 fn trace_path(
366 &self,
367 Parameters(params): Parameters<TracePathParams>,
368 ) -> std::result::Result<CallToolResult, McpError> {
369 info!(
370 "Trace path tool called: {} -> {}",
371 params.source, params.target
372 );
373
374 let max_depth = params.max_depth.unwrap_or(10) as usize;
375
        let source_id = match codeprism_core::NodeId::from_hex(&params.source) {
378 Ok(id) => id,
379 Err(_) => {
380 let error_msg = format!(
381 "Invalid source symbol ID format: {}. Expected hexadecimal string.",
382 params.source
383 );
384 return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
385 }
386 };
387
        let target_id = match codeprism_core::NodeId::from_hex(&params.target) {
390 Ok(id) => id,
391 Err(_) => {
392 let error_msg = format!(
393 "Invalid target symbol ID format: {}. Expected hexadecimal string.",
394 params.target
395 );
396 return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
397 }
398 };
399
400 let path_result = self
402 .graph_query
403 .find_path(&source_id, &target_id, Some(max_depth));
404
405 let result = match path_result {
406 Ok(Some(path)) => {
407 let path_nodes: Vec<_> = path
409 .path
410 .iter()
411 .filter_map(|node_id| self.graph_store.get_node(node_id))
412 .map(|node| {
413 serde_json::json!({
414 "id": node.id.to_hex(),
415 "name": node.name,
416 "kind": format!("{:?}", node.kind),
417 "language": format!("{:?}", node.lang),
418 "file": node.file.display().to_string(),
419 "span": {
420 "start_byte": node.span.start_byte,
421 "end_byte": node.span.end_byte,
422 "start_line": node.span.start_line,
423 "start_column": node.span.start_column,
424 "end_line": node.span.end_line,
425 "end_column": node.span.end_column,
426 }
427 })
428 })
429 .collect();
430
431 let path_edges: Vec<_> = path
432 .edges
433 .iter()
434 .map(|edge| {
435 serde_json::json!({
436 "source": edge.source.to_hex(),
437 "target": edge.target.to_hex(),
438 "kind": format!("{:?}", edge.kind),
439 })
440 })
441 .collect();
442
443 serde_json::json!({
444 "status": "success",
445 "path_found": true,
446 "source_id": params.source,
447 "target_id": params.target,
448 "distance": path.distance,
449 "path_length": path.path.len(),
450 "nodes": path_nodes,
451 "edges": path_edges,
452 "query": {
453 "source": params.source,
454 "target": params.target,
455 "max_depth": max_depth
456 }
457 })
458 }
459 Ok(None) => {
460 serde_json::json!({
461 "status": "success",
462 "path_found": false,
463 "source_id": params.source,
464 "target_id": params.target,
465 "message": format!("No path found between {} and {} within {} hops", params.source, params.target, max_depth),
466 "query": {
467 "source": params.source,
468 "target": params.target,
469 "max_depth": max_depth
470 }
471 })
472 }
473 Err(e) => {
474 serde_json::json!({
475 "status": "error",
476 "message": format!("Path finding failed: {e}"),
477 "query": {
478 "source": params.source,
479 "target": params.target,
480 "max_depth": max_depth
481 }
482 })
483 }
484 };
485
486 Ok(CallToolResult::success(vec![Content::text(
487 serde_json::to_string_pretty(&result)
488 .unwrap_or_else(|_| "Error formatting response".to_string()),
489 )]))
490 }
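
    // Example `trace_path` arguments (illustrative only; the hex IDs are node IDs
    // such as those returned by `search_symbols`):
    //
    //     { "source": "<hex node id>", "target": "<hex node id>", "max_depth": 10 }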
491
492 #[tool(description = "Analyze dependencies for a code symbol or file")]
494 fn find_dependencies(
495 &self,
496 Parameters(params): Parameters<FindDependenciesParams>,
497 ) -> std::result::Result<CallToolResult, McpError> {
498 info!("Find dependencies tool called for: {}", params.target);
499
500 let dep_type_str = params
501 .dependency_type
502 .unwrap_or_else(|| "direct".to_string());
503
504 let dependency_type = match dep_type_str.as_str() {
506 "direct" => DependencyType::Direct,
507 "calls" => DependencyType::Calls,
508 "imports" => DependencyType::Imports,
509 "reads" => DependencyType::Reads,
510 "writes" => DependencyType::Writes,
511 _ => {
512 let error_msg = format!("Invalid dependency type: {dep_type_str}. Must be one of: direct, calls, imports, reads, writes");
513 return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
514 }
515 };
516
        let node_id = match codeprism_core::NodeId::from_hex(&params.target) {
519 Ok(id) => id,
520 Err(_) => {
521 let error_msg = format!(
522 "Invalid target symbol ID format: {}. Expected hexadecimal string.",
523 params.target
524 );
525 return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
526 }
527 };
528
529 let dependencies_result = self
531 .graph_query
532 .find_dependencies(&node_id, dependency_type.clone());
533
534 let result = match dependencies_result {
535 Ok(dependencies) => {
536 serde_json::json!({
537 "status": "success",
538 "target_symbol_id": params.target,
539 "dependency_type": dep_type_str,
540 "dependencies": dependencies.iter().map(|dependency| {
541 serde_json::json!({
542 "target_symbol": {
543 "id": dependency.target_node.id.to_hex(),
544 "name": dependency.target_node.name,
545 "kind": format!("{:?}", dependency.target_node.kind),
546 "language": format!("{:?}", dependency.target_node.lang),
547 "file": dependency.target_node.file.display().to_string(),
548 "span": {
549 "start_byte": dependency.target_node.span.start_byte,
550 "end_byte": dependency.target_node.span.end_byte,
551 "start_line": dependency.target_node.span.start_line,
552 "start_column": dependency.target_node.span.start_column,
553 "end_line": dependency.target_node.span.end_line,
554 "end_column": dependency.target_node.span.end_column,
555 }
556 },
557 "edge_type": format!("{:?}", dependency.edge_kind),
558 "dependency_classification": format!("{:?}", dependency.dependency_type),
559 })
560 }).collect::<Vec<_>>(),
561 "total_dependencies": dependencies.len(),
562 "query": {
563 "target": params.target,
564 "dependency_type": dep_type_str
565 }
566 })
567 }
568 Err(e) => {
569 serde_json::json!({
570 "status": "error",
571 "message": format!("Dependency finding failed: {e}"),
572 "query": {
573 "target": params.target,
574 "dependency_type": dep_type_str
575 }
576 })
577 }
578 };
579
580 Ok(CallToolResult::success(vec![Content::text(
581 serde_json::to_string_pretty(&result)
582 .unwrap_or_else(|_| "Error formatting response".to_string()),
583 )]))
584 }
585
586 #[tool(description = "Find all references to a symbol across the codebase")]
588 fn find_references(
589 &self,
590 Parameters(params): Parameters<FindReferencesParams>,
591 ) -> std::result::Result<CallToolResult, McpError> {
592 info!("Find references tool called for: {}", params.symbol_id);
593
594 let include_defs = params.include_definitions.unwrap_or(true);
595 let context = params.context_lines.unwrap_or(4);
596
        let node_id = match codeprism_core::NodeId::from_hex(&params.symbol_id) {
599 Ok(id) => id,
600 Err(_) => {
601 let error_msg = format!(
602 "Invalid symbol ID format: {}. Expected hexadecimal string.",
603 params.symbol_id
604 );
605 return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
606 }
607 };
608
609 let references_result = self.graph_query.find_references(&node_id);
611
612 let result = match references_result {
613 Ok(references) => {
614 serde_json::json!({
615 "status": "success",
616 "symbol_id": params.symbol_id,
617 "references": references.iter().map(|reference| {
618 serde_json::json!({
619 "source_symbol": {
620 "id": reference.source_node.id.to_hex(),
621 "name": reference.source_node.name,
622 "kind": format!("{:?}", reference.source_node.kind),
623 "language": format!("{:?}", reference.source_node.lang),
624 "file": reference.source_node.file.display().to_string(),
625 "span": {
626 "start_byte": reference.source_node.span.start_byte,
627 "end_byte": reference.source_node.span.end_byte,
628 "start_line": reference.source_node.span.start_line,
629 "start_column": reference.source_node.span.start_column,
630 "end_line": reference.source_node.span.end_line,
631 "end_column": reference.source_node.span.end_column,
632 }
633 },
634 "reference_type": format!("{:?}", reference.edge_kind),
635 "location": {
636 "file": reference.location.file.display().to_string(),
637 "span": {
638 "start_byte": reference.location.span.start_byte,
639 "end_byte": reference.location.span.end_byte,
640 "start_line": reference.location.span.start_line,
641 "start_column": reference.location.span.start_column,
642 "end_line": reference.location.span.end_line,
643 "end_column": reference.location.span.end_column,
644 }
645 }
646 })
647 }).collect::<Vec<_>>(),
648 "total_references": references.len(),
649 "query": {
650 "symbol_id": params.symbol_id,
651 "include_definitions": include_defs,
652 "context_lines": context
653 }
654 })
655 }
656 Err(e) => {
657 serde_json::json!({
658 "status": "error",
659 "message": format!("Reference finding failed: {e}"),
660 "query": {
661 "symbol_id": params.symbol_id,
662 "include_definitions": include_defs,
663 "context_lines": context
664 }
665 })
666 }
667 };
668
669 Ok(CallToolResult::success(vec![Content::text(
670 serde_json::to_string_pretty(&result)
671 .unwrap_or_else(|_| "Error formatting response".to_string()),
672 )]))
673 }
674
675 #[tool(description = "Provide detailed explanation of a code symbol with context")]
679 fn explain_symbol(
680 &self,
681 Parameters(params): Parameters<ExplainSymbolParams>,
682 ) -> std::result::Result<CallToolResult, McpError> {
683 info!("Explain symbol tool called for: {}", params.symbol_id);
684
685 let include_deps = params.include_dependencies.unwrap_or(false);
686 let include_uses = params.include_usages.unwrap_or(false);
687 let context = params.context_lines.unwrap_or(4);
688
        let node_id = match codeprism_core::NodeId::from_hex(&params.symbol_id) {
691 Ok(id) => id,
692 Err(_) => {
693 let error_msg = format!(
694 "Invalid symbol ID format: {}. Expected hexadecimal string.",
695 params.symbol_id
696 );
697 return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
698 }
699 };
700
701 let symbol_node = match self.graph_store.get_node(&node_id) {
703 Some(node) => node,
704 None => {
705 let error_msg = format!("Symbol with ID {} not found in graph", params.symbol_id);
706 return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
707 }
708 };
709
710 let mut explanation = serde_json::json!({
712 "status": "success",
713 "symbol": {
714 "id": symbol_node.id.to_hex(),
715 "name": symbol_node.name,
716 "kind": format!("{:?}", symbol_node.kind),
717 "language": format!("{:?}", symbol_node.lang),
718 "file": symbol_node.file.display().to_string(),
719 "span": {
720 "start_byte": symbol_node.span.start_byte,
721 "end_byte": symbol_node.span.end_byte,
722 "start_line": symbol_node.span.start_line,
723 "start_column": symbol_node.span.start_column,
724 "end_line": symbol_node.span.end_line,
725 "end_column": symbol_node.span.end_column,
726 }
727 }
728 });
729
730 if symbol_node.kind == NodeKind::Class {
732 match self.graph_query.get_inheritance_info(&node_id) {
733 Ok(inheritance_info) => {
734 explanation["inheritance"] = serde_json::json!({
735 "base_classes": inheritance_info.base_classes.iter().map(|base| {
736 serde_json::json!({
737 "name": base.class_name,
738 "relationship": base.relationship_type,
739 "file": base.file.display().to_string()
740 })
741 }).collect::<Vec<_>>(),
742 "subclasses": inheritance_info.subclasses.iter().map(|sub| {
743 serde_json::json!({
744 "name": sub.class_name,
745 "relationship": sub.relationship_type,
746 "file": sub.file.display().to_string()
747 })
748 }).collect::<Vec<_>>(),
749 "method_resolution_order": inheritance_info.method_resolution_order,
750 "is_metaclass": inheritance_info.is_metaclass
751 });
752 }
753 Err(_) => {
754 explanation["inheritance"] = serde_json::json!({
755 "note": "Inheritance information not available"
756 });
757 }
758 }
759 }
760
761 if include_deps {
763 match self
764 .graph_query
765 .find_dependencies(&node_id, DependencyType::Direct)
766 {
767 Ok(dependencies) => {
768 explanation["dependencies"] = serde_json::json!({
769 "count": dependencies.len(),
770 "items": dependencies.iter().take(10).map(|dep| {
771 serde_json::json!({
772 "name": dep.target_node.name,
773 "kind": format!("{:?}", dep.target_node.kind),
774 "file": dep.target_node.file.display().to_string(),
775 "relationship": format!("{:?}", dep.edge_kind)
776 })
777 }).collect::<Vec<_>>(),
778 "truncated": dependencies.len() > 10
779 });
780 }
781 Err(_) => {
782 explanation["dependencies"] = serde_json::json!({
783 "note": "Dependencies information not available"
784 });
785 }
786 }
787 }
788
789 if include_uses {
791 match self.graph_query.find_references(&node_id) {
792 Ok(references) => {
793 explanation["usages"] = serde_json::json!({
794 "count": references.len(),
795 "items": references.iter().take(10).map(|reference| {
796 serde_json::json!({
797 "source_name": reference.source_node.name,
798 "source_kind": format!("{:?}", reference.source_node.kind),
799 "file": reference.source_node.file.display().to_string(),
800 "relationship": format!("{:?}", reference.edge_kind),
801 "location": {
802 "line": reference.location.span.start_line,
803 "column": reference.location.span.start_column
804 }
805 })
806 }).collect::<Vec<_>>(),
807 "truncated": references.len() > 10
808 });
809 }
810 Err(_) => {
811 explanation["usages"] = serde_json::json!({
812 "note": "Usage information not available"
813 });
814 }
815 }
816 }
817
818 explanation["query"] = serde_json::json!({
820 "symbol_id": params.symbol_id,
821 "include_dependencies": include_deps,
822 "include_usages": include_uses,
823 "context_lines": context
824 });
825
826 Ok(CallToolResult::success(vec![Content::text(
827 serde_json::to_string_pretty(&explanation)
828 .unwrap_or_else(|_| "Error formatting response".to_string()),
829 )]))
830 }
831
832 #[tool(description = "Search for symbols by name pattern with advanced inheritance filtering")]
834 fn search_symbols(
835 &self,
836 Parameters(params): Parameters<SearchSymbolsParams>,
837 ) -> std::result::Result<CallToolResult, McpError> {
838 info!(
839 "Search symbols tool called with pattern: {}",
840 params.pattern
841 );
842
843 let max_results = params.limit.unwrap_or(50) as usize;
844 let context = params.context_lines.unwrap_or(4);
845
846 let node_kinds = if let Some(ref types) = params.symbol_types {
848 let mut kinds = Vec::new();
849 for sym_type in types {
850 match sym_type.as_str() {
851 "function" => kinds.push(NodeKind::Function),
852 "class" => kinds.push(NodeKind::Class),
853 "variable" => kinds.push(NodeKind::Variable),
854 "module" => kinds.push(NodeKind::Module),
855 "method" => kinds.push(NodeKind::Method),
856 _ => {
857 let error_msg = format!("Invalid symbol type: {sym_type}. Must be one of: function, class, variable, module, method");
858 return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
859 }
860 }
861 }
862 Some(kinds)
863 } else {
864 None
865 };
866
867 let inheritance_filters = if let Some(ref filters) = params.inheritance_filters {
869 let mut parsed_filters = Vec::new();
870 for filter in filters {
871 if let Some(base_class) = filter.strip_prefix("inherits_from:") {
872 parsed_filters.push(InheritanceFilter::InheritsFrom(base_class.to_string()));
873 } else if let Some(metaclass) = filter.strip_prefix("metaclass:") {
874 parsed_filters.push(InheritanceFilter::HasMetaclass(metaclass.to_string()));
875 } else if let Some(mixin) = filter.strip_prefix("mixin:") {
876 parsed_filters.push(InheritanceFilter::UsesMixin(mixin.to_string()));
877 } else {
878 let error_msg = format!("Invalid inheritance filter: {filter}. Must be one of: inherits_from:<class>, metaclass:<class>, mixin:<class>");
879 return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
880 }
881 }
882 Some(parsed_filters)
883 } else {
884 None
885 };
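        // Accepted inheritance filter strings, mirroring the parsing above:
        //   "inherits_from:<class>" -> InheritanceFilter::InheritsFrom
        //   "metaclass:<class>"     -> InheritanceFilter::HasMetaclass
        //   "mixin:<class>"         -> InheritanceFilter::UsesMixin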
886
887 let search_result = if let Some(inheritance_filters) = inheritance_filters {
889 self.graph_query.search_symbols_with_inheritance(
                &params.pattern,
891 node_kinds,
892 Some(inheritance_filters),
893 Some(max_results),
894 )
895 } else {
896 self.graph_query
                .search_symbols(&params.pattern, node_kinds, Some(max_results))
898 };
899
900 let result = match search_result {
901 Ok(symbols) => {
902 serde_json::json!({
903 "status": "success",
904 "symbols": symbols.iter().map(|symbol| {
905 serde_json::json!({
906 "id": symbol.node.id.to_hex(),
907 "name": symbol.node.name,
908 "kind": format!("{:?}", symbol.node.kind),
909 "language": format!("{:?}", symbol.node.lang),
910 "file": symbol.node.file.display().to_string(),
911 "span": {
912 "start_byte": symbol.node.span.start_byte,
913 "end_byte": symbol.node.span.end_byte,
914 "start_line": symbol.node.span.start_line,
915 "start_column": symbol.node.span.start_column,
916 "end_line": symbol.node.span.end_line,
917 "end_column": symbol.node.span.end_column,
918 },
919 "references_count": symbol.references_count,
920 "dependencies_count": symbol.dependencies_count,
921 })
922 }).collect::<Vec<_>>(),
923 "total_found": symbols.len(),
924 "query": {
925 "pattern": params.pattern,
926 "symbol_types": params.symbol_types,
927 "inheritance_filters": params.inheritance_filters,
928 "limit": max_results,
929 "context_lines": context
930 }
931 })
932 }
933 Err(e) => {
934 serde_json::json!({
935 "status": "error",
936 "message": format!("Symbol search failed: {e}"),
937 "query": {
938 "pattern": params.pattern,
939 "symbol_types": params.symbol_types,
940 "inheritance_filters": params.inheritance_filters,
941 "limit": max_results,
942 "context_lines": context
943 }
944 })
945 }
946 };
947
948 Ok(CallToolResult::success(vec![Content::text(
949 serde_json::to_string_pretty(&result)
950 .unwrap_or_else(|_| "Error formatting response".to_string()),
951 )]))
952 }
953
954 #[tool(
958 description = "Get comprehensive repository information including structure and statistics"
959 )]
960 fn get_repository_info(&self) -> std::result::Result<CallToolResult, McpError> {
961 info!("Get repository info tool called");
962
963 let result = if let Some(ref repo_path) = self.repository_path {
964 let repo_name = repo_path
966 .file_name()
967 .and_then(|n| n.to_str())
968 .unwrap_or("Unknown");
969
970 let graph_stats = self.graph_store.get_stats();
972
973 let discovered_files = tokio::task::block_in_place(|| {
975 tokio::runtime::Handle::current().block_on(async {
976 self.repository_scanner
977 .scan_repository(repo_path, Arc::new(NoOpProgressReporter))
978 .await
979 })
980 });
981
982 match discovered_files {
983 Ok(scan_result) => {
984 serde_json::json!({
985 "status": "success",
986 "repository": {
987 "name": repo_name,
988 "path": repo_path.display().to_string(),
989 "total_files": scan_result.total_files,
990 "scan_duration_ms": scan_result.duration_ms,
991 "files_by_language": scan_result.files_by_language.iter()
992 .map(|(lang, files)| (format!("{lang:?}"), files.len()))
993 .collect::<std::collections::HashMap<String, usize>>()
994 },
995 "graph_statistics": {
996 "total_nodes": graph_stats.total_nodes,
997 "total_edges": graph_stats.total_edges,
998 "total_files": graph_stats.total_files,
999 "nodes_by_kind": graph_stats.nodes_by_kind.iter()
1000 .map(|(kind, count)| (format!("{kind:?}"), *count))
1001 .collect::<std::collections::HashMap<String, usize>>()
1002 }
1003 })
1004 }
1005 Err(e) => {
1006 serde_json::json!({
1007 "status": "error",
1008 "message": format!("Failed to scan repository: {e}"),
1009 "repository": {
1010 "name": repo_name,
1011 "path": repo_path.display().to_string()
1012 }
1013 })
1014 }
1015 }
1016 } else {
1017 serde_json::json!({
1018 "status": "error",
1019 "message": "No repository configured. Call initialize_repository first.",
1020 "note": "Use the server initialization to set up a repository path"
1021 })
1022 };
1023
1024 Ok(CallToolResult::success(vec![Content::text(
1025 serde_json::to_string_pretty(&result)
1026 .unwrap_or_else(|_| "Error formatting response".to_string()),
1027 )]))
1028 }
1029
1030 #[tool(description = "Analyze project dependencies and their relationships")]
1032 fn analyze_dependencies(
1033 &self,
1034 Parameters(params): Parameters<AnalyzeDependenciesParams>,
1035 ) -> std::result::Result<CallToolResult, McpError> {
1036 info!("Analyze dependencies tool called");
1037
1038 let dependency_type_str = params.dependency_type.unwrap_or_else(|| "all".to_string());
1039 let max_depth = params.max_depth.unwrap_or(5) as usize;
1040 let include_transitive = params.include_transitive.unwrap_or(true);
1041
1042 let result = if let Some(target) = params.target.clone() {
1043 self.analyze_specific_target_dependencies(
1045 &target,
1046 &dependency_type_str,
1047 max_depth,
1048 include_transitive,
1049 )
1050 } else {
1051 self.analyze_repository_dependencies(
1053 &dependency_type_str,
1054 max_depth,
1055 include_transitive,
1056 )
1057 };
1058
1059 match result {
1060 Ok(analysis) => Ok(CallToolResult::success(vec![Content::text(
1061 serde_json::to_string_pretty(&analysis)
1062 .unwrap_or_else(|_| "Error formatting response".to_string()),
1063 )])),
1064 Err(e) => {
1065 let error_result = serde_json::json!({
1066 "status": "error",
1067 "message": format!("Dependency analysis failed: {e}"),
1068 "target": params.target,
1069 "dependency_type": dependency_type_str,
1070 "max_depth": max_depth,
1071 "include_transitive": include_transitive
1072 });
1073
1074 Ok(CallToolResult::success(vec![Content::text(
1075 serde_json::to_string_pretty(&error_result)
1076 .unwrap_or_else(|_| "Error formatting response".to_string()),
1077 )]))
1078 }
1079 }
1080 }
1081
1082 #[tool(description = "Search for content across files in the codebase")]
1086 fn search_content(
1087 &self,
1088 Parameters(params): Parameters<SearchContentParams>,
1089 ) -> std::result::Result<CallToolResult, McpError> {
1090 info!("Search content tool called with query: {}", params.query);
1091
1092 let case_sens = params.case_sensitive.unwrap_or(false);
1093 let use_regex = params.regex.unwrap_or(false);
1094 let max_results = params.limit.unwrap_or(100) as usize;
1095
1096 let _repo_path = match &self.repository_path {
1098 Some(path) => path.clone(),
1099 None => {
1100 let error_msg = "No repository configured. Call initialize_repository first.";
1101 return Ok(CallToolResult::error(vec![Content::text(
1102 error_msg.to_string(),
1103 )]));
1104 }
1105 };
1106
        let mut query_builder = SearchQueryBuilder::new(&params.query).max_results(max_results);
1109
1110 if case_sens {
1111 query_builder = query_builder.case_sensitive();
1112 }
1113
1114 if use_regex {
1115 query_builder = query_builder.use_regex();
1116 }
1117
1118 if let Some(ref file_types) = params.file_types {
1120 let file_patterns = file_types.iter().map(|ext| format!("*.{ext}")).collect();
1121 query_builder = query_builder.include_files(file_patterns);
1122 }
1123
1124 let search_query = query_builder.build();
1125
1126 let search_result = self.content_search.search(&search_query);
1128
1129 let result = match search_result {
1130 Ok(search_results) => {
1131 serde_json::json!({
1132 "status": "success",
1133 "query_text": params.query,
1134 "results": search_results.iter().map(|result| {
1135 serde_json::json!({
1136 "file": result.chunk.file_path.display().to_string(),
1137 "content_type": format!("{:?}", result.chunk.content_type),
1138 "relevance_score": result.score,
1139 "matches": result.matches.iter().map(|match_item| {
1140 serde_json::json!({
1141 "matched_text": match_item.text,
1142 "line_number": match_item.line_number,
1143 "column_number": match_item.column_number,
1144 "position": match_item.position,
1145 "context_before": match_item.context_before,
1146 "context_after": match_item.context_after
1147 })
1148 }).collect::<Vec<_>>(),
1149 "chunk_content": if result.chunk.content.len() > 500 {
1150 format!("{}...", &result.chunk.content[..500])
1151 } else {
1152 result.chunk.content.clone()
1153 }
1154 })
1155 }).collect::<Vec<_>>(),
1156 "total_results": search_results.len(),
1157 "search_settings": {
1158 "case_sensitive": case_sens,
1159 "regex": use_regex,
1160 "file_types": params.file_types,
1161 "max_results": max_results
1162 }
1163 })
1164 }
1165 Err(e) => {
1166 serde_json::json!({
1167 "status": "error",
1168 "message": format!("Content search failed: {e}"),
1169 "query": {
1170 "query": params.query,
1171 "file_types": params.file_types,
1172 "case_sensitive": case_sens,
1173 "regex": use_regex,
1174 "limit": max_results
1175 }
1176 })
1177 }
1178 };
1179
1180 Ok(CallToolResult::success(vec![Content::text(
1181 serde_json::to_string_pretty(&result)
1182 .unwrap_or_else(|_| "Error formatting response".to_string()),
1183 )]))
1184 }
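
    // Example `search_content` arguments (illustrative): a case-insensitive literal
    // search limited to Rust sources.
    //
    //     { "query": "GraphStore", "file_types": ["rs"], "case_sensitive": false, "limit": 20 }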
1185
1186 #[tool(description = "Find patterns using regex or glob patterns in the codebase")]
1188 fn find_patterns(
1189 &self,
1190 Parameters(params): Parameters<FindPatternsParams>,
1191 ) -> std::result::Result<CallToolResult, McpError> {
1192 info!("Find patterns tool called with pattern: {}", params.pattern);
1193
1194 let p_type = params.pattern_type.unwrap_or_else(|| "glob".to_string());
1195 let max_results = params.limit.unwrap_or(100) as usize;
1196
        match p_type.as_str() {
            "regex" | "glob" => {} // supported pattern types; fall through to the search below
            _ => {
                let error_msg =
                    format!("Invalid pattern type: {p_type}. Must be 'regex' or 'glob'");
                return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
            }
        }
1208
1209 let result = match p_type.as_str() {
1210 "regex" => {
1211 match self
1213 .content_search
                    .regex_search(&params.pattern, Some(max_results))
1215 {
1216 Ok(search_results) => {
1217 let mut pattern_matches = Vec::new();
1218
1219 for search_result in search_results {
1220 let file_path =
1221 search_result.chunk.file_path.to_string_lossy().to_string();
1222
1223 if let Some(ref file_types) = params.file_types {
1225 let extension = search_result
1226 .chunk
1227 .file_path
1228 .extension()
1229 .and_then(|ext| ext.to_str())
1230 .unwrap_or("");
1231
1232 if !file_types.iter().any(|ft| {
1233 ft.trim_start_matches('*').trim_start_matches('.') == extension
1234 || ft == "*"
1235 || ft == "all"
1236 }) {
1237 continue;
1238 }
1239 }
1240
1241 for search_match in search_result.matches {
1242 pattern_matches.push(serde_json::json!({
1243 "file_path": file_path,
1244 "match_text": search_match.text,
1245 "line_number": search_match.line_number,
1246 "column_number": search_match.column_number,
1247 "position": search_match.position,
1248 "context_before": search_match.context_before,
1249 "context_after": search_match.context_after,
1250 "score": search_result.score
1251 }));
1252 }
1253 }
1254
1255 serde_json::json!({
1256 "status": "success",
1257 "pattern_type": "regex",
1258 "pattern": params.pattern,
1259 "matches_found": pattern_matches.len(),
1260 "matches": pattern_matches,
1261 "file_types": params.file_types,
1262 "limit": max_results
1263 })
1264 }
1265 Err(e) => {
1266 serde_json::json!({
1267 "status": "error",
1268 "message": format!("Regex pattern search failed: {e}"),
1269 "pattern": params.pattern,
1270 "pattern_type": "regex"
1271 })
1272 }
1273 }
1274 }
1275 "glob" => {
                match self.content_search.find_files(&params.pattern) {
1278 Ok(file_paths) => {
1279 let mut filtered_files: Vec<_> =
1280 file_paths.into_iter().take(max_results).collect();
1281
1282 if let Some(ref file_types) = params.file_types {
1284 filtered_files.retain(|path| {
1285 let extension =
1286 path.extension().and_then(|ext| ext.to_str()).unwrap_or("");
1287
1288 file_types.iter().any(|ft| {
1289 ft.trim_start_matches('*').trim_start_matches('.') == extension
1290 || ft == "*"
1291 || ft == "all"
1292 })
1293 });
1294 }
1295
1296 let file_matches: Vec<_> = filtered_files
1297 .iter()
1298 .map(|path| {
1299 serde_json::json!({
1300 "file_path": path.to_string_lossy(),
1301 "file_name": path.file_name()
1302 .and_then(|name| name.to_str())
1303 .unwrap_or(""),
1304 "extension": path.extension()
1305 .and_then(|ext| ext.to_str())
1306 .unwrap_or(""),
1307 "directory": path.parent()
1308 .map(|p| p.to_string_lossy().to_string())
1309 .unwrap_or_else(|| ".".to_string())
1310 })
1311 })
1312 .collect();
1313
1314 serde_json::json!({
1315 "status": "success",
1316 "pattern_type": "glob",
1317 "pattern": params.pattern,
1318 "files_found": file_matches.len(),
1319 "files": file_matches,
1320 "file_types": params.file_types,
1321 "limit": max_results
1322 })
1323 }
1324 Err(e) => {
1325 serde_json::json!({
1326 "status": "error",
1327 "message": format!("Glob pattern search failed: {e}"),
1328 "pattern": params.pattern,
1329 "pattern_type": "glob"
1330 })
1331 }
1332 }
1333 }
1334 _ => unreachable!("Pattern type already validated"),
1335 };
1336
1337 Ok(CallToolResult::success(vec![Content::text(
1338 serde_json::to_string_pretty(&result)
1339 .unwrap_or_else(|_| "Error formatting response".to_string()),
1340 )]))
1341 }
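
    // Example `find_patterns` arguments (illustrative): a glob query for Python files,
    // or a regex query for TODO/FIXME markers; each maps to a branch above.
    //
    //     { "pattern": "**/*.py", "pattern_type": "glob" }
    //     { "pattern": "TODO|FIXME", "pattern_type": "regex", "limit": 50 }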
1342
1343 #[tool(description = "Perform semantic search to find conceptually related code")]
1345 fn semantic_search(
1346 &self,
1347 Parameters(params): Parameters<SemanticSearchParams>,
1348 ) -> std::result::Result<CallToolResult, McpError> {
1349 info!(
1350 "Semantic search tool called for concept: {}",
1351 params.concept
1352 );
1353
1354 let max_results = params.limit.unwrap_or(20);
1355 let relevance_threshold = params.relevance_threshold.unwrap_or(0.3);
1356 let include_similar = params.include_similar.unwrap_or(true);
1357
1358 let mut semantic_results = Vec::new();
1360 let mut seen_files = std::collections::HashSet::new();
1361
        let keywords = self.extract_semantic_keywords(&params.concept);
1364
1365 for keyword in &keywords {
1366 if let Ok(content_results) = self.content_search.search(
1368 &SearchQueryBuilder::new(keyword)
1369 .max_results(max_results / keywords.len().max(1))
1370 .build(),
1371 ) {
1372 for content_result in content_results {
1373 let file_path = content_result.chunk.file_path.to_string_lossy().to_string();
1374
1375 if seen_files.contains(&file_path) {
1376 continue;
1377 }
1378 seen_files.insert(file_path.clone());
1379
1380 let relevance = self.calculate_semantic_relevance(
                        &params.concept,
1383 &content_result.chunk.content,
1384 params.context.as_deref(),
1385 );
1386
1387 if relevance >= relevance_threshold {
1388 semantic_results.push(serde_json::json!({
1389 "type": "content_match",
1390 "file": file_path,
1391 "relevance": relevance,
1392 "keyword": keyword,
1393 "matches": content_result.matches.iter().map(|m| {
1394 serde_json::json!({
1395 "text": m.text,
1396 "line": m.line_number,
1397 "column": m.column_number,
1398 "context_before": m.context_before,
1399 "context_after": m.context_after
1400 })
1401 }).collect::<Vec<_>>(),
1402 "chunk_type": content_result.chunk.content_type,
1403 "score": content_result.score
1404 }));
1405 }
1406 }
1407 }
1408
1409 if let Ok(symbol_results) = self.graph_query.search_symbols(
1411 keyword,
1412 None,
1413 Some(max_results / keywords.len().max(1)),
1414 ) {
1415 for symbol_result in symbol_results {
1416 let file_path = symbol_result.node.file.to_string_lossy().to_string();
1417
1418 let relevance = self.calculate_symbol_semantic_relevance(
                        &params.concept,
1421 &symbol_result.node,
1422 params.context.as_deref(),
1423 );
1424
1425 if relevance >= relevance_threshold {
1426 semantic_results.push(serde_json::json!({
1427 "type": "symbol_match",
1428 "file": file_path,
1429 "relevance": relevance,
1430 "keyword": keyword,
1431 "symbol": {
1432 "id": symbol_result.node.id.to_hex(),
1433 "name": symbol_result.node.name,
1434 "kind": format!("{:?}", symbol_result.node.kind).to_lowercase(),
1435 "line": symbol_result.node.span.start_line,
1436 "column": symbol_result.node.span.start_column,
1437 "metadata": symbol_result.node.metadata
1438 },
1439 "references_count": symbol_result.references_count,
1440 "dependencies_count": symbol_result.dependencies_count
1441 }));
1442 }
1443 }
1444 }
1445 }
1446
1447 if include_similar {
1449 let concept_variations = self.generate_concept_variations(¶ms.concept);
1450
1451 for variation in concept_variations {
1452 if let Ok(similar_symbols) =
1453 self.graph_query.search_symbols(&variation, None, Some(5))
1454 {
1455 for symbol_result in similar_symbols {
1456 let file_path = symbol_result.node.file.to_string_lossy().to_string();
1457
1458 if !seen_files.contains(&file_path) {
                            let relevance = self.calculate_symbol_semantic_relevance(
                                &params.concept,
                                &symbol_result.node,
                                params.context.as_deref(),
                            ) * 0.8; // discount matches found via concept variations

                            if relevance >= relevance_threshold {
1466 semantic_results.push(serde_json::json!({
1467 "type": "similar_symbol",
1468 "file": file_path,
1469 "relevance": relevance,
1470 "variation": variation,
1471 "symbol": {
1472 "id": symbol_result.node.id.to_hex(),
1473 "name": symbol_result.node.name,
1474 "kind": format!("{:?}", symbol_result.node.kind).to_lowercase(),
1475 "line": symbol_result.node.span.start_line,
1476 "column": symbol_result.node.span.start_column
1477 }
1478 }));
1479 }
1480 }
1481 }
1482 }
1483 }
1484 }
1485
1486 semantic_results.sort_by(|a, b| {
1488 let relevance_a = a["relevance"].as_f64().unwrap_or(0.0);
1489 let relevance_b = b["relevance"].as_f64().unwrap_or(0.0);
1490 relevance_b
1491 .partial_cmp(&relevance_a)
1492 .unwrap_or(std::cmp::Ordering::Equal)
1493 });
1494
1495 semantic_results.truncate(max_results);
1496
1497 let result = serde_json::json!({
1498 "status": "success",
1499 "concept": params.concept,
1500 "context": params.context,
1501 "results_found": semantic_results.len(),
1502 "results": semantic_results,
1503 "search_strategy": {
1504 "keywords_used": keywords,
1505 "relevance_threshold": relevance_threshold,
1506 "include_similar": include_similar,
1507 "max_results": max_results
1508 },
1509 "notes": [
1510 "Semantic search combines keyword matching with contextual analysis",
1511 "Relevance scores are calculated based on concept match and context",
1512 "Similar symbol variations are included when include_similar=true"
1513 ]
1514 });
1515
1516 Ok(CallToolResult::success(vec![Content::text(
1517 serde_json::to_string_pretty(&result)
1518 .unwrap_or_else(|_| "Error formatting response".to_string()),
1519 )]))
1520 }
1521
1522 #[tool(description = "Advanced search combining multiple search criteria and filters")]
1524 fn advanced_search(
1525 &self,
1526 Parameters(params): Parameters<AdvancedSearchParams>,
1527 ) -> std::result::Result<CallToolResult, McpError> {
1528 info!("Advanced search tool called with query: {}", params.query);
1529
1530 let max_results = params.limit.unwrap_or(50);
1531 let include_tests = params.include_tests.unwrap_or(false);
1532 let include_dependencies = params.include_dependencies.unwrap_or(false);
1533
1534 let mut search_results = Vec::new();
1536 let mut processed_files = std::collections::HashSet::new();
1537
1538 let mut content_query_builder =
            SearchQueryBuilder::new(&params.query).max_results(max_results);
1541
1542 if let Some(ref file_types) = params.file_types {
1544 let file_patterns: Vec<String> = file_types
1545 .iter()
1546 .map(|ext| format!("*.{}", ext.trim_start_matches('*').trim_start_matches('.')))
1547 .collect();
1548 content_query_builder = content_query_builder.include_files(file_patterns);
1549 }
1550
1551 if let Some(ref exclude_patterns) = params.exclude_patterns {
1553 content_query_builder = content_query_builder.exclude_files(exclude_patterns.clone());
1554 }
1555
1556 if let Ok(content_results) = self.content_search.search(&content_query_builder.build()) {
1558 for content_result in content_results {
1559 let file_path = content_result.chunk.file_path.to_string_lossy().to_string();
1560
1561 if !include_tests && self.is_test_file(&file_path) {
1563 continue;
1564 }
1565
1566 if !include_dependencies && self.is_dependency_file(&file_path) {
1568 continue;
1569 }
1570
1571 if let Some(ref size_range) = params.size_range {
1573 if let Ok(metadata) = std::fs::metadata(&content_result.chunk.file_path) {
1574 if !self.matches_size_range(metadata.len(), size_range) {
1575 continue;
1576 }
1577 }
1578 }
1579
1580 processed_files.insert(file_path.clone());
1581
1582 search_results.push(serde_json::json!({
1583 "type": "content_match",
1584 "file": file_path,
1585 "score": content_result.score,
1586 "matches": content_result.matches.iter().map(|m| {
1587 serde_json::json!({
1588 "text": m.text,
1589 "line": m.line_number,
1590 "column": m.column_number,
1591 "context_before": m.context_before,
1592 "context_after": m.context_after
1593 })
1594 }).collect::<Vec<_>>(),
1595 "content_type": content_result.chunk.content_type,
1596 "file_size": std::fs::metadata(&content_result.chunk.file_path)
1597 .map(|m| m.len())
1598 .unwrap_or(0)
1599 }));
1600 }
1601 }
1602
1603 let symbol_types = if let Some(ref types) = params.symbol_types {
1605 let mut node_kinds = Vec::new();
1606 for sym_type in types {
1607 match sym_type.as_str() {
1608 "function" | "functions" => node_kinds.push(NodeKind::Function),
1609 "class" | "classes" => node_kinds.push(NodeKind::Class),
1610 "method" | "methods" => node_kinds.push(NodeKind::Method),
1611 "variable" | "variables" => node_kinds.push(NodeKind::Variable),
1612 "module" | "modules" => node_kinds.push(NodeKind::Module),
1613 _ => {}
1614 }
1615 }
1616 Some(node_kinds)
1617 } else {
1618 None
1619 };
1620
1621 if let Ok(symbol_results) =
1622 self.graph_query
                .search_symbols(&params.query, symbol_types, Some(max_results))
1624 {
1625 for symbol_result in symbol_results {
1626 let file_path = symbol_result.node.file.to_string_lossy().to_string();
1627
1628 if processed_files.contains(&file_path) {
1630 continue;
1631 }
1632
1633 if !include_tests && self.is_test_file(&file_path) {
1635 continue;
1636 }
1637
1638 if !include_dependencies && self.is_dependency_file(&file_path) {
1639 continue;
1640 }
1641
1642 if let Some(ref complexity_filter) = params.complexity_filter {
1644 let complexity_score = self.estimate_symbol_complexity(&symbol_result.node);
1645 if !self.matches_complexity_filter(complexity_score, complexity_filter) {
1646 continue;
1647 }
1648 }
1649
1650 search_results.push(serde_json::json!({
1651 "type": "symbol_match",
1652 "file": file_path,
1653 "symbol": {
1654 "id": symbol_result.node.id.to_hex(),
1655 "name": symbol_result.node.name,
1656 "kind": format!("{:?}", symbol_result.node.kind).to_lowercase(),
1657 "line": symbol_result.node.span.start_line,
1658 "column": symbol_result.node.span.start_column,
1659 "metadata": symbol_result.node.metadata
1660 },
1661 "references_count": symbol_result.references_count,
1662 "dependencies_count": symbol_result.dependencies_count,
1663 "complexity_estimate": self.estimate_symbol_complexity(&symbol_result.node)
1664 }));
1665 }
1666 }
1667
1668 if let Some(ref date_range) = params.date_range {
1670 search_results.retain(|result| {
1671 if let Some(file_path) = result["file"].as_str() {
1672 if let Ok(metadata) = std::fs::metadata(file_path) {
1673 if let Ok(modified) = metadata.modified() {
1674 return self.matches_date_range(modified, date_range);
1675 }
1676 }
1677 }
1678 false
1679 });
1680 }
1681
1682 search_results.sort_by(|a, b| {
1684 let score_a = a["score"].as_f64().unwrap_or(0.0);
1685 let score_b = b["score"].as_f64().unwrap_or(0.0);
1686 score_b
1687 .partial_cmp(&score_a)
1688 .unwrap_or(std::cmp::Ordering::Equal)
1689 });
1690
1691 search_results.truncate(max_results);
1693
1694 let result = serde_json::json!({
1695 "status": "success",
1696 "query": params.query,
1697 "results_found": search_results.len(),
1698 "results": search_results,
1699 "filters_applied": {
1700 "file_types": params.file_types,
1701 "symbol_types": params.symbol_types,
1702 "date_range": params.date_range,
1703 "size_range": params.size_range,
1704 "complexity_filter": params.complexity_filter,
1705 "exclude_patterns": params.exclude_patterns,
1706 "include_tests": include_tests,
1707 "include_dependencies": include_dependencies,
1708 "limit": max_results
1709 },
1710 "search_strategy": [
1711 "Content-based search with text matching",
1712 "Symbol-based search with type filtering",
1713 "File metadata filtering (size, date, type)",
1714 "Complexity analysis integration",
1715 "Test and dependency file filtering"
1716 ]
1717 });
1718
1719 Ok(CallToolResult::success(vec![Content::text(
1720 serde_json::to_string_pretty(&result)
1721 .unwrap_or_else(|_| "Error formatting response".to_string()),
1722 )]))
1723 }
1724
1725 #[tool(
1729 description = "Analyze code complexity including cyclomatic complexity and maintainability"
1730 )]
1731 fn analyze_complexity(
1732 &self,
1733 Parameters(params): Parameters<AnalyzeComplexityParams>,
1734 ) -> std::result::Result<CallToolResult, McpError> {
1735 info!(
1736 "Analyze complexity tool called for target: {}",
1737 params.target
1738 );
1739
1740 let metrics = params.metrics.unwrap_or_else(|| vec!["all".to_string()]);
1741 let threshold_warnings = params.threshold_warnings.unwrap_or(true);
1742
        let result = if std::path::Path::new(&params.target).exists() {
            match self.code_analyzer.complexity.analyze_file_complexity(
                std::path::Path::new(&params.target),
1748 &metrics,
1749 threshold_warnings,
1750 ) {
1751 Ok(analysis) => {
1752 serde_json::json!({
1753 "status": "success",
1754 "target_type": "file",
1755 "target": params.target,
1756 "analysis": analysis,
1757 "settings": {
1758 "metrics": metrics,
1759 "threshold_warnings": threshold_warnings
1760 }
1761 })
1762 }
1763 Err(e) => {
1764 serde_json::json!({
1765 "status": "error",
1766 "message": format!("Failed to analyze file complexity: {e}"),
1767 "target": params.target
1768 })
1769 }
1770 }
1771 } else if params.target.starts_with("**") || params.target.contains("*") {
1772 match &self.repository_path {
1774 Some(repo_path) => {
1775 let pattern = if params.target.starts_with("**/") {
                        repo_path.join(&params.target[3..]).display().to_string()
                    } else {
                        repo_path.join(&params.target).display().to_string()
1780 };
1781
1782 let mut all_results = Vec::new();
1784 if let Ok(paths) = glob::glob(&pattern) {
1785 for path in paths.flatten() {
1786 if let Ok(analysis) = self
1787 .code_analyzer
1788 .complexity
1789 .analyze_file_complexity(&path, &metrics, threshold_warnings)
1790 {
1791 all_results.push(analysis);
1792 }
1793 }
1794 }
1795
1796 if all_results.is_empty() {
1797 serde_json::json!({
1798 "status": "success",
1799 "target_type": "pattern",
1800 "target": params.target,
1801 "message": "No files found matching pattern",
1802 "files_analyzed": 0
1803 })
1804 } else {
1805 serde_json::json!({
1806 "status": "success",
1807 "target_type": "pattern",
1808 "target": params.target,
1809 "files_analyzed": all_results.len(),
1810 "results": all_results,
1811 "settings": {
1812 "metrics": metrics,
1813 "threshold_warnings": threshold_warnings
1814 }
1815 })
1816 }
1817 }
1818 None => {
1819 serde_json::json!({
1820 "status": "error",
1821 "message": "No repository configured. Call initialize_repository first.",
1822 "target": params.target
1823 })
1824 }
1825 }
1826 } else {
1827 serde_json::json!({
1829 "status": "error",
1830 "message": format!("Target '{}' not found. Provide a valid file path or glob pattern.", params.target),
1831 "target": params.target,
1832 "hint": "Use a file path like 'src/main.rs' or a pattern like '**/*.rs'"
1833 })
1834 };
1835
1836 Ok(CallToolResult::success(vec![Content::text(
1837 serde_json::to_string_pretty(&result)
1838 .unwrap_or_else(|_| "Error formatting response".to_string()),
1839 )]))
1840 }
1841
1842 #[tool(description = "Analyze control flow patterns and execution paths in code")]
1844 fn analyze_control_flow(
1845 &self,
1846 Parameters(params): Parameters<AnalyzeControlFlowParams>,
1847 ) -> std::result::Result<CallToolResult, McpError> {
1848 info!(
1849 "Analyze control flow tool called for target: {}",
1850 params.target
1851 );
1852
1853 let analysis_types = params
1854 .analysis_types
1855 .unwrap_or_else(|| vec!["all".to_string()]);
1856 let max_depth = params.max_depth.unwrap_or(10) as usize;
1857 let include_paths = params.include_paths.unwrap_or(true);
1858
1859 let result = self.analyze_control_flow_patterns(
            &params.target,
1861 &analysis_types,
1862 max_depth,
1863 include_paths,
1864 );
1865
1866 match result {
1867 Ok(analysis) => Ok(CallToolResult::success(vec![Content::text(
1868 serde_json::to_string_pretty(&analysis)
1869 .unwrap_or_else(|_| "Error formatting response".to_string()),
1870 )])),
1871 Err(e) => {
1872 let error_result = serde_json::json!({
1873 "status": "error",
1874 "message": format!("Control flow analysis failed: {e}"),
1875 "target": params.target,
1876 "analysis_types": analysis_types,
1877 "max_depth": max_depth
1878 });
1879
1880 Ok(CallToolResult::success(vec![Content::text(
1881 serde_json::to_string_pretty(&error_result)
1882 .unwrap_or_else(|_| "Error formatting response".to_string()),
1883 )]))
1884 }
1885 }
1886 }
1887
1888 #[tool(description = "Comprehensive code quality analysis with actionable recommendations")]
1890 fn analyze_code_quality(
1891 &self,
1892 Parameters(params): Parameters<AnalyzeCodeQualityParams>,
1893 ) -> std::result::Result<CallToolResult, McpError> {
1894 info!(
1895 "Analyze code quality tool called for target: {}",
1896 params.target
1897 );
1898
1899 let quality_types = params
1900 .quality_types
1901 .unwrap_or_else(|| vec!["all".to_string()]);
1902 let severity_threshold = params
1903 .severity_threshold
1904 .unwrap_or_else(|| "low".to_string());
1905 let include_recommendations = params.include_recommendations.unwrap_or(true);
1906 let detailed_analysis = params.detailed_analysis.unwrap_or(false);
1907
1908 let analysis_result = self.analyze_code_quality_comprehensive(
            &params.target,
1911 &quality_types,
1912 &severity_threshold,
1913 include_recommendations,
1914 detailed_analysis,
1915 );
1916
1917 let result = match analysis_result {
1918 Ok(analysis) => analysis,
1919 Err(e) => {
1920 serde_json::json!({
1921 "status": "error",
1922 "message": format!("Code quality analysis failed: {e}"),
1923 "target": params.target
1924 })
1925 }
1926 };
1927
1928 Ok(CallToolResult::success(vec![Content::text(
1929 serde_json::to_string_pretty(&result)
1930 .unwrap_or_else(|_| "Error formatting response".to_string()),
1931 )]))
1932 }
1933
1934 #[tool(description = "Analyze performance bottlenecks and optimization opportunities")]
1936 fn analyze_performance(
1937 &self,
1938 Parameters(params): Parameters<AnalyzePerformanceParams>,
1939 ) -> std::result::Result<CallToolResult, McpError> {
1940 info!(
1941 "Analyze performance tool called for target: {}",
1942 params.target
1943 );
1944
1945 let analysis_types = params
1946 .analysis_types
1947 .unwrap_or_else(|| vec!["all".to_string()]);
1948 let complexity_threshold = params
1949 .complexity_threshold
1950 .unwrap_or_else(|| "medium".to_string());
1951
1952        let result = if std::path::Path::new(&params.target).exists() {
1956            let file_content = match std::fs::read_to_string(&params.target) {
1956 Ok(content) => content,
1957 Err(e) => {
1958 return Ok(CallToolResult::error(vec![Content::text(format!(
1959 "Failed to read file '{}': {}",
1960 params.target, e
1961 ))]));
1962 }
1963 };
1964
1965 match self.code_analyzer.performance.analyze_content(
1966 &file_content,
1967 &analysis_types,
1968 &complexity_threshold,
1969 ) {
1970 Ok(issues) => {
1971 let recommendations = self
1972 .code_analyzer
1973 .performance
1974 .get_performance_recommendations(&issues);
1975
1976 serde_json::json!({
1977 "status": "success",
1978 "target_type": "file",
1979 "target": params.target,
1980 "performance_analysis": {
1981 "issues_found": issues.len(),
1982 "issues": issues.iter().map(|issue| {
1983 serde_json::json!({
1984 "type": issue.issue_type,
1985 "severity": issue.severity,
1986 "description": issue.description,
1987 "location": issue.location,
1988 "recommendation": issue.recommendation,
1989 "complexity_estimate": issue.complexity_estimate,
1990 "impact_score": issue.impact_score,
1991 "optimization_effort": issue.optimization_effort
1992 })
1993 }).collect::<Vec<_>>(),
1994 "recommendations": recommendations,
1995 "overall_grade": self.calculate_performance_grade(&issues)
1996 },
1997 "settings": {
1998 "analysis_types": analysis_types,
1999 "complexity_threshold": complexity_threshold
2000 }
2001 })
2002 }
2003 Err(e) => {
2004 serde_json::json!({
2005 "status": "error",
2006 "message": format!("Failed to analyze performance: {e}"),
2007 "target": params.target
2008 })
2009 }
2010 }
2011 } else if params.target.starts_with("**") || params.target.contains("*") {
2012 match &self.repository_path {
2014 Some(repo_path) => {
2015 let pattern = if params.target.starts_with("**/") {
2016                        repo_path.join(&params.target[3..]).display().to_string()
2017 } else {
2018                        repo_path.join(&params.target).display().to_string()
2019 };
2020
2021 let mut all_issues = Vec::new();
2022 let mut files_analyzed = 0;
2023
2024 if let Ok(paths) = glob::glob(&pattern) {
2025 for path in paths.flatten() {
2026 if let Ok(content) = std::fs::read_to_string(&path) {
2027 if let Ok(issues) = self.code_analyzer.performance.analyze_content(
2028 &content,
2029 &analysis_types,
2030 &complexity_threshold,
2031 ) {
2032 all_issues.extend(issues);
2033 files_analyzed += 1;
2034 }
2035 }
2036 }
2037 }
2038
2039 let recommendations = self
2040 .code_analyzer
2041 .performance
2042 .get_performance_recommendations(&all_issues);
2043
2044 serde_json::json!({
2045 "status": "success",
2046 "target_type": "pattern",
2047 "target": params.target,
2048 "files_analyzed": files_analyzed,
2049 "performance_analysis": {
2050 "total_issues": all_issues.len(),
2051 "issues": all_issues.iter().map(|issue| {
2052 serde_json::json!({
2053 "type": issue.issue_type,
2054 "severity": issue.severity,
2055 "description": issue.description,
2056 "location": issue.location,
2057 "recommendation": issue.recommendation,
2058 "complexity_estimate": issue.complexity_estimate,
2059 "impact_score": issue.impact_score,
2060 "optimization_effort": issue.optimization_effort
2061 })
2062 }).collect::<Vec<_>>(),
2063 "recommendations": recommendations,
2064 "overall_grade": self.calculate_performance_grade(&all_issues)
2065 },
2066 "settings": {
2067 "analysis_types": analysis_types,
2068 "complexity_threshold": complexity_threshold
2069 }
2070 })
2071 }
2072 None => {
2073 serde_json::json!({
2074 "status": "error",
2075 "message": "No repository configured. Call initialize_repository first.",
2076 "target": params.target
2077 })
2078 }
2079 }
2080 } else {
2081 serde_json::json!({
2082 "status": "error",
2083 "message": format!("Target '{}' not found. Provide a valid file path or glob pattern.", params.target),
2084 "target": params.target,
2085 "hint": "Use a file path like 'src/main.rs' or a pattern like '**/*.rs'"
2086 })
2087 };
2088
2089 Ok(CallToolResult::success(vec![Content::text(
2090 serde_json::to_string_pretty(&result)
2091 .unwrap_or_else(|_| "Error formatting response".to_string()),
2092 )]))
2093 }
2094
2095 #[tool(
2097 description = "Comprehensive JavaScript/TypeScript analysis with framework detection and ES compatibility"
2098 )]
2099 fn analyze_javascript(
2100 &self,
2101 Parameters(params): Parameters<AnalyzeJavaScriptParams>,
2102 ) -> std::result::Result<CallToolResult, McpError> {
2103 info!(
2104 "Analyze JavaScript tool called for target: {}",
2105 params.target
2106 );
2107
2108 let analysis_types = params
2109 .analysis_types
2110 .unwrap_or_else(|| vec!["all".to_string()]);
2111 let es_target = params.es_target.unwrap_or_else(|| "ES2020".to_string());
2112 let framework_hints = params.framework_hints.unwrap_or_default();
2113 let include_recommendations = params.include_recommendations.unwrap_or(true);
2114 let detailed_analysis = params.detailed_analysis.unwrap_or(false);
2115
2116 let analysis_result = self.analyze_javascript_comprehensive(
2118            &params.target,
2119 &analysis_types,
2120 &es_target,
2121 &framework_hints,
2122 include_recommendations,
2123 detailed_analysis,
2124 );
2125
2126 let result = match analysis_result {
2127 Ok(analysis) => analysis,
2128 Err(e) => {
2129 serde_json::json!({
2130 "status": "error",
2131 "message": format!("JavaScript analysis failed: {e}"),
2132 "target": params.target
2133 })
2134 }
2135 };
2136
2137 Ok(CallToolResult::success(vec![Content::text(
2138 serde_json::to_string_pretty(&result)
2139 .unwrap_or_else(|_| "Error formatting response".to_string()),
2140 )]))
2141 }
2142
2143 #[tool(description = "Analyze security vulnerabilities and potential threats")]
2145 fn analyze_security(
2146 &self,
2147 Parameters(params): Parameters<AnalyzeSecurityParams>,
2148 ) -> std::result::Result<CallToolResult, McpError> {
2149 info!("Analyze security tool called for target: {}", params.target);
2150
2151 let vulnerability_types = params
2152 .vulnerability_types
2153 .unwrap_or_else(|| vec!["all".to_string()]);
2154 let severity_threshold = params
2155 .severity_threshold
2156 .unwrap_or_else(|| "low".to_string());
2157
2158        let result = if std::path::Path::new(&params.target).exists() {
2162            let file_content = match std::fs::read_to_string(&params.target) {
2162 Ok(content) => content,
2163 Err(e) => {
2164 return Ok(CallToolResult::error(vec![Content::text(format!(
2165 "Failed to read file '{}': {}",
2166 params.target, e
2167 ))]));
2168 }
2169 };
2170
2171 match self.code_analyzer.security.analyze_content_with_location(
2172 &file_content,
2173                Some(&params.target),
2174 &vulnerability_types,
2175 &severity_threshold,
2176 ) {
2177 Ok(vulnerabilities) => {
2178 let recommendations = self
2179 .code_analyzer
2180 .security
2181 .get_security_recommendations(&vulnerabilities);
2182
2183 let security_report = self
2184 .code_analyzer
2185 .security
2186 .generate_security_report(&vulnerabilities);
2187
2188 serde_json::json!({
2189 "status": "success",
2190 "target_type": "file",
2191 "target": params.target,
2192 "security_analysis": {
2193 "vulnerabilities_found": vulnerabilities.len(),
2194 "vulnerabilities": vulnerabilities.iter().map(|vuln| {
2195 serde_json::json!({
2196 "type": vuln.vulnerability_type,
2197 "severity": vuln.severity,
2198 "description": vuln.description,
2199 "location": vuln.location,
2200 "recommendation": vuln.recommendation,
2201 "cvss_score": vuln.cvss_score,
2202 "owasp_category": vuln.owasp_category,
2203 "confidence": vuln.confidence,
2204 "line_number": vuln.line_number
2205 })
2206 }).collect::<Vec<_>>(),
2207 "recommendations": recommendations,
2208 "security_report": security_report
2209 },
2210 "settings": {
2211 "vulnerability_types": vulnerability_types,
2212 "severity_threshold": severity_threshold
2213 }
2214 })
2215 }
2216 Err(e) => {
2217 serde_json::json!({
2218 "status": "error",
2219 "message": format!("Failed to analyze security: {e}"),
2220 "target": params.target
2221 })
2222 }
2223 }
2224 } else if params.target.starts_with("**") || params.target.contains("*") {
2225 match &self.repository_path {
2227 Some(repo_path) => {
2228 let pattern = if params.target.starts_with("**/") {
2229                        repo_path.join(&params.target[3..]).display().to_string()
2230 } else {
2231                        repo_path.join(&params.target).display().to_string()
2232 };
2233
2234 let mut all_vulnerabilities = Vec::new();
2235 let mut files_analyzed = 0;
2236
2237 if let Ok(paths) = glob::glob(&pattern) {
2238 for path in paths.flatten() {
2239 if let Ok(content) = std::fs::read_to_string(&path) {
2240 if let Ok(vulnerabilities) =
2241 self.code_analyzer.security.analyze_content_with_location(
2242 &content,
2243 Some(&path.display().to_string()),
2244 &vulnerability_types,
2245 &severity_threshold,
2246 )
2247 {
2248 all_vulnerabilities.extend(vulnerabilities);
2249 files_analyzed += 1;
2250 }
2251 }
2252 }
2253 }
2254
2255 let recommendations = self
2256 .code_analyzer
2257 .security
2258 .get_security_recommendations(&all_vulnerabilities);
2259
2260 let security_report = self
2261 .code_analyzer
2262 .security
2263 .generate_security_report(&all_vulnerabilities);
2264
2265 serde_json::json!({
2266 "status": "success",
2267 "target_type": "pattern",
2268 "target": params.target,
2269 "files_analyzed": files_analyzed,
2270 "security_analysis": {
2271 "total_vulnerabilities": all_vulnerabilities.len(),
2272 "vulnerabilities": all_vulnerabilities.iter().map(|vuln| {
2273 serde_json::json!({
2274 "type": vuln.vulnerability_type,
2275 "severity": vuln.severity,
2276 "description": vuln.description,
2277 "location": vuln.location,
2278 "recommendation": vuln.recommendation,
2279 "cvss_score": vuln.cvss_score,
2280 "owasp_category": vuln.owasp_category,
2281 "confidence": vuln.confidence,
2282 "file_path": vuln.file_path,
2283 "line_number": vuln.line_number
2284 })
2285 }).collect::<Vec<_>>(),
2286 "recommendations": recommendations,
2287 "security_report": security_report
2288 },
2289 "settings": {
2290 "vulnerability_types": vulnerability_types,
2291 "severity_threshold": severity_threshold
2292 }
2293 })
2294 }
2295 None => {
2296 serde_json::json!({
2297 "status": "error",
2298 "message": "No repository configured. Call initialize_repository first.",
2299 "target": params.target
2300 })
2301 }
2302 }
2303 } else {
2304 serde_json::json!({
2305 "status": "error",
2306 "message": format!("Target '{}' not found. Provide a valid file path or glob pattern.", params.target),
2307 "target": params.target,
2308 "hint": "Use a file path like 'src/main.rs' or a pattern like '**/*.rs'"
2309 })
2310 };
2311
2312 Ok(CallToolResult::success(vec![Content::text(
2313 serde_json::to_string_pretty(&result)
2314 .unwrap_or_else(|_| "Error formatting response".to_string()),
2315 )]))
2316 }
2317
2318 #[tool(
2320 description = "Comprehensive domain-specific analysis for security, concurrency, architecture, and performance"
2321 )]
2322 fn specialized_analysis(
2323 &self,
2324 Parameters(params): Parameters<SpecializedAnalysisParams>,
2325 ) -> std::result::Result<CallToolResult, McpError> {
2326 info!(
2327 "Specialized analysis tool called for target: {}",
2328 params.target
2329 );
2330
2331 let analysis_domains = params
2332 .analysis_domains
2333 .unwrap_or_else(|| vec!["all".to_string()]);
2334 let severity_threshold = params
2335 .severity_threshold
2336 .unwrap_or_else(|| "low".to_string());
2337 let rule_sets = params.rule_sets.unwrap_or_default();
2338 let include_recommendations = params.include_recommendations.unwrap_or(true);
2339 let detailed_analysis = params.detailed_analysis.unwrap_or(false);
2340
2341 let analysis_result = self.analyze_specialized_comprehensive(
2343            &params.target,
2344 &analysis_domains,
2345 &severity_threshold,
2346 &rule_sets,
2347 params.domain_options.as_ref(),
2348 include_recommendations,
2349 detailed_analysis,
2350 );
2351
2352 let result = match analysis_result {
2353 Ok(analysis) => analysis,
2354 Err(e) => {
2355 serde_json::json!({
2356 "status": "error",
2357 "message": format!("Specialized analysis failed: {e}"),
2358 "target": params.target
2359 })
2360 }
2361 };
2362
2363 Ok(CallToolResult::success(vec![Content::text(
2364 serde_json::to_string_pretty(&result)
2365 .unwrap_or_else(|_| "Error formatting response".to_string()),
2366 )]))
2367 }
2368
2369 #[tool(
2373 description = "Provide context-aware code improvement guidance and workflow recommendations"
2374 )]
2375 fn provide_guidance(
2376 &self,
2377 Parameters(params): Parameters<ProvideGuidanceParams>,
2378 ) -> std::result::Result<CallToolResult, McpError> {
2379 info!("Provide guidance tool called for target: {}", params.target);
2380
2381 let guidance_type = params
2382 .guidance_type
2383 .unwrap_or_else(|| "general".to_string());
2384 let include_examples = params.include_examples.unwrap_or(true);
2385 let priority_level = params
2386 .priority_level
2387 .unwrap_or_else(|| "medium".to_string());
2388
2389 let guidance_result = match guidance_type.as_str() {
2391 "complexity" => {
2392                self.generate_complexity_guidance(&params.target, include_examples, &priority_level)
2393 }
2394 "performance" => self.generate_performance_guidance(
2395                &params.target,
2396 include_examples,
2397 &priority_level,
2398 ),
2399 "security" => {
2400                self.generate_security_guidance(&params.target, include_examples, &priority_level)
2401 }
2402 "workflow" => {
2403                self.generate_workflow_guidance(&params.target, include_examples, &priority_level)
2404 }
2405 "general" => {
2406                self.generate_general_guidance(&params.target, include_examples, &priority_level)
2407 }
2408 _ => {
2409 let error_msg = format!("Invalid guidance type: {guidance_type}. Must be one of: complexity, performance, security, workflow, general");
2410 return Ok(CallToolResult::error(vec![Content::text(error_msg)]));
2411 }
2412 };
2413
2414 let result = match guidance_result {
2415 Ok(guidance) => guidance,
2416 Err(e) => {
2417 serde_json::json!({
2418 "status": "error",
2419 "message": format!("Guidance generation failed: {e}"),
2420 "target": params.target,
2421 "guidance_type": guidance_type
2422 })
2423 }
2424 };
2425
2426 Ok(CallToolResult::success(vec![Content::text(
2427 serde_json::to_string_pretty(&result)
2428 .unwrap_or_else(|_| "Error formatting response".to_string()),
2429 )]))
2430 }
2431
2432 #[tool(
2434 description = "Analyze code and provide optimization recommendations for performance and maintainability"
2435 )]
2436 fn optimize_code(
2437 &self,
2438 Parameters(params): Parameters<OptimizeCodeParams>,
2439 ) -> std::result::Result<CallToolResult, McpError> {
2440 info!("Optimize code tool called for target: {}", params.target);
2441
2442 let optimization_types = params
2443 .optimization_types
2444 .unwrap_or_else(|| vec!["performance".to_string(), "maintainability".to_string()]);
2445 let aggressive_mode = params.aggressive_mode.unwrap_or(false);
2446 let max_suggestions = params.max_suggestions.unwrap_or(10);
2447
2448 let optimization_result = self.generate_optimization_suggestions(
2450            &params.target,
2451 &optimization_types,
2452 aggressive_mode,
2453 max_suggestions,
2454 );
2455
2456 let result = match optimization_result {
2457 Ok(optimizations) => optimizations,
2458 Err(e) => {
2459 serde_json::json!({
2460 "status": "error",
2461 "message": format!("Optimization analysis failed: {e}"),
2462 "target": params.target,
2463 "optimization_types": optimization_types
2464 })
2465 }
2466 };
2467
2468 Ok(CallToolResult::success(vec![Content::text(
2469 serde_json::to_string_pretty(&result)
2470 .unwrap_or_else(|_| "Error formatting response".to_string()),
2471 )]))
2472 }
2473
2474 #[tool(description = "Automate common development workflows")]
2476 fn workflow_automation(
2477 &self,
2478 Parameters(params): Parameters<WorkflowAutomationParams>,
2479 ) -> std::result::Result<CallToolResult, McpError> {
2480 info!(
2481 "Workflow automation tool called for workflow: {}",
2482 params.workflow_type
2483 );
2484
2485 let automation_level = params
2486 .automation_level
2487 .unwrap_or_else(|| "standard".to_string());
2488 let dry_run = params.dry_run.unwrap_or(false);
2489 let _target_scope = params
2490 .target_scope
2491 .unwrap_or_else(|| "repository".to_string());
2492
2493 let result = if let Some(ref repo_path) = self.repository_path {
2494 match params.workflow_type.as_str() {
2495 "code_review_checklist" => {
2496 let mut checklist_items = Vec::new();
2497 let mut analysis_results = Vec::new();
2498
2499 checklist_items.extend(vec![
2501 serde_json::json!({
2502 "category": "Code Quality",
2503 "item": "All functions have proper documentation",
2504 "status": "pending",
2505 "automated_check": true
2506 }),
2507 serde_json::json!({
2508 "category": "Performance",
2509 "item": "No obvious performance bottlenecks",
2510 "status": "pending",
2511 "automated_check": true
2512 }),
2513 serde_json::json!({
2514 "category": "Security",
2515 "item": "No security vulnerabilities detected",
2516 "status": "pending",
2517 "automated_check": true
2518 }),
2519 serde_json::json!({
2520 "category": "Testing",
2521 "item": "Adequate test coverage",
2522 "status": "pending",
2523 "automated_check": false
2524 }),
2525 serde_json::json!({
2526 "category": "Code Style",
2527 "item": "Follows coding standards",
2528 "status": "pending",
2529 "automated_check": false
2530 }),
2531 ]);
2532
2533 if !dry_run {
2534 for glob_pattern in &["**/*.rs", "**/*.py", "**/*.js", "**/*.ts"] {
2536 let pattern = repo_path.join(glob_pattern);
2537 if let Ok(paths) = glob::glob(&pattern.display().to_string()) {
2538 for path in paths.flatten() {
2539 if let Ok(content) = std::fs::read_to_string(&path) {
2540 let has_documentation = content.contains("///")
2542 || content.contains("\"\"\"")
2543 || content.contains("/*");
2544
2545 analysis_results.push(serde_json::json!({
2546 "file": path.display().to_string(),
2547 "has_documentation": has_documentation,
2548 "line_count": content.lines().count()
2549 }));
2550 }
2551 }
2552 }
2553 }
2554 }
2555
2556 serde_json::json!({
2557 "status": "success",
2558 "workflow_type": "code_review_checklist",
2559 "automation_level": automation_level,
2560 "dry_run": dry_run,
2561 "checklist": checklist_items,
2562 "analysis_results": if dry_run {
2563 serde_json::json!("Analysis would be performed on actual run")
2564 } else {
2565 serde_json::Value::Array(analysis_results.clone())
2566 },
2567 "summary": {
2568 "total_items": checklist_items.len(),
2569 "automated_items": checklist_items.iter().filter(|item| item.get("automated_check") == Some(&serde_json::Value::Bool(true))).count(),
2570 "files_analyzed": if dry_run { 0 } else { analysis_results.len() }
2571 }
2572 })
2573 }
2574 "refactoring_pipeline" => {
2575 let mut refactoring_steps = Vec::new();
2576 let mut suggested_refactorings = Vec::new();
2577
2578 refactoring_steps.extend(vec![
2580 serde_json::json!({
2581 "step": "Complexity Analysis",
2582 "description": "Identify overly complex functions and methods",
2583 "automated": true,
2584 "priority": "high"
2585 }),
2586 serde_json::json!({
2587 "step": "Duplicate Code Detection",
2588 "description": "Find and consolidate duplicate code blocks",
2589 "automated": true,
2590 "priority": "medium"
2591 }),
2592 serde_json::json!({
2593 "step": "Dead Code Removal",
2594 "description": "Identify and remove unused code",
2595 "automated": false,
2596 "priority": "low"
2597 }),
2598 serde_json::json!({
2599 "step": "Design Pattern Application",
2600 "description": "Apply appropriate design patterns",
2601 "automated": false,
2602 "priority": "medium"
2603 }),
2604 ]);
2605
2606 if !dry_run {
2607 suggested_refactorings.extend(vec![
2609 serde_json::json!({
2610 "type": "Extract Method",
2611 "description": "Long functions should be broken down",
2612 "impact": "medium",
2613 "effort": "low"
2614 }),
2615 serde_json::json!({
2616 "type": "Remove Duplication",
2617 "description": "Consolidate similar code patterns",
2618 "impact": "high",
2619 "effort": "medium"
2620 }),
2621 ]);
2622 }
2623
2624 serde_json::json!({
2625 "status": "success",
2626 "workflow_type": "refactoring_pipeline",
2627 "automation_level": automation_level,
2628 "dry_run": dry_run,
2629 "pipeline_steps": refactoring_steps,
2630 "suggested_refactorings": if dry_run {
2631 serde_json::json!("Suggestions would be generated on actual run")
2632 } else {
2633 serde_json::Value::Array(suggested_refactorings.clone())
2634 },
2635 "summary": {
2636 "total_steps": refactoring_steps.len(),
2637 "automated_steps": refactoring_steps.iter().filter(|step| step.get("automated") == Some(&serde_json::Value::Bool(true))).count(),
2638 "suggestions_generated": if dry_run { 0 } else { suggested_refactorings.len() }
2639 }
2640 })
2641 }
2642 "testing_strategy_generation" => {
2643 let mut testing_recommendations = Vec::new();
2644 let mut test_metrics = serde_json::json!({});
2645
2646 testing_recommendations.extend(vec![
2648 serde_json::json!({
2649 "test_type": "Unit Tests",
2650 "description": "Test individual functions and methods",
2651 "priority": "high",
2652 "coverage_target": "90%",
2653 "tools": ["pytest", "jest", "cargo test"]
2654 }),
2655 serde_json::json!({
2656 "test_type": "Integration Tests",
2657 "description": "Test component interactions",
2658 "priority": "medium",
2659 "coverage_target": "70%",
2660 "tools": ["postman", "cypress", "integration test frameworks"]
2661 }),
2662 serde_json::json!({
2663 "test_type": "Performance Tests",
2664 "description": "Validate performance requirements",
2665 "priority": "medium",
2666 "coverage_target": "critical paths",
2667 "tools": ["benchmark frameworks", "load testing tools"]
2668 }),
2669 ]);
2670
2671 if !dry_run {
2672 let test_files_count = if let Ok(paths) =
2674 glob::glob(&repo_path.join("**/test*").display().to_string())
2675 {
2676 paths.count()
2677 } else {
2678 0
2679 };
2680
2681 test_metrics = serde_json::json!({
2682 "test_files_found": test_files_count,
2683 "estimated_coverage": if test_files_count > 0 { "Some coverage detected" } else { "No tests detected" },
2684 "recommendations_priority": "Immediate action needed"
2685 });
2686 }
2687
2688 serde_json::json!({
2689 "status": "success",
2690 "workflow_type": "testing_strategy_generation",
2691 "automation_level": automation_level,
2692 "dry_run": dry_run,
2693 "testing_strategy": testing_recommendations,
2694 "current_metrics": if dry_run {
2695 serde_json::json!("Metrics would be analyzed on actual run")
2696 } else {
2697 test_metrics
2698 },
2699 "summary": {
2700 "strategy_components": testing_recommendations.len(),
2701 "high_priority_items": testing_recommendations.iter().filter(|item| item.get("priority") == Some(&serde_json::Value::String("high".to_string()))).count()
2702 }
2703 })
2704 }
2705 _ => {
2706 serde_json::json!({
2707 "status": "error",
2708 "message": format!("Unsupported workflow type: {}", params.workflow_type),
2709 "supported_workflows": [
2710 "code_review_checklist",
2711 "refactoring_pipeline",
2712 "testing_strategy_generation"
2713 ]
2714 })
2715 }
2716 }
2717 } else {
2718 serde_json::json!({
2719 "status": "error",
2720 "message": "No repository configured. Call initialize_repository first.",
2721 "workflow_type": params.workflow_type
2722 })
2723 };
2724
2725 Ok(CallToolResult::success(vec![Content::text(
2726 serde_json::to_string_pretty(&result)
2727 .unwrap_or_else(|_| "Error formatting response".to_string()),
2728 )]))
2729 }
2730
2731 #[tool(description = "Process multiple files or operations in batch")]
2733 fn batch_process(
2734 &self,
2735 Parameters(params): Parameters<BatchProcessParams>,
2736 ) -> std::result::Result<CallToolResult, McpError> {
2737 info!(
2738 "Batch process tool called for operation: {}",
2739 params.operation
2740 );
2741
2742 let max_concurrent = params.max_concurrent.unwrap_or(3);
2743 let fail_fast = params.fail_fast.unwrap_or(false);
2744
2745 let result = if let Some(ref repo_path) = self.repository_path {
2746 let mut batch_results = Vec::new();
2747 let mut errors = Vec::new();
2748 let mut processed_count = 0;
2749 let mut skipped_count = 0;
2750
2751 match params.operation.as_str() {
2752 "analyze_complexity" => {
2753                    for target in &params.targets {
2754 let target_path = repo_path.join(target);
2755
2756 if target_path.exists() && target_path.is_file() {
2757 match self.code_analyzer.complexity.analyze_file_complexity(
2758 &target_path,
2759 &["all".to_string()],
2760 true,
2761 ) {
2762 Ok(result) => {
2763 batch_results.push(serde_json::json!({
2764 "target": target,
2765 "operation": "analyze_complexity",
2766 "status": "success",
2767 "result": result
2768 }));
2769 processed_count += 1;
2770 }
2771 Err(e) => {
2772 let error_msg =
2773 format!("Failed to analyze complexity for {target}: {e}");
2774 errors.push(error_msg.clone());
2775
2776 if fail_fast {
2777 return Ok(CallToolResult::success(vec![Content::text(
2778 serde_json::to_string_pretty(&serde_json::json!({
2779 "status": "error",
2780 "message": "Batch processing stopped due to error",
2781 "error": error_msg,
2782 "processed": processed_count,
2783 "fail_fast": true
2784 }))
2785 .unwrap_or_else(|_| {
2786 "Error formatting response".to_string()
2787 }),
2788 )]));
2789 }
2790
2791 batch_results.push(serde_json::json!({
2792 "target": target,
2793 "operation": "analyze_complexity",
2794 "status": "error",
2795 "error": error_msg
2796 }));
2797 }
2798 }
2799 } else {
2800 skipped_count += 1;
2801 batch_results.push(serde_json::json!({
2802 "target": target,
2803 "operation": "analyze_complexity",
2804 "status": "skipped",
2805 "reason": "File not found or not a file"
2806 }));
2807 }
2808 }
2809 }
2810 "analyze_performance" => {
2811                    for target in &params.targets {
2812 let target_path = repo_path.join(target);
2813
2814 if target_path.exists() && target_path.is_file() {
2815 match std::fs::read_to_string(&target_path) {
2816 Ok(content) => {
2817 match self
2818 .code_analyzer
2819 .performance
2820 .comprehensive_analysis(&content, None)
2821 {
2822 Ok(result) => {
2823 batch_results.push(serde_json::json!({
2824 "target": target,
2825 "operation": "analyze_performance",
2826 "status": "success",
2827 "result": result
2828 }));
2829 processed_count += 1;
2830 }
2831 Err(e) => {
2832 let error_msg = format!(
2833 "Failed to analyze performance for {target}: {e}"
2834 );
2835 errors.push(error_msg.clone());
2836
2837 if fail_fast {
2838 return Ok(CallToolResult::success(vec![Content::text(
2839 serde_json::to_string_pretty(&serde_json::json!({
2840 "status": "error",
2841 "message": "Batch processing stopped due to error",
2842 "error": error_msg,
2843 "processed": processed_count,
2844 "fail_fast": true
2845 })).unwrap_or_else(|_| "Error formatting response".to_string())
2846 )]));
2847 }
2848
2849 batch_results.push(serde_json::json!({
2850 "target": target,
2851 "operation": "analyze_performance",
2852 "status": "error",
2853 "error": error_msg
2854 }));
2855 }
2856 }
2857 }
2858 Err(e) => {
2859 let error_msg = format!("Failed to read file {target}: {e}");
2860 errors.push(error_msg.clone());
2861
2862 batch_results.push(serde_json::json!({
2863 "target": target,
2864 "operation": "analyze_performance",
2865 "status": "error",
2866 "error": error_msg
2867 }));
2868 }
2869 }
2870 } else {
2871 skipped_count += 1;
2872 batch_results.push(serde_json::json!({
2873 "target": target,
2874 "operation": "analyze_performance",
2875 "status": "skipped",
2876 "reason": "File not found or not a file"
2877 }));
2878 }
2879 }
2880 }
2881 "analyze_security" => {
2882                    for target in &params.targets {
2883 let target_path = repo_path.join(target);
2884
2885 if target_path.exists() && target_path.is_file() {
2886 match std::fs::read_to_string(&target_path) {
2887 Ok(content) => {
2888 match self.code_analyzer.security.analyze_content_with_location(
2889 &content,
2890 Some(&target_path.display().to_string()),
2891 &["all".to_string()],
2892 "medium",
2893 ) {
2894 Ok(vulnerabilities) => {
2895 batch_results.push(serde_json::json!({
2896 "target": target,
2897 "operation": "analyze_security",
2898 "status": "success",
2899 "result": {
2900 "vulnerabilities_count": vulnerabilities.len(),
2901 "vulnerabilities": vulnerabilities.iter().map(|vuln| {
2902 serde_json::json!({
2903 "type": vuln.vulnerability_type,
2904 "severity": vuln.severity,
2905 "description": vuln.description,
2906 "recommendation": vuln.recommendation
2907 })
2908 }).collect::<Vec<_>>()
2909 }
2910 }));
2911 processed_count += 1;
2912 }
2913 Err(e) => {
2914 let error_msg = format!(
2915 "Failed to analyze security for {target}: {e}"
2916 );
2917 errors.push(error_msg.clone());
2918
2919 batch_results.push(serde_json::json!({
2920 "target": target,
2921 "operation": "analyze_security",
2922 "status": "error",
2923 "error": error_msg
2924 }));
2925 }
2926 }
2927 }
2928 Err(e) => {
2929 let error_msg = format!("Failed to read file {target}: {e}");
2930 errors.push(error_msg.clone());
2931
2932 batch_results.push(serde_json::json!({
2933 "target": target,
2934 "operation": "analyze_security",
2935 "status": "error",
2936 "error": error_msg
2937 }));
2938 }
2939 }
2940 } else {
2941 skipped_count += 1;
2942 batch_results.push(serde_json::json!({
2943 "target": target,
2944 "operation": "analyze_security",
2945 "status": "skipped",
2946 "reason": "File not found or not a file"
2947 }));
2948 }
2949 }
2950 }
2951 "find_patterns" => {
2953                    let pattern = if let Some(params_value) = &params.parameters {
2954 params_value
2955 .get("pattern")
2956 .and_then(|v| v.as_str())
2957 .unwrap_or(".*")
2958 } else {
2959 ".*"
2960 };
2961
2962                    for target in &params.targets {
2963 let target_path = repo_path.join(target);
2964
2965 if target_path.exists() && target_path.is_file() {
2966 match std::fs::read_to_string(&target_path) {
2967 Ok(content) => {
2968 let regex = match regex::Regex::new(pattern) {
2970 Ok(r) => r,
2971 Err(e) => {
2972 errors.push(format!("Invalid regex pattern: {e}"));
2973 continue;
2974 }
2975 };
2976
2977 let matches: Vec<_> = regex
2978 .find_iter(&content)
2979 .enumerate()
2980                            .take(50)
2981                            .map(|(i, m)| {
2982 let line_num =
2983 content[..m.start()].matches('\n').count() + 1;
2984 serde_json::json!({
2985 "match_index": i,
2986 "match_text": m.as_str(),
2987 "line": line_num,
2988 "start": m.start(),
2989 "end": m.end()
2990 })
2991 })
2992 .collect();
2993
2994 batch_results.push(serde_json::json!({
2995 "target": target,
2996 "operation": "find_patterns",
2997 "status": "success",
2998 "result": {
2999 "pattern": pattern,
3000 "matches_count": matches.len(),
3001 "matches": matches
3002 }
3003 }));
3004 processed_count += 1;
3005 }
3006 Err(e) => {
3007 let error_msg = format!("Failed to read file {target}: {e}");
3008 errors.push(error_msg.clone());
3009
3010 batch_results.push(serde_json::json!({
3011 "target": target,
3012 "operation": "find_patterns",
3013 "status": "error",
3014 "error": error_msg
3015 }));
3016 }
3017 }
3018 } else {
3019 skipped_count += 1;
3020 batch_results.push(serde_json::json!({
3021 "target": target,
3022 "operation": "find_patterns",
3023 "status": "skipped",
3024 "reason": "File not found or not a file"
3025 }));
3026 }
3027 }
3028 }
3029 _ => {
3030 return Ok(CallToolResult::success(vec![Content::text(
3031 serde_json::to_string_pretty(&serde_json::json!({
3032 "status": "error",
3033 "message": format!("Unsupported operation: {}", params.operation),
3034 "supported_operations": [
3035 "analyze_complexity",
3036 "analyze_performance",
3037 "analyze_security",
3038 "find_patterns"
3039 ]
3040 }))
3041 .unwrap_or_else(|_| "Error formatting response".to_string()),
3042 )]));
3043 }
3044 }
3045
3046 serde_json::json!({
3047 "status": "success",
3048 "operation": params.operation,
3049 "summary": {
3050 "total_targets": params.targets.len(),
3051 "processed": processed_count,
3052 "skipped": skipped_count,
3053 "errors": errors.len(),
3054 "max_concurrent": max_concurrent,
3055 "fail_fast": fail_fast
3056 },
3057 "results": batch_results,
3058 "errors": errors
3059 })
3060 } else {
3061 serde_json::json!({
3062 "status": "error",
3063 "message": "No repository configured. Call initialize_repository first.",
3064 "operation": params.operation
3065 })
3066 };
3067
3068 Ok(CallToolResult::success(vec![Content::text(
3069 serde_json::to_string_pretty(&result)
3070 .unwrap_or_else(|_| "Error formatting response".to_string()),
3071 )]))
3072 }
3073
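    /// Initialize the server against a repository on disk.
    ///
    /// Validates the path, clears any previously loaded graph data, registers the
    /// repository with the repository manager, indexes it, applies the resulting
    /// patches to the graph store, and rebuilds the content search index.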
3074 pub async fn initialize_repository<P: AsRef<std::path::Path>>(
3076 &mut self,
3077 repo_path: P,
3078 ) -> Result<(), crate::Error> {
3079 let repo_path = repo_path.as_ref().to_path_buf();
3080
3081 info!("Initializing repository: {}", repo_path.display());
3082
3083 if !repo_path.exists() {
3085 return Err(crate::Error::server_init(format!(
3086 "Repository path does not exist: {}",
3087 repo_path.display()
3088 )));
3089 }
3090
3091 if !repo_path.is_dir() {
3092 return Err(crate::Error::server_init(format!(
3093 "Repository path is not a directory: {}",
3094 repo_path.display()
3095 )));
3096 }
3097
3098 let repo_id = repo_path
3100 .file_name()
3101 .and_then(|n| n.to_str())
3102 .unwrap_or("default")
3103 .to_string();
3104
3105 let repo_config = RepositoryConfig::new(repo_id.clone(), &repo_path)
3106 .with_name(format!("Repository: {repo_id}"))
3107 .with_description(format!(
3108 "CodePrism MCP Server repository at {}",
3109 repo_path.display()
3110 ));
3111
3112 self.graph_store.clear();
3114 info!("Cleared existing graph data");
3115
3116 match Arc::get_mut(&mut self.repository_manager) {
3118 Some(manager) => {
3119 manager
3120 .register_repository(repo_config.clone())
3121 .map_err(|e| {
3122 crate::Error::server_init(format!("Failed to register repository: {e}"))
3123 })?;
3124 info!("Registered repository with manager: {}", repo_id);
3125 }
3126 None => {
3127 let language_registry = Arc::new(codeprism_core::LanguageRegistry::new());
3129 let mut new_manager = codeprism_core::RepositoryManager::new(language_registry);
3130 new_manager
3131 .register_repository(repo_config.clone())
3132 .map_err(|e| {
3133 crate::Error::server_init(format!("Failed to register repository: {e}"))
3134 })?;
3135 self.repository_manager = Arc::new(new_manager);
3136 info!(
3137 "Created new repository manager and registered repository: {}",
3138 repo_id
3139 );
3140 }
3141 }
3142
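        // Local progress reporter that tracks file counts and logs indexing
        // progress, completion, and errors via tracing.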
3143 struct IndexingProgressReporter {
3145 total_files: std::sync::atomic::AtomicUsize,
3146 processed_files: std::sync::atomic::AtomicUsize,
3147 }
3148
3149 impl IndexingProgressReporter {
3150 fn new() -> Self {
3151 Self {
3152 total_files: std::sync::atomic::AtomicUsize::new(0),
3153 processed_files: std::sync::atomic::AtomicUsize::new(0),
3154 }
3155 }
3156 }
3157
3158 impl codeprism_core::ProgressReporter for IndexingProgressReporter {
3159 fn report_progress(&self, current: usize, total: Option<usize>) {
3160 if let Some(total) = total {
3161 self.total_files
3162 .store(total, std::sync::atomic::Ordering::Relaxed);
3163 }
3164 self.processed_files
3165 .store(current, std::sync::atomic::Ordering::Relaxed);
3166
3167                if current % 100 == 0 || total == Some(current) {
3168 info!(
3169 "Repository indexing progress: {}/{}",
3170 current,
3171 total
3172 .map(|t| t.to_string())
3173 .unwrap_or_else(|| "?".to_string())
3174 );
3175 }
3176 }
3177
3178 fn report_complete(&self, result: &codeprism_core::ScanResult) {
3179 info!(
3180 "Repository scan completed: {} files discovered in {}ms",
3181 result.total_files, result.duration_ms
3182 );
3183 }
3184
3185 fn report_error(&self, error: &codeprism_core::Error) {
3186 warn!("Repository scanning error: {}", error);
3187 }
3188 }
3189
3190 let progress_reporter = Arc::new(IndexingProgressReporter::new());
3191
3192 info!("Starting repository indexing...");
3194 let start_time = std::time::Instant::now();
3195
3196 let indexing_result = match Arc::try_unwrap(self.repository_manager.clone()) {
3198 Ok(mut manager) => {
3199 let result = manager
3200 .index_repository(&repo_id, Some(progress_reporter.clone()))
3201 .await
3202 .map_err(|e| {
3203 crate::Error::server_init(format!("Failed to index repository: {e}"))
3204 })?;
3205
3206 self.repository_manager = Arc::new(manager);
3208 result
3209 }
3210 Err(shared_manager) => {
3211 warn!("Repository manager is in use, deferring graph population");
3214 warn!("Repository will be indexed on next initialization or when manager becomes available");
3215
3216 self.repository_manager = shared_manager;
3218
3219 self.repository_path = Some(repo_path);
3221 return Ok(());
3222 }
3223 };
3224
3225 let duration = start_time.elapsed();
3226 info!(
3227 "Repository indexing completed in {:.2}s",
3228 duration.as_secs_f64()
3229 );
3230
3231 info!(
3233 "Applying {} patches to graph store...",
3234 indexing_result.patches.len()
3235 );
3236
3237 let mut nodes_added = 0;
3238 let mut edges_added = 0;
3239
3240 for patch in &indexing_result.patches {
3241 for node in &patch.nodes_add {
3243 self.graph_store.add_node(node.clone());
3244 nodes_added += 1;
3245 }
3246
3247 for edge in &patch.edges_add {
3249 self.graph_store.add_edge(edge.clone());
3250 edges_added += 1;
3251 }
3252 }
3253
3254 info!(
3255 "Graph store populated: {} nodes, {} edges",
3256 nodes_added, edges_added
3257 );
3258
3259 info!("Updating content search index...");
3261 let content_search_manager =
3262 ContentSearchManager::with_graph_store(Arc::clone(&self.graph_store));
3263
3264 let mut file_paths = std::collections::HashSet::new();
3266 for patch in &indexing_result.patches {
3267 for node in &patch.nodes_add {
3268 file_paths.insert(&node.file);
3269 }
3270 }
3271
3272 let mut content_files_indexed = 0;
3274 for file_path in file_paths {
3275 if let Ok(content) = std::fs::read_to_string(file_path) {
3276 if let Err(e) = content_search_manager.index_file(file_path, &content) {
3277 warn!("Failed to index content for {}: {}", file_path.display(), e);
3278 } else {
3279 content_files_indexed += 1;
3280 }
3281 }
3282 }
3283
3284 self.content_search = Arc::new(content_search_manager);
3286 info!(
3287 "Content search index updated: {} files indexed",
3288 content_files_indexed
3289 );
3290
3291 self.repository_path = Some(repo_path);
3293
3294 let graph_stats = self.graph_store.get_stats();
3296 info!("Repository initialization completed:");
3297 info!(" - Repository ID: {}", repo_id);
3298 info!(
3299 " - Files processed: {}",
3300 indexing_result.stats.files_processed
3301 );
3302 info!(" - Nodes in graph: {}", graph_stats.total_nodes);
3303 info!(" - Edges in graph: {}", graph_stats.total_edges);
3304 info!(" - Files indexed: {}", graph_stats.total_files);
3305 info!(" - Content files indexed: {}", content_files_indexed);
3306 info!(" - Processing time: {:.2}s", duration.as_secs_f64());
3307
3308 if !indexing_result.failed_files.is_empty() {
3309 warn!(
3310 " - Failed files: {} (check logs for details)",
3311 indexing_result.failed_files.len()
3312 );
3313 for (file_path, error) in indexing_result.failed_files.iter().take(5) {
3314 warn!(" • {}: {}", file_path.display(), error);
3315 }
3316 if indexing_result.failed_files.len() > 5 {
3317 warn!(
3318 " ... and {} more",
3319 indexing_result.failed_files.len() - 5
3320 );
3321 }
3322 }
3323
3324 Ok(())
3325 }
3326
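    /// Map a set of performance issues to a letter grade (A-F) based on the
    /// number of critical, high, and medium severity findings.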
3327 fn calculate_performance_grade(
3329 &self,
3330 issues: &[codeprism_analysis::performance::PerformanceIssue],
3331 ) -> String {
3332 if issues.is_empty() {
3333 return "A".to_string();
3334 }
3335
3336 let critical_count = issues.iter().filter(|i| i.severity == "critical").count();
3337 let high_count = issues.iter().filter(|i| i.severity == "high").count();
3338 let medium_count = issues.iter().filter(|i| i.severity == "medium").count();
3339
3340 match (critical_count, high_count, medium_count) {
3341 (0, 0, 0..=2) => "A",
3342 (0, 0, 3..=5) => "B",
3343            (0, 0, _) => "C",
3344            (0, 1..=2, _) => "C",
3345            (0, _, _) => "D",
3346            (_, _, _) => "F",
3347        }
3348 .to_string()
3349 }
3350
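    /// Start the MCP server on stdio transport and block until the connection
    /// is closed or the server shuts down.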
3351 pub async fn run(self) -> std::result::Result<(), crate::Error> {
3353 info!("Starting CodePrism MCP Server");
3354
3355 use rmcp::transport::stdio;
3356
3357 let service = self
3359 .serve(stdio())
3360 .await
3361 .map_err(|e| crate::Error::server_init(format!("Failed to start MCP server: {e}")))?;
3362
3363 info!("MCP server is ready to accept connections");
3364
3365 service
3367 .waiting()
3368 .await
3369 .map_err(|e| crate::Error::server_init(format!("Server error: {e}")))?;
3370
3371 info!("MCP server shut down successfully");
3372 Ok(())
3373 }
3374
3375 pub fn config(&self) -> &Config {
3377 &self.config
3378 }
3379
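    /// Derive search keywords from a concept string: the lowercased concept, its
    /// individual words, and any common programming terms it contains.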
3380 fn extract_semantic_keywords(&self, concept: &str) -> Vec<String> {
3382 let mut keywords = Vec::new();
3383
3384 keywords.push(concept.to_lowercase());
3386
3387 let words: Vec<&str> = concept
3389 .split(&[' ', '_', '-', '.', '/', '\\'][..])
3390 .filter(|w| !w.is_empty() && w.len() > 2)
3391 .collect();
3392
3393 for word in words {
3394 keywords.push(word.to_lowercase());
3395 }
3396
3397 let programming_keywords = [
3399 "function",
3400 "method",
3401 "class",
3402 "interface",
3403 "service",
3404 "manager",
3405 "handler",
3406 "controller",
3407 "repository",
3408 "model",
3409 "view",
3410 "component",
3411 "module",
3412 "package",
3413 "config",
3414 "configuration",
3415 "settings",
3416 "utils",
3417 "utilities",
3418 "helpers",
3419 ];
3420
3421 for keyword in &programming_keywords {
3422 if concept.to_lowercase().contains(keyword) {
3423 keywords.push(keyword.to_string());
3424 }
3425 }
3426
3427 keywords.sort();
3429 keywords.dedup();
3430 keywords
3431 }
3432
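    /// Score how relevant a piece of content is to a concept (0.0 to 1.0) using
    /// whole-phrase matches, per-word matches, optional context matches, and a
    /// small bonus for moderately sized content.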
3433 fn calculate_semantic_relevance(
3435 &self,
3436 concept: &str,
3437 content: &str,
3438 context: Option<&str>,
3439 ) -> f32 {
3440 let mut relevance = 0.0;
3441 let concept_lower = concept.to_lowercase();
3442 let content_lower = content.to_lowercase();
3443
3444 if content_lower.contains(&concept_lower) {
3446 relevance += 0.8;
3447 }
3448
3449 let concept_words: Vec<&str> = concept_lower.split_whitespace().collect();
3451 let mut matched_words = 0;
3452
3453 for word in &concept_words {
3454 if content_lower.contains(word) {
3455 matched_words += 1;
3456 }
3457 }
3458
3459 if !concept_words.is_empty() {
3460 relevance += (matched_words as f32 / concept_words.len() as f32) * 0.5;
3461 }
3462
3463 if let Some(ctx) = context {
3465 let context_lower = ctx.to_lowercase();
3466 if content_lower.contains(&context_lower) {
3467 relevance += 0.3;
3468 }
3469 }
3470
3471 if content.len() > 50 && content.len() < 500 {
3473            relevance += 0.1;
3474        }
3475
3476 relevance.min(1.0f32)
3477 }
3478
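    /// Score how relevant a graph node is to a concept (0.0 to 1.0) based on its
    /// name, node kind, optional context (name or file path), and metadata.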
3479 fn calculate_symbol_semantic_relevance(
3481 &self,
3482 concept: &str,
3483 node: &codeprism_core::Node,
3484 context: Option<&str>,
3485 ) -> f32 {
3486 let mut relevance: f32 = 0.0;
3487 let concept_lower = concept.to_lowercase();
3488 let name_lower = node.name.to_lowercase();
3489
3490 if name_lower.contains(&concept_lower) {
3492 relevance += 0.7;
3493 }
3494
3495 match node.kind {
3497 NodeKind::Function | NodeKind::Method => {
3498 if concept_lower.contains("function") || concept_lower.contains("method") {
3499 relevance += 0.3;
3500 }
3501 }
3502 NodeKind::Class => {
3503 if concept_lower.contains("class") || concept_lower.contains("type") {
3504 relevance += 0.3;
3505 }
3506 }
3507 NodeKind::Variable => {
3508 if concept_lower.contains("variable") || concept_lower.contains("data") {
3509 relevance += 0.2;
3510 }
3511 }
3512 _ => {}
3513 }
3514
3515 if let Some(ctx) = context {
3517 let context_lower = ctx.to_lowercase();
3518 if name_lower.contains(&context_lower) {
3519 relevance += 0.2;
3520 }
3521
3522 let file_path = node.file.to_string_lossy().to_lowercase();
3524 if file_path.contains(&context_lower) {
3525 relevance += 0.2;
3526 }
3527 }
3528
3529 if let serde_json::Value::Object(metadata_obj) = &node.metadata {
3531 for (key, value) in metadata_obj {
3532 let metadata_text = format!("{key}: {value}").to_lowercase();
3533 if metadata_text.contains(&concept_lower) {
3534 relevance += 0.1;
3535 }
3536 }
3537 }
3538
3539 relevance.min(1.0f32)
3540 }
3541
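    /// Generate naming variations of a concept (camelCase, snake_case, PascalCase,
    /// common prefixes and suffixes, and a wildcard regex) for broader matching.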
3542 fn generate_concept_variations(&self, concept: &str) -> Vec<String> {
3544 let mut variations = Vec::new();
3545 let concept_lower = concept.to_lowercase();
3546
3547 let words: Vec<&str> = concept.split_whitespace().collect();
3549 if words.len() > 1 {
3550 let mut camel_case = words[0].to_lowercase();
3552 for word in &words[1..] {
3553 let mut chars = word.chars();
3554 if let Some(first) = chars.next() {
3555 camel_case.push(first.to_uppercase().next().unwrap());
3556 camel_case.push_str(chars.as_str().to_lowercase().as_str());
3557 }
3558 }
3559 variations.push(camel_case);
3560
3561 variations.push(words.join("_").to_lowercase());
3563
3564 let pascal_case = words
3566 .iter()
3567 .map(|word| {
3568 let mut chars = word.chars();
3569 if let Some(first) = chars.next() {
3570 first
3571 .to_uppercase()
3572 .chain(chars.as_str().to_lowercase().chars())
3573 .collect()
3574 } else {
3575 String::new()
3576 }
3577 })
3578 .collect::<Vec<String>>()
3579 .join("");
3580 variations.push(pascal_case);
3581 }
3582
3583 let suffixes = [
3585 "er",
3586 "or",
3587 "ed",
3588 "ing",
3589 "s",
3590 "es",
3591 "Manager",
3592 "Service",
3593 "Handler",
3594 "Controller",
3595 ];
3596 let prefixes = ["get", "set", "is", "has", "can", "should", "will"];
3597
3598 for suffix in &suffixes {
3599 variations.push(format!("{}{}", concept_lower, suffix.to_lowercase()));
3600 }
3601
3602 for prefix in &prefixes {
3603 variations.push(format!("{prefix}{concept}"));
3604 }
3605
3606 if concept_lower.len() > 3 {
3608 variations.push(format!(".*{concept_lower}.*"));
3609 }
3610
3611 variations
3612 }
3613
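    /// Heuristically decide whether a path points at a test file based on common
    /// test directory names, file suffixes, and naming conventions.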
3614 fn is_test_file(&self, file_path: &str) -> bool {
3616 let path_lower = file_path.to_lowercase();
3617 path_lower.contains("/test/")
3618 || path_lower.contains("/tests/")
3619 || path_lower.contains("\\test\\")
3620 || path_lower.contains("\\tests\\")
3621 || path_lower.ends_with("_test.rs")
3622 || path_lower.ends_with("_test.py")
3623 || path_lower.ends_with("_test.js")
3624 || path_lower.ends_with("_test.ts")
3625 || path_lower.ends_with(".test.js")
3626 || path_lower.ends_with(".test.ts")
3627 || path_lower.ends_with(".spec.js")
3628 || path_lower.ends_with(".spec.ts")
3629 || path_lower.contains("test_")
3630 || path_lower.contains("spec_")
3631 }
3632
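    /// Heuristically decide whether a path points at third-party or generated
    /// content (node_modules, vendor, target, build, dist, coverage, etc.).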
3633 fn is_dependency_file(&self, file_path: &str) -> bool {
3635 let path_lower = file_path.to_lowercase();
3636 path_lower.contains("/node_modules/")
3637 || path_lower.contains("\\node_modules\\")
3638 || path_lower.contains("/vendor/")
3639 || path_lower.contains("\\vendor\\")
3640 || path_lower.contains("/target/")
3641 || path_lower.contains("\\target\\")
3642 || path_lower.contains("/.cargo/")
3643 || path_lower.contains("\\.cargo\\")
3644 || path_lower.contains("/build/")
3645 || path_lower.contains("\\build\\")
3646 || path_lower.contains("/dist/")
3647 || path_lower.contains("\\dist\\")
3648 || path_lower.contains("/coverage/")
3649 || path_lower.contains("\\coverage\\")
3650 }
3651
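    /// Check whether a file size matches a size filter such as "small", "medium",
    /// "large", "very_large", "500kb", "10mb", or an explicit "min-max" range.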
3652 fn matches_size_range(&self, file_size: u64, size_range: &str) -> bool {
3654 match size_range {
3655            "small" => file_size < 10_000,
3656            "medium" => (10_000..100_000).contains(&file_size),
3657            "large" => (100_000..1_000_000).contains(&file_size),
3658            "very_large" => file_size >= 1_000_000,
3659            range if range.contains("kb") => {
3660 if let Ok(kb) = range.trim_end_matches("kb").parse::<u64>() {
3661 file_size <= kb * 1_024
3662 } else {
3663 true
3664 }
3665 }
3666 range if range.contains("mb") => {
3667 if let Ok(mb) = range.trim_end_matches("mb").parse::<u64>() {
3668 file_size <= mb * 1_024 * 1_024
3669 } else {
3670 true
3671 }
3672 }
3673 range if range.contains("-") => {
3674 let parts: Vec<&str> = range.split('-').collect();
3676 if parts.len() == 2 {
3677 let min_size = Self::parse_size(parts[0]).unwrap_or(0);
3678 let max_size = Self::parse_size(parts[1]).unwrap_or(u64::MAX);
3679 file_size >= min_size && file_size <= max_size
3680 } else {
3681 true
3682 }
3683 }
3684            _ => true,
3685        }
3686 }
3687
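    /// Parse a human-readable size string ("512", "100kb", "10mb", "1gb") into bytes.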
3688 fn parse_size(size_str: &str) -> Option<u64> {
3690 let size_str = size_str.trim().to_lowercase();
3691 if size_str.ends_with("kb") {
3692 size_str
3693 .trim_end_matches("kb")
3694 .parse::<u64>()
3695 .ok()
3696 .map(|s| s * 1_024)
3697 } else if size_str.ends_with("mb") {
3698 size_str
3699 .trim_end_matches("mb")
3700 .parse::<u64>()
3701 .ok()
3702 .map(|s| s * 1_024 * 1_024)
3703 } else if size_str.ends_with("gb") {
3704 size_str
3705 .trim_end_matches("gb")
3706 .parse::<u64>()
3707 .ok()
3708 .map(|s| s * 1_024 * 1_024 * 1_024)
3709 } else {
3710 size_str.parse::<u64>().ok()
3711 }
3712 }
3713
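    /// Estimate a rough complexity score for a node from its kind, its span in
    /// lines, and any complexity metrics recorded in its metadata.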
3714 fn estimate_symbol_complexity(&self, node: &codeprism_core::Node) -> u32 {
3716        let mut complexity = 1;
3719        complexity += match node.kind {
3720 NodeKind::Function | NodeKind::Method => 3,
3721 NodeKind::Class => 5,
3722 NodeKind::Module => 2,
3723 NodeKind::Variable => 1,
3724 _ => 0,
3725 };
3726
3727 let span_lines = node.span.end_line.saturating_sub(node.span.start_line);
3729 complexity += match span_lines {
3730 0..=10 => 1,
3731 11..=50 => 3,
3732 51..=100 => 5,
3733 101..=200 => 8,
3734 _ => 10,
3735 };
3736
3737 if let serde_json::Value::Object(metadata) = &node.metadata {
3739 for (key, value) in metadata {
3741 if key.contains("complexity") || key.contains("cyclomatic") {
3742 if let Some(complex_value) = value.as_u64() {
3743 complexity += complex_value as u32;
3744 }
3745 }
3746 }
3747 }
3748
3749 complexity
3750 }
3751
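    /// Check whether a complexity score matches a filter such as "low", "medium",
    /// "high", "very_high", or a comparison like "<=10" or ">20".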
3752 fn matches_complexity_filter(&self, complexity_score: u32, complexity_filter: &str) -> bool {
3754 match complexity_filter {
3755 "low" => complexity_score <= 5,
3756 "medium" => complexity_score > 5 && complexity_score <= 15,
3757 "high" => complexity_score > 15 && complexity_score <= 30,
3758 "very_high" => complexity_score > 30,
3759 filter if filter.starts_with("<=") => {
3760 if let Ok(threshold) = filter[2..].parse::<u32>() {
3761 complexity_score <= threshold
3762 } else {
3763 true
3764 }
3765 }
3766 filter if filter.starts_with(">=") => {
3767 if let Ok(threshold) = filter[2..].parse::<u32>() {
3768 complexity_score >= threshold
3769 } else {
3770 true
3771 }
3772 }
3773 filter if filter.starts_with('<') => {
3774 if let Ok(threshold) = filter[1..].parse::<u32>() {
3775 complexity_score < threshold
3776 } else {
3777 true
3778 }
3779 }
3780 filter if filter.starts_with('>') => {
3781 if let Ok(threshold) = filter[1..].parse::<u32>() {
3782 complexity_score > threshold
3783 } else {
3784 true
3785 }
3786 }
3787            _ => true,
3788        }
3789 }
3790
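    /// Check whether a modification time falls within a date filter such as
    /// "today", "week", "month", "year", or a relative window like "7d" or "12h".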
3791 fn matches_date_range(&self, modified_time: std::time::SystemTime, date_range: &str) -> bool {
3793 let now = std::time::SystemTime::now();
3794
3795 match date_range {
3796 "today" => {
3797 if let Ok(duration) = now.duration_since(modified_time) {
3798                    duration.as_secs() < 24 * 60 * 60
3799                } else {
3800 false
3801 }
3802 }
3803 "week" | "last_week" => {
3804 if let Ok(duration) = now.duration_since(modified_time) {
3805                    duration.as_secs() < 7 * 24 * 60 * 60
3806                } else {
3807 false
3808 }
3809 }
3810 "month" | "last_month" => {
3811 if let Ok(duration) = now.duration_since(modified_time) {
3812                    duration.as_secs() < 30 * 24 * 60 * 60
3813                } else {
3814 false
3815 }
3816 }
3817 "year" | "last_year" => {
3818 if let Ok(duration) = now.duration_since(modified_time) {
3819                    duration.as_secs() < 365 * 24 * 60 * 60
3820                } else {
3821 false
3822 }
3823 }
3824 range if range.ends_with("d") => {
3825 if let Ok(days) = range.trim_end_matches('d').parse::<u64>() {
3826 if let Ok(duration) = now.duration_since(modified_time) {
3827 duration.as_secs() < days * 24 * 60 * 60
3828 } else {
3829 false
3830 }
3831 } else {
3832 true
3833 }
3834 }
3835 range if range.ends_with("h") => {
3836 if let Ok(hours) = range.trim_end_matches('h').parse::<u64>() {
3837 if let Ok(duration) = now.duration_since(modified_time) {
3838 duration.as_secs() < hours * 60 * 60
3839 } else {
3840 false
3841 }
3842 } else {
3843 true
3844 }
3845 }
3846            _ => true,
3847        }
3848 }
3849
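    /// Build complexity-reduction guidance for a target: general recommendations,
    /// detailed suggestions, optional before/after examples, and next steps.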
3850 fn generate_complexity_guidance(
3854 &self,
3855 target: &str,
3856 include_examples: bool,
3857 priority_level: &str,
3858 ) -> anyhow::Result<serde_json::Value> {
3859 let mut guidance = vec![
3862 "Break down large functions into smaller, focused methods".to_string(),
3863 "Reduce nested conditional statements using early returns".to_string(),
3864 "Extract complex logic into well-named helper functions".to_string(),
3865 "Consider using design patterns to simplify complex relationships".to_string(),
3866 ];
3867
3868 if priority_level == "high" {
3869 guidance.extend(vec![
3870 "URGENT: Identify and refactor functions with cyclomatic complexity > 15"
3871 .to_string(),
3872 "URGENT: Split classes with more than 500 lines of code".to_string(),
3873 ]);
3874 }
3875
3876 let mut suggestions = vec![
3877 serde_json::json!({
3878 "category": "Function Complexity",
3879 "suggestion": "Break down large functions",
3880 "reasoning": "Functions with high complexity are harder to test and maintain",
3881 "impact": "High",
3882 "effort": "Medium"
3883 }),
3884 serde_json::json!({
3885 "category": "Conditional Complexity",
3886 "suggestion": "Reduce nested conditions",
3887 "reasoning": "Deep nesting reduces readability and increases bug potential",
3888 "impact": "Medium",
3889 "effort": "Low"
3890 }),
3891 ];
3892
3893 if include_examples {
3894 suggestions.push(serde_json::json!({
3895 "category": "Example Refactoring",
3896 "suggestion": "Extract method pattern",
3897 "example": {
3898 "before": "def process_data(data):\n if data:\n if data.valid:\n if data.type == 'A':\n return process_type_a(data)\n elif data.type == 'B':\n return process_type_b(data)",
3899 "after": "def process_data(data):\n if not self.is_valid_data(data):\n return None\n return self.process_by_type(data)\n\ndef is_valid_data(self, data):\n return data and data.valid"
3900 },
3901 "impact": "High",
3902 "effort": "Medium"
3903 }));
3904 }
3905
3906 Ok(serde_json::json!({
3907 "status": "success",
3908 "guidance_type": "complexity",
3909 "target": target,
3910 "priority_level": priority_level,
3911 "recommendations": guidance,
3912 "detailed_suggestions": suggestions,
3913 "next_steps": [
3914 "Run complexity analysis to identify high-complexity areas",
3915 "Prioritize refactoring based on change frequency and bug reports",
3916 "Set up complexity metrics monitoring"
3917 ],
3918 "estimated_impact": {
3919 "maintainability": "High",
3920 "testability": "High",
3921 "bug_reduction": "Medium"
3922 }
3923 }))
3924 }
3925
3926 fn generate_performance_guidance(
3928 &self,
3929 target: &str,
3930 include_examples: bool,
3931 priority_level: &str,
3932 ) -> anyhow::Result<serde_json::Value> {
3933 let mut guidance = vec![
3934 "Profile code to identify actual bottlenecks before optimizing".to_string(),
3935 "Consider algorithmic improvements over micro-optimizations".to_string(),
3936 "Implement caching for expensive computations".to_string(),
3937 "Use appropriate data structures for access patterns".to_string(),
3938 ];
3939
3940 if priority_level == "high" {
3941 guidance.extend(vec![
3942 "URGENT: Address O(n²) algorithms in hot paths".to_string(),
3943 "URGENT: Implement database query optimization".to_string(),
3944 ]);
3945 }
3946
3947 let mut suggestions = vec![
3948 serde_json::json!({
3949 "category": "Algorithmic Efficiency",
3950 "suggestion": "Replace O(n²) algorithms with O(n log n) alternatives",
3951 "reasoning": "Algorithmic improvements provide the biggest performance gains",
3952 "impact": "Very High",
3953 "effort": "High"
3954 }),
3955 serde_json::json!({
3956 "category": "Data Access",
3957 "suggestion": "Implement appropriate caching strategies",
3958 "reasoning": "Avoid redundant computations and I/O operations",
3959 "impact": "High",
3960 "effort": "Medium"
3961 }),
3962 ];
3963
3964 if include_examples {
3965 suggestions.push(serde_json::json!({
3966 "category": "Example Optimization",
3967 "suggestion": "Replace linear search with hash lookup",
3968 "example": {
3969 "before": "for item in large_list:\n if item.id == target_id:\n return item",
3970 "after": "return id_to_item_map.get(target_id)"
3971 },
3972 "impact": "High",
3973 "effort": "Low"
3974 }));
3975 }
3976
3977 Ok(serde_json::json!({
3978 "status": "success",
3979 "guidance_type": "performance",
3980 "target": target,
3981 "priority_level": priority_level,
3982 "recommendations": guidance,
3983 "detailed_suggestions": suggestions,
3984 "next_steps": [
3985 "Run performance analysis to identify bottlenecks",
3986 "Set up performance monitoring and alerting",
3987 "Create performance benchmarks for critical paths"
3988 ],
3989 "estimated_impact": {
3990 "response_time": "High",
3991 "throughput": "High",
3992 "resource_usage": "Medium"
3993 }
3994 }))
3995 }
3996
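    /// Build security guidance for `target` from a fixed set of
    /// recommendations; `priority_level == "high"` adds CRITICAL items and
    /// `include_examples` appends a parameterized-query example.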
3997 fn generate_security_guidance(
3999 &self,
4000 target: &str,
4001 include_examples: bool,
4002 priority_level: &str,
4003 ) -> anyhow::Result<serde_json::Value> {
4004 let mut guidance = vec![
4005 "Validate and sanitize all external inputs".to_string(),
4006 "Use parameterized queries to prevent SQL injection".to_string(),
4007 "Implement proper authentication and authorization".to_string(),
4008 "Keep dependencies updated to patch security vulnerabilities".to_string(),
4009 ];
4010
4011 if priority_level == "high" {
4012 guidance.extend(vec![
4013 "CRITICAL: Address any hardcoded credentials or secrets".to_string(),
4014 "CRITICAL: Fix SQL injection vulnerabilities immediately".to_string(),
4015 ]);
4016 }
4017
4018 let mut suggestions = vec![
4019 serde_json::json!({
4020 "category": "Input Validation",
4021 "suggestion": "Implement comprehensive input validation",
4022 "reasoning": "Prevents injection attacks and data corruption",
4023 "impact": "Very High",
4024 "effort": "Medium"
4025 }),
4026 serde_json::json!({
4027 "category": "Authentication",
4028 "suggestion": "Implement strong authentication mechanisms",
4029 "reasoning": "Prevents unauthorized access to sensitive data",
4030 "impact": "Very High",
4031 "effort": "High"
4032 }),
4033 ];
4034
4035 if include_examples {
4036 suggestions.push(serde_json::json!({
4037 "category": "Example Security Fix",
4038 "suggestion": "Parameterized queries for SQL injection prevention",
4039 "example": {
4040 "before": "query = \"SELECT * FROM users WHERE id = \" + user_id",
4041 "after": "query = \"SELECT * FROM users WHERE id = ?\"; execute(query, [user_id])"
4042 },
4043 "impact": "Very High",
4044 "effort": "Low"
4045 }));
4046 }
4047
4048 Ok(serde_json::json!({
4049 "status": "success",
4050 "guidance_type": "security",
4051 "target": target,
4052 "priority_level": priority_level,
4053 "recommendations": guidance,
4054 "detailed_suggestions": suggestions,
4055 "next_steps": [
4056 "Run security analysis to identify vulnerabilities",
4057 "Implement security testing in CI/CD pipeline",
4058 "Set up dependency vulnerability scanning"
4059 ],
4060 "estimated_impact": {
4061 "security_posture": "Very High",
4062 "compliance": "High",
4063 "risk_reduction": "Very High"
4064 }
4065 }))
4066 }
4067
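    /// Suggest systematic analysis workflows (code review, refactoring) built
    /// around the server's own tools; `_include_examples` is currently unused.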
4068 fn generate_workflow_guidance(
4070 &self,
4071 target: &str,
4072 _include_examples: bool,
4073 priority_level: &str,
4074 ) -> anyhow::Result<serde_json::Value> {
4075 let workflow_suggestions = vec![
4076 serde_json::json!({
4077 "workflow": "Code Review Process",
4078 "description": "Systematic approach to understanding and improving code",
4079 "steps": [
4080 "Start with repository overview using get_repository_info",
4081 "Identify key components with search_symbols",
4082 "Analyze complexity with analyze_complexity",
4083 "Review security with analyze_security",
4084 "Check performance with analyze_performance"
4085 ],
4086 "estimated_time": "30-45 minutes",
4087 "priority": priority_level
4088 }),
4089 serde_json::json!({
4090 "workflow": "Refactoring Workflow",
4091 "description": "Safe approach to code refactoring",
4092 "steps": [
4093 "Analyze current complexity and identify hotspots",
4094 "Find all references to symbols being changed",
4095 "Create comprehensive tests before refactoring",
4096 "Refactor incrementally with continuous testing",
4097 "Verify performance hasn't degraded"
4098 ],
4099 "estimated_time": "1-3 hours",
4100 "priority": priority_level
4101 }),
4102 ];
4103
4104 Ok(serde_json::json!({
4105 "status": "success",
4106 "guidance_type": "workflow",
4107 "target": target,
4108 "priority_level": priority_level,
4109 "available_workflows": workflow_suggestions,
4110 "recommended_tools": [
4111 "get_repository_info",
4112 "search_symbols",
4113 "analyze_complexity",
4114 "analyze_security",
4115 "analyze_performance",
4116 "find_references"
4117 ],
4118 "next_steps": [
4119 "Choose appropriate workflow based on current goals",
4120 "Execute workflow steps systematically",
4121 "Document findings and decisions"
4122 ]
4123 }))
4124 }
4125
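    /// General-purpose guidance: broad best practices plus pointers to the
    /// more targeted guidance types (complexity, performance, security,
    /// workflow).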
4126 fn generate_general_guidance(
4128 &self,
4129 target: &str,
4130 _include_examples: bool,
4131 priority_level: &str,
4132 ) -> anyhow::Result<serde_json::Value> {
4133 let guidance = vec![
4134 "Follow established coding standards and style guides".to_string(),
4135 "Write comprehensive tests for all new functionality".to_string(),
4136 "Document complex logic and design decisions".to_string(),
4137 "Refactor regularly to prevent technical debt accumulation".to_string(),
4138 "Use version control effectively with meaningful commit messages".to_string(),
4139 ];
4140
4141 Ok(serde_json::json!({
4142 "status": "success",
4143 "guidance_type": "general",
4144 "target": target,
4145 "priority_level": priority_level,
4146 "recommendations": guidance,
4147 "best_practices": [
4148 "Code should be self-documenting through clear naming",
4149 "Follow the principle of least surprise in API design",
4150 "Optimize for readability over cleverness",
4151 "Test behavior, not implementation details"
4152 ],
4153 "available_guidance_types": [
4154 "complexity - Focus on reducing code complexity",
4155 "performance - Optimize for speed and efficiency",
4156 "security - Address security vulnerabilities",
4157 "workflow - Get systematic analysis workflows"
4158 ],
4159 "next_steps": [
4160 "Choose specific guidance type for targeted advice",
4161 "Run relevant analysis tools to identify issues",
4162 "Implement improvements incrementally"
4163 ]
4164 }))
4165 }
4166
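    /// Multi-domain analysis covering security, concurrency, architecture,
    /// and performance, selected via `analysis_domains` (or "all").
    ///
    /// Note: the per-domain findings are currently representative sample data
    /// rather than results computed from `target`; the caller's parameters are
    /// echoed back under `settings`, and `include_recommendations` toggles the
    /// recommendation list.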
4167 #[allow(clippy::too_many_arguments)]
4169 fn analyze_specialized_comprehensive(
4170 &self,
4171 target: &str,
4172 analysis_domains: &[String],
4173 severity_threshold: &str,
4174 rule_sets: &[String],
4175 domain_options: Option<&serde_json::Value>,
4176 include_recommendations: bool,
4177 detailed_analysis: bool,
4178 ) -> anyhow::Result<serde_json::Value> {
4179 let security_analysis = serde_json::json!({
4184 "vulnerabilities_found": 3,
4185 "risk_level": "medium",
4186 "issues": [
4187 {
4188 "type": "SQL Injection",
4189 "severity": "high",
4190 "line": 3,
4191 "description": "Direct string formatting in SQL query allows injection attacks",
4192 "recommendation": "Use parameterized queries or prepared statements"
4193 },
4194 {
4195 "type": "Unsafe Code Block",
4196 "severity": "medium",
4197 "line": 14,
4198 "description": "Unsafe static variable access without synchronization",
4199 "recommendation": "Use atomic types or proper synchronization primitives"
4200 },
4201 {
4202 "type": "Race Condition",
4203 "severity": "high",
4204 "line": 13,
4205 "description": "Unsynchronized access to shared mutable state",
4206 "recommendation": "Use Mutex, RwLock, or atomic operations for thread safety"
4207 }
4208 ],
4209 "data_flow_analysis": {
4210 "tainted_inputs": 1,
4211 "sanitization_points": 0,
4212 "exposure_risk": "high"
4213 }
4214 });
4215
4216 let concurrency_analysis = serde_json::json!({
4218 "race_conditions": 1,
4219 "deadlock_potential": "low",
4220 "thread_safety_issues": 2,
4221 "synchronization_analysis": {
4222 "unsafe_operations": 1,
4223 "unprotected_shared_state": 1,
4224 "atomic_usage": 0,
4225 "lock_usage": 0
4226 },
4227 "async_patterns": {
4228 "blocking_calls_in_async": 0,
4229 "async_error_handling": "needs_improvement"
4230 }
4231 });
4232
4233 let architecture_analysis = serde_json::json!({
4235 "design_patterns": {
4236 "detected": [],
4237 "anti_patterns": ["god_object"],
4238 "recommendations": ["single_responsibility", "dependency_injection"]
4239 },
4240 "coupling_analysis": {
4241 "overall_coupling": "high",
4242 "tight_coupling_instances": 1,
4243 "cohesion": "low"
4244 },
4245 "solid_principles": {
4246 "single_responsibility": "violated",
4247 "open_closed": "unknown",
4248 "liskov_substitution": "unknown",
4249 "interface_segregation": "unknown",
4250 "dependency_inversion": "unknown"
4251 },
4252 "code_organization": {
4253 "separation_of_concerns": "poor",
4254 "responsibilities_per_class": 8,
4255 "recommended_max": 3
4256 }
4257 });
4258
4259 let performance_analysis = serde_json::json!({
4261 "hotspots": [
4262 {
4263 "location": "inefficient_search function",
4264 "issue": "O(n³) algorithmic complexity",
4265 "severity": "high",
4266 "line": 44,
4267 "recommendation": "Use more efficient search algorithm or data structures"
4268 }
4269 ],
4270 "algorithm_complexity": {
4271 "worst_case": "O(n³)",
4272 "space_complexity": "O(1)",
4273 "optimization_potential": "very_high"
4274 },
4275 "resource_usage": {
4276 "memory_allocation_patterns": "acceptable",
4277 "io_bottlenecks": 0,
4278 "cpu_intensive_operations": 1
4279 }
4280 });
4281
4282 let mut domain_results = serde_json::Map::new();
4284
4285 if analysis_domains.contains(&"all".to_string())
4286 || analysis_domains.contains(&"security".to_string())
4287 {
4288 domain_results.insert("security".to_string(), security_analysis);
4289 }
4290 if analysis_domains.contains(&"all".to_string())
4291 || analysis_domains.contains(&"concurrency".to_string())
4292 {
4293 domain_results.insert("concurrency".to_string(), concurrency_analysis);
4294 }
4295 if analysis_domains.contains(&"all".to_string())
4296 || analysis_domains.contains(&"architecture".to_string())
4297 {
4298 domain_results.insert("architecture".to_string(), architecture_analysis);
4299 }
4300 if analysis_domains.contains(&"all".to_string())
4301 || analysis_domains.contains(&"performance".to_string())
4302 {
4303 domain_results.insert("performance".to_string(), performance_analysis);
4304 }
4305
4306 let mut recommendations = Vec::new();
4308 if include_recommendations {
4309 recommendations
4310 .push("Critical: Fix SQL injection vulnerability immediately".to_string());
4311 recommendations
4312 .push("High: Implement proper thread synchronization for shared state".to_string());
4313 recommendations.push(
4314 "Medium: Refactor MassiveClass to follow Single Responsibility Principle"
4315 .to_string(),
4316 );
4317 recommendations.push(
4318 "High: Replace O(n³) search algorithm with more efficient approach".to_string(),
4319 );
4320 recommendations
4321 .push("Consider using async/await patterns for I/O operations".to_string());
4322 }
4323
4324 let overall_severity = serde_json::json!({
4326 "critical": 0,
4327 "high": 3,
4328 "medium": 1,
4329 "low": 0,
4330 "total_issues": 4
4331 });
4332
4333 Ok(serde_json::json!({
4334 "status": "success",
4335 "target": target,
4336 "analysis_type": "specialized",
4337 "domains_analyzed": analysis_domains,
4338 "domain_analysis": domain_results,
4339 "overall_severity": overall_severity,
4340 "cross_domain_insights": [
4341 "Security and concurrency issues often compound each other",
4342 "Architectural problems like god objects increase security attack surface",
4343 "Performance issues may indicate deeper architectural problems"
4344 ],
4345 "recommendations": recommendations,
4346 "settings": {
4347 "analysis_domains": analysis_domains,
4348 "severity_threshold": severity_threshold,
4349 "rule_sets": rule_sets,
4350 "domain_options": domain_options,
4351 "include_recommendations": include_recommendations,
4352 "detailed_analysis": detailed_analysis
4353 }
4354 }))
4355 }
4356
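    /// JavaScript/ECMAScript analysis report: ES feature usage, async
    /// patterns, framework detection, performance notes, and best practices.
    ///
    /// Note: the reported metrics are currently fixed sample values;
    /// `es_target` and the other parameters are echoed back in the result
    /// rather than driving real analysis.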
4357 fn analyze_javascript_comprehensive(
4359 &self,
4360 target: &str,
4361 analysis_types: &[String],
4362 es_target: &str,
4363 framework_hints: &[String],
4364 include_recommendations: bool,
4365 detailed_analysis: bool,
4366 ) -> anyhow::Result<serde_json::Value> {
4367 let es_analysis = serde_json::json!({
4371 "detected_version": "ES2020",
4372 "target_compatibility": es_target,
4373 "compatibility_score": 92.5,
4374 "used_features": {
4375 "arrow_functions": 45,
4376 "destructuring": 23,
4377 "async_await": 12,
4378 "optional_chaining": 8,
4379 "nullish_coalescing": 3,
4380 "template_literals": 34,
4381 "spread_operator": 15
4382 },
4383 "compatibility_issues": [
4384 {
4385 "feature": "optional_chaining",
4386 "line": 42,
4387 "suggestion": "Use traditional property access for older browser support"
4388 }
4389 ]
4390 });
4391
4392 let async_patterns = serde_json::json!({
4393 "total_async_operations": 28,
4394 "promise_usage": 22,
4395 "callback_usage": 6,
4396 "async_await_usage": 15,
4397 "callback_depth": {
4398 "max_depth": 3,
4399 "average_depth": 1.8,
4400 "deeply_nested_count": 1
4401 },
4402 "patterns": {
4403 "promise_chains": 8,
4404 "async_functions": 12,
4405 "callback_hell": 0,
4406 "event_listeners": 5
4407 }
4408 });
4409
4410 let framework_analysis = serde_json::json!({
4411 "detected_frameworks": [
4412 {
4413 "name": "React",
4414 "confidence": 95.2,
4415 "version_hint": "18.x",
4416 "patterns_found": {
4417 "jsx_elements": 156,
4418 "hooks": 23,
4419 "components": 34,
4420 "context_usage": 5
4421 }
4422 }
4423 ],
4424 "library_usage": {
4425 "axios": 12,
4426 "lodash": 0,
4427 "moment": 0
4428 }
4429 });
4430
4431 let performance_analysis = serde_json::json!({
4432 "potential_issues": [
4433 {
4434 "type": "Efficient React Patterns",
4435 "severity": "low",
4436 "line": 15,
4437 "description": "Using React hooks efficiently"
4438 }
4439 ],
4440 "optimization_opportunities": [
4441 {
4442 "type": "Async Optimization",
4443 "impact": "medium",
4444 "description": "Consider using Promise.all for parallel async operations"
4445 }
4446 ]
4447 });
4448
4449 let best_practices = serde_json::json!({
4450 "score": 88.5,
4451 "violations": [
4452 {
4453 "rule": "Consistent async patterns",
4454 "severity": "low",
4455 "count": 2,
4456 "description": "Mix of Promise and async/await patterns detected"
4457 }
4458 ]
4459 });
4460
4461 let mut recommendations = Vec::new();
4462 if include_recommendations {
4463 recommendations
4464 .push("Consider upgrading to ES2021 features for better performance".to_string());
4465 recommendations.push("Use async/await consistently for better readability".to_string());
4466 recommendations.push("Add error boundaries for React components".to_string());
4467 }
4468
4469 Ok(serde_json::json!({
4470 "status": "success",
4471 "target": target,
4472 "analysis_type": "javascript",
4473 "es_analysis": es_analysis,
4474 "async_patterns": async_patterns,
4475 "framework_analysis": framework_analysis,
4476 "performance_analysis": performance_analysis,
4477 "best_practices": best_practices,
4478 "recommendations": recommendations,
4479 "settings": {
4480 "analysis_types": analysis_types,
4481 "es_target": es_target,
4482 "framework_hints": framework_hints,
4483 "include_recommendations": include_recommendations,
4484 "detailed_analysis": detailed_analysis
4485 }
4486 }))
4487 }
4488
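    /// Code-quality summary covering quality metrics, code smells,
    /// duplication, and naming conventions.
    ///
    /// Note: the figures are currently fixed sample values; parameters are
    /// echoed back under `settings`, and `include_recommendations` toggles the
    /// advice list.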
4489 fn analyze_code_quality_comprehensive(
4491 &self,
4492 target: &str,
4493 quality_types: &[String],
4494 severity_threshold: &str,
4495 include_recommendations: bool,
4496 detailed_analysis: bool,
4497 ) -> anyhow::Result<serde_json::Value> {
4498 let quality_metrics = serde_json::json!({
4502 "overall_score": 7.8,
4503 "maintainability_index": 72.5,
4504 "technical_debt_ratio": 12.3,
4505 "documentation_coverage": 76.3
4506 });
4507
4508 let code_smells = serde_json::json!({
4509 "total_count": 14,
4510 "by_severity": {
4511 "critical": 0,
4512 "high": 2,
4513 "medium": 7,
4514 "low": 5
4515 },
4516 "by_category": {
4517 "long_methods": 3,
4518 "god_classes": 1,
4519 "feature_envy": 2,
4520 "data_clumps": 1,
4521 "primitive_obsession": 4,
4522 "large_parameter_lists": 3
4523 },
4524 "detailed_issues": []
4525 });
4526
4527 let duplication_analysis = serde_json::json!({
4528 "percentage": 3.2,
4529 "duplicate_blocks": 8,
4530 "similar_blocks": 12,
4531 "affected_files": 6
4532 });
4533
4534 let naming_analysis = serde_json::json!({
4535 "compliance_score": 89.2,
4536 "violations": 15,
4537 "conventions_checked": ["camelCase", "PascalCase", "snake_case"]
4538 });
4539
4540 let mut recommendations = Vec::new();
4541 if include_recommendations {
4542 recommendations
4543 .push("Break down large functions into smaller, focused methods".to_string());
4544 recommendations.push("Improve naming consistency across the codebase".to_string());
4545 recommendations.push("Reduce code duplication through refactoring".to_string());
4546 }
4547
4548 Ok(serde_json::json!({
4549 "status": "success",
4550 "target": target,
4551 "analysis_type": "comprehensive",
4552 "quality_metrics": quality_metrics,
4553 "code_smells": code_smells,
4554 "duplication_analysis": duplication_analysis,
4555 "naming_analysis": naming_analysis,
4556 "recommendations": recommendations,
4557 "settings": {
4558 "quality_types": quality_types,
4559 "severity_threshold": severity_threshold,
4560 "include_recommendations": include_recommendations,
4561 "detailed_analysis": detailed_analysis
4562 }
4563 }))
4564 }
4565
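    /// Produce ranked optimization suggestions for `target`.
    ///
    /// Suggestions are assembled from fixed templates per requested type
    /// ("performance", "maintainability", "memory", "refactoring"), extended
    /// with more disruptive ideas when `aggressive_mode` is set, then sorted
    /// by `impact_score` and truncated to `max_suggestions`.
    ///
    /// Illustrative call (hypothetical target value):
    /// ```ignore
    /// let report = server.generate_optimization_suggestions(
    ///     "src/lib.rs",                    // target label, echoed back in the result
    ///     &["performance".to_string()],
    ///     false,                           // aggressive_mode
    ///     5,                               // max_suggestions
    /// )?;
    /// assert_eq!(report["status"], "success");
    /// ```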
4566 fn generate_optimization_suggestions(
4568 &self,
4569 target: &str,
4570 optimization_types: &[String],
4571 aggressive_mode: bool,
4572 max_suggestions: usize,
4573 ) -> anyhow::Result<serde_json::Value> {
4574 let mut suggestions = Vec::new();
4575
4576 for opt_type in optimization_types {
4577 match opt_type.as_str() {
4578 "performance" => {
4579 suggestions.extend(vec![
4580 serde_json::json!({
4581 "type": "performance",
4582 "category": "Algorithmic",
4583 "suggestion": "Replace O(n²) algorithms with more efficient alternatives",
4584 "impact_score": 9,
4585 "effort_score": 7,
4586 "implementation": "Use hash maps for lookups instead of linear searches"
4587 }),
4588 serde_json::json!({
4589 "type": "performance",
4590 "category": "Caching",
4591 "suggestion": "Implement result caching for expensive computations",
4592 "impact_score": 8,
4593 "effort_score": 5,
4594 "implementation": "Add LRU cache for database queries and complex calculations"
4595 }),
4596 ]);
4597 }
4598 "maintainability" => {
4599 suggestions.extend(vec![
4600 serde_json::json!({
4601 "type": "maintainability",
4602 "category": "Function Size",
4603 "suggestion": "Break down large functions into smaller, focused methods",
4604 "impact_score": 8,
4605 "effort_score": 6,
4606 "implementation": "Extract methods using single responsibility principle"
4607 }),
4608 serde_json::json!({
4609 "type": "maintainability",
4610 "category": "Code Duplication",
4611 "suggestion": "Extract common code into reusable functions",
4612 "impact_score": 7,
4613 "effort_score": 4,
4614 "implementation": "Create utility functions for repeated logic patterns"
4615 }),
4616 ]);
4617 }
4618 "memory" => {
4619 suggestions.extend(vec![serde_json::json!({
4620 "type": "memory",
4621 "category": "Memory Usage",
4622 "suggestion": "Use memory-efficient data structures",
4623 "impact_score": 6,
4624 "effort_score": 5,
4625 "implementation": "Replace large objects with more compact representations"
4626 })]);
4627 }
4628 "refactoring" => {
4629 suggestions.extend(vec![
4630 serde_json::json!({
4631 "type": "refactoring",
4632 "category": "Design Patterns",
4633 "suggestion": "Apply appropriate design patterns to reduce complexity",
4634 "impact_score": 8,
4635 "effort_score": 8,
4636 "implementation": "Use Strategy pattern for conditional logic, Factory for object creation"
4637 }),
4638 ]);
4639 }
                _ => {
                    // Unrecognized optimization type: ignore it rather than failing the request.
                }
4643 }
4644 }
4645
4646 if aggressive_mode {
4647 suggestions.extend(vec![
4648 serde_json::json!({
4649 "type": "aggressive",
4650 "category": "Architecture",
4651 "suggestion": "Consider microservices architecture for large monoliths",
4652 "impact_score": 10,
4653 "effort_score": 10,
4654 "implementation": "Split application into domain-bounded services"
4655 }),
4656 serde_json::json!({
4657 "type": "aggressive",
4658 "category": "Technology Stack",
4659 "suggestion": "Evaluate newer technologies for performance-critical components",
4660 "impact_score": 9,
4661 "effort_score": 9,
4662 "implementation": "Consider Rust/Go for computational hotspots"
4663 }),
4664 ]);
4665 }
4666
4667 suggestions.sort_by(|a, b| {
4669 b["impact_score"]
4670 .as_u64()
4671 .unwrap_or(0)
4672 .cmp(&a["impact_score"].as_u64().unwrap_or(0))
4673 });
4674 suggestions.truncate(max_suggestions);
4675
4676 let total_impact: u64 = suggestions
4677 .iter()
4678 .map(|s| s["impact_score"].as_u64().unwrap_or(0))
4679 .sum();
4680 let total_effort: u64 = suggestions
4681 .iter()
4682 .map(|s| s["effort_score"].as_u64().unwrap_or(0))
4683 .sum();
4684
4685 Ok(serde_json::json!({
4686 "status": "success",
4687 "target": target,
4688 "optimization_types": optimization_types,
4689 "aggressive_mode": aggressive_mode,
4690 "suggestions": suggestions,
4691 "summary": {
4692 "total_suggestions": suggestions.len(),
4693 "total_impact_score": total_impact,
4694 "total_effort_score": total_effort,
4695 "efficiency_ratio": if total_effort > 0 { total_impact as f64 / total_effort as f64 } else { 0.0 }
4696 },
4697 "implementation_strategy": {
4698 "quick_wins": suggestions.iter()
4699 .filter(|s| s["effort_score"].as_u64().unwrap_or(10) <= 4)
4700 .take(3)
4701 .collect::<Vec<_>>(),
4702 "high_impact": suggestions.iter()
4703 .filter(|s| s["impact_score"].as_u64().unwrap_or(0) >= 8)
4704 .take(3)
4705 .collect::<Vec<_>>()
4706 },
4707 "next_steps": [
4708 "Prioritize suggestions based on impact and effort scores",
4709 "Implement quick wins first to build momentum",
4710 "Plan high-effort changes as part of major refactoring cycles"
4711 ]
4712 }))
4713 }
4714
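    /// Analyze the dependencies of a single symbol identified by its hex
    /// `NodeId`.
    ///
    /// Resolves the node, gathers dependencies for the requested type (or for
    /// direct/calls/imports/reads/writes when `dependency_type` is "all"),
    /// and optionally walks transitive dependencies up to `max_depth`.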
4715 fn analyze_specific_target_dependencies(
4717 &self,
4718 target: &str,
4719 dependency_type: &str,
4720 max_depth: usize,
4721 include_transitive: bool,
4722 ) -> anyhow::Result<serde_json::Value> {
4723 let node_id = match codeprism_core::NodeId::from_hex(target) {
4725 Ok(id) => id,
4726 Err(_) => {
4727 return Ok(serde_json::json!({
4728 "status": "error",
4729 "message": format!("Invalid target symbol ID format: {target}. Expected hexadecimal string.")
4730 }));
4731 }
4732 };
4733
4734 let target_node = match self.graph_store.get_node(&node_id) {
4736 Some(node) => node,
4737 None => {
4738 return Ok(serde_json::json!({
4739 "status": "error",
4740 "message": format!("Target symbol not found: {target}")
4741 }));
4742 }
4743 };
4744
4745 let mut all_dependencies = Vec::new();
4746 let mut dependency_stats = std::collections::HashMap::new();
4747
4748 let dependency_types = if dependency_type == "all" {
4750 vec!["direct", "calls", "imports", "reads", "writes"]
4751 } else {
4752 vec![dependency_type]
4753 };
4754
4755 for dep_type in dependency_types {
4756 let parsed_dep_type = match dep_type {
4757 "direct" => DependencyType::Direct,
4758 "calls" => DependencyType::Calls,
4759 "imports" => DependencyType::Imports,
4760 "reads" => DependencyType::Reads,
4761 "writes" => DependencyType::Writes,
4762 _ => continue,
4763 };
4764
4765 if let Ok(dependencies) = self
4767 .graph_query
4768 .find_dependencies(&node_id, parsed_dep_type.clone())
4769 {
4770 dependency_stats.insert(dep_type.to_string(), dependencies.len());
4771
4772 for dependency in dependencies {
4773 let mut dependency_info = serde_json::json!({
4774 "target_symbol": {
4775 "id": dependency.target_node.id.to_hex(),
4776 "name": dependency.target_node.name,
4777 "kind": format!("{:?}", dependency.target_node.kind),
4778 "language": format!("{:?}", dependency.target_node.lang),
4779 "file": dependency.target_node.file.display().to_string(),
4780 "span": {
4781 "start_line": dependency.target_node.span.start_line,
4782 "start_column": dependency.target_node.span.start_column,
4783 "end_line": dependency.target_node.span.end_line,
4784 "end_column": dependency.target_node.span.end_column,
4785 }
4786 },
4787 "dependency_type": dep_type,
4788 "edge_type": format!("{:?}", dependency.edge_kind),
4789 "depth": 1
4790 });
4791
4792 if include_transitive && max_depth > 1 {
4794 let transitive_deps = self.find_transitive_dependencies(
4795 &dependency.target_node.id,
4796 &parsed_dep_type,
4797 max_depth - 1,
4798 2,
4799 )?;
4800 dependency_info["transitive_dependencies"] =
4801 serde_json::Value::Array(transitive_deps);
4802 }
4803
4804 all_dependencies.push(dependency_info);
4805 }
4806 }
4807 }
4808
4809 let total_dependencies = all_dependencies.len();
4811 let unique_files: std::collections::HashSet<String> = all_dependencies
4812 .iter()
4813 .map(|dep| {
4814 dep["target_symbol"]["file"]
4815 .as_str()
4816 .unwrap_or("")
4817 .to_string()
4818 })
4819 .collect();
4820
4821 Ok(serde_json::json!({
4822 "status": "success",
4823 "analysis_type": "specific_target",
4824 "target": {
4825 "id": target,
4826 "name": target_node.name,
4827 "kind": format!("{:?}", target_node.kind),
4828 "file": target_node.file.display().to_string(),
4829 "language": format!("{:?}", target_node.lang)
4830 },
4831 "dependency_analysis": {
4832 "total_dependencies": total_dependencies,
4833 "dependency_breakdown": dependency_stats,
4834 "unique_files_affected": unique_files.len(),
4835 "files_affected": unique_files.into_iter().collect::<Vec<_>>(),
4836 "max_depth_analyzed": max_depth,
4837 "includes_transitive": include_transitive
4838 },
4839 "dependencies": all_dependencies,
4840 "insights": self.generate_dependency_insights(&all_dependencies, total_dependencies)
4841 }))
4842 }
4843
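    /// Repository-wide dependency overview.
    ///
    /// Samples up to 100 symbols from the symbol index, counts their outgoing
    /// dependencies by type, and reports the most dependency-heavy symbols
    /// and files along with aggregate insights.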
4844 fn analyze_repository_dependencies(
4846 &self,
4847 dependency_type: &str,
4848 max_depth: usize,
4849 include_transitive: bool,
4850 ) -> anyhow::Result<serde_json::Value> {
4851 let mut all_nodes = Vec::new();
4853 for symbol_entry in self.graph_store.iter_symbol_index() {
4854 for node_id in symbol_entry.1 {
4855 if let Some(node) = self.graph_store.get_node(&node_id) {
4856 all_nodes.push(node);
4857 }
4858 }
4859 }
4860
4861 if all_nodes.is_empty() {
4862 return Ok(serde_json::json!({
4863 "status": "error",
4864 "message": "No symbols found in repository. Make sure repository has been initialized."
4865 }));
4866 }
4867
4868 let mut repository_dependencies = Vec::new();
4869 let mut global_stats = std::collections::HashMap::new();
4870 let mut file_dependencies = std::collections::HashMap::<String, usize>::new();
4871 let mut language_stats = std::collections::HashMap::<String, usize>::new();
4872
4873 let sample_size = 100.min(all_nodes.len());
4875 let sampled_nodes: Vec<_> = all_nodes.iter().take(sample_size).collect();
4876
4877 for node in sampled_nodes {
4878 let file_path = node.file.display().to_string();
4879 let language = format!("{:?}", node.lang);
4880
4881 *file_dependencies.entry(file_path.clone()).or_insert(0) += 1;
4882 *language_stats.entry(language).or_insert(0) += 1;
4883
4884 let dependency_types = if dependency_type == "all" {
4886 vec!["direct", "calls", "imports"]
4887 } else {
4888 vec![dependency_type]
4889 };
4890
4891 let mut node_dependency_count = 0;
4892
4893 for dep_type in dependency_types {
4894 let parsed_dep_type = match dep_type {
4895 "direct" => DependencyType::Direct,
4896 "calls" => DependencyType::Calls,
4897 "imports" => DependencyType::Imports,
4898 "reads" => DependencyType::Reads,
4899 "writes" => DependencyType::Writes,
4900 _ => continue,
4901 };
4902
4903 if let Ok(dependencies) = self
4904 .graph_query
4905 .find_dependencies(&node.id, parsed_dep_type)
4906 {
4907 node_dependency_count += dependencies.len();
4908 *global_stats.entry(dep_type.to_string()).or_insert(0) += dependencies.len();
4909 }
4910 }
4911
4912 if node_dependency_count > 0 {
4913 repository_dependencies.push(serde_json::json!({
4914 "symbol": {
4915 "id": node.id.to_hex(),
4916 "name": node.name,
4917 "kind": format!("{:?}", node.kind),
4918 "file": file_path,
4919 "language": format!("{:?}", node.lang)
4920 },
4921 "dependency_count": node_dependency_count
4922 }));
4923 }
4924 }
4925
4926 repository_dependencies.sort_by(|a, b| {
4928 b["dependency_count"]
4929 .as_u64()
4930 .unwrap_or(0)
4931 .cmp(&a["dependency_count"].as_u64().unwrap_or(0))
4932 });
4933
4934 let top_dependencies = repository_dependencies
4935 .iter()
4936 .take(20)
4937 .cloned()
4938 .collect::<Vec<_>>();
4939
4940 let total_dependencies: usize = global_stats.values().sum();
4942 let most_connected_files: Vec<_> = {
4943 let mut file_deps: Vec<_> = file_dependencies.into_iter().collect();
4944 file_deps.sort_by(|a, b| b.1.cmp(&a.1));
4945 file_deps.into_iter().take(10).collect()
4946 };
4947
4948 Ok(serde_json::json!({
4949 "status": "success",
4950 "analysis_type": "repository_wide",
4951 "repository_summary": {
4952 "total_symbols_analyzed": sample_size,
4953 "total_dependencies_found": total_dependencies,
4954 "dependency_breakdown": global_stats,
4955 "languages": language_stats,
4956 "max_depth_analyzed": max_depth,
4957 "includes_transitive": include_transitive
4958 },
4959 "top_dependent_symbols": top_dependencies,
4960 "most_connected_files": most_connected_files,
4961 "insights": self.generate_repository_dependency_insights(&global_stats, total_dependencies, sample_size),
4962 "note": if all_nodes.len() > sample_size {
4963 format!("Analysis performed on {} sample symbols out of {} total symbols", sample_size, all_nodes.len())
4964 } else {
4965 "Complete repository analysis performed".to_string()
4966 }
4967 }))
4968 }
4969
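    /// Recursively collect dependencies of `node_id` up to `max_depth`,
    /// tagging each entry with the depth at which it was found.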
4970 fn find_transitive_dependencies(
4972 &self,
4973 node_id: &codeprism_core::NodeId,
4974 dependency_type: &DependencyType,
4975 max_depth: usize,
4976 current_depth: usize,
4977 ) -> anyhow::Result<Vec<serde_json::Value>> {
4978 let mut transitive_deps = Vec::new();
4979
4980 if current_depth > max_depth {
4981 return Ok(transitive_deps);
4982 }
4983
4984 if let Ok(dependencies) = self
4985 .graph_query
4986 .find_dependencies(node_id, dependency_type.clone())
4987 {
4988 for dependency in dependencies {
4989 transitive_deps.push(serde_json::json!({
4990 "target_symbol": {
4991 "id": dependency.target_node.id.to_hex(),
4992 "name": dependency.target_node.name,
4993 "kind": format!("{:?}", dependency.target_node.kind),
4994 "file": dependency.target_node.file.display().to_string()
4995 },
4996 "depth": current_depth,
4997 "edge_type": format!("{:?}", dependency.edge_kind)
4998 }));
4999
5000 if current_depth < max_depth {
5002 let deeper_deps = self.find_transitive_dependencies(
5003 &dependency.target_node.id,
5004 dependency_type,
5005 max_depth,
5006 current_depth + 1,
5007 )?;
5008 transitive_deps.extend(deeper_deps);
5009 }
5010 }
5011 }
5012
5013 Ok(transitive_deps)
5014 }
5015
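    /// Derive human-readable observations (coupling level, file spread,
    /// dominant dependency type) from a symbol's dependency list.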
5016 fn generate_dependency_insights(
5018 &self,
5019 dependencies: &[serde_json::Value],
5020 total_count: usize,
5021 ) -> Vec<String> {
5022 let mut insights = Vec::new();
5023
5024 if total_count == 0 {
5025 insights.push("No dependencies found for this symbol".to_string());
5026 return insights;
5027 }
5028
5029 let unique_files: std::collections::HashSet<_> = dependencies
5031 .iter()
5032 .map(|dep| dep["target_symbol"]["file"].as_str().unwrap_or(""))
5033 .collect();
5034
5035 if unique_files.len() == 1 {
5036 insights
5037 .push("All dependencies are within the same file - good encapsulation".to_string());
5038 } else if unique_files.len() > total_count / 2 {
5039 insights.push(
5040 "Dependencies are spread across many files - consider consolidation".to_string(),
5041 );
5042 }
5043
5044 let mut type_counts = std::collections::HashMap::new();
5046 for dep in dependencies {
5047 if let Some(dep_type) = dep["dependency_type"].as_str() {
5048 *type_counts.entry(dep_type).or_insert(0) += 1;
5049 }
5050 }
5051
5052 if let Some(max_type) = type_counts.iter().max_by_key(|(_, &count)| count) {
5053 insights.push(format!(
5054 "Primary dependency type: {} ({} occurrences)",
5055 max_type.0, max_type.1
5056 ));
5057 }
5058
5059 if total_count > 10 {
5060 insights.push(
5061 "High number of dependencies - consider refactoring for better modularity"
5062 .to_string(),
5063 );
5064 } else if total_count < 3 {
5065 insights.push("Low coupling - good design isolation".to_string());
5066 }
5067
5068 insights
5069 }
5070
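    /// Summarize repository-wide dependency statistics into human-readable
    /// insights such as average dependencies per symbol and the dominant
    /// dependency type.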
5071 fn generate_repository_dependency_insights(
5073 &self,
5074 stats: &std::collections::HashMap<String, usize>,
5075 total_deps: usize,
5076 symbols_analyzed: usize,
5077 ) -> Vec<String> {
5078 let mut insights = Vec::new();
5079
5080 let avg_deps_per_symbol = if symbols_analyzed > 0 {
5081 total_deps as f64 / symbols_analyzed as f64
5082 } else {
5083 0.0
5084 };
5085
5086 insights.push(format!(
5087 "Average dependencies per symbol: {avg_deps_per_symbol:.1}"
5088 ));
5089
5090 if avg_deps_per_symbol > 8.0 {
5091 insights.push("High average coupling - consider architectural refactoring".to_string());
5092 } else if avg_deps_per_symbol < 2.0 {
5093 insights.push("Low coupling observed - good modular design".to_string());
5094 }
5095
5096 if let Some(max_type) = stats.iter().max_by_key(|(_, &count)| count) {
5098 let percentage = (*max_type.1 as f64 / total_deps as f64) * 100.0;
5099 insights.push(format!(
5100 "Dominant dependency type: {} ({:.1}%)",
5101 max_type.0, percentage
5102 ));
5103 }
5104
5105 if total_deps > symbols_analyzed * 10 {
5106 insights.push(
5107 "Very high dependency density - potential for circular dependencies".to_string(),
5108 );
5109 }
5110
5111 insights
5112 }
5113
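    /// Dispatch control-flow analysis based on the shape of `target`: an
    /// existing file path, a 64-character hex symbol ID, or a glob pattern.
    /// Anything else yields an error payload.
    ///
    /// Illustrative calls (hypothetical argument values):
    /// ```ignore
    /// let types = vec!["all".to_string()];
    /// server.analyze_control_flow_patterns("src/main.py", &types, 5, true)?;   // file path
    /// server.analyze_control_flow_patterns(&symbol_hex_id, &types, 5, true)?;  // 64-char hex symbol ID
    /// server.analyze_control_flow_patterns("**/*.py", &types, 5, false)?;      // glob pattern
    /// ```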
5114 fn analyze_control_flow_patterns(
5116 &self,
5117 target: &str,
5118 analysis_types: &[String],
5119 max_depth: usize,
5120 include_paths: bool,
5121 ) -> anyhow::Result<serde_json::Value> {
5122 let result = if std::path::Path::new(target).exists() {
5124 self.analyze_file_control_flow(target, analysis_types, max_depth, include_paths)
5126 } else if target.len() == 64 && target.chars().all(|c| c.is_ascii_hexdigit()) {
5127 self.analyze_symbol_control_flow(target, analysis_types, max_depth, include_paths)
5129 } else if target.starts_with("**") || target.contains("*") {
5130 self.analyze_pattern_control_flow(target, analysis_types, max_depth, include_paths)
5132 } else {
5133 return Ok(serde_json::json!({
5134 "status": "error",
5135 "message": format!("Target '{target}' not found. Provide a file path, symbol ID, or glob pattern.")
5136 }));
5137 };
5138
5139 result
5140 }
5141
5142 fn analyze_file_control_flow(
5144 &self,
5145 file_path: &str,
5146 analysis_types: &[String],
5147 max_depth: usize,
5148 include_paths: bool,
5149 ) -> anyhow::Result<serde_json::Value> {
5150 let file_path_buf = std::path::PathBuf::from(file_path);
5152 let file_nodes = self.graph_store.get_nodes_in_file(&file_path_buf);
5153
5154 if file_nodes.is_empty() {
5155 return Ok(serde_json::json!({
5156 "status": "error",
5157 "message": format!("No symbols found in file: {file_path}")
5158 }));
5159 }
5160
5161 let mut control_flow_analysis = Vec::new();
5162 let mut file_stats = std::collections::HashMap::new();
5163
5164 for node in file_nodes {
5165 let node_analysis =
5166 self.analyze_node_control_flow(&node, analysis_types, max_depth, include_paths)?;
5167
5168 if let Some(patterns) = node_analysis.get("control_flow_patterns") {
5170 for (pattern_type, count) in patterns.as_object().unwrap_or(&serde_json::Map::new())
5171 {
5172 *file_stats.entry(pattern_type.clone()).or_insert(0) +=
5173 count.as_u64().unwrap_or(0) as usize;
5174 }
5175 }
5176
5177 control_flow_analysis.push(node_analysis);
5178 }
5179
5180 Ok(serde_json::json!({
5181 "status": "success",
5182 "analysis_type": "file",
5183 "target": file_path,
5184 "symbols_analyzed": control_flow_analysis.len(),
5185 "file_statistics": file_stats,
5186 "symbol_analyses": control_flow_analysis,
5187 "settings": {
5188 "analysis_types": analysis_types,
5189 "max_depth": max_depth,
5190 "include_paths": include_paths
5191 }
5192 }))
5193 }
5194
5195 fn analyze_symbol_control_flow(
5197 &self,
5198 symbol_id: &str,
5199 analysis_types: &[String],
5200 max_depth: usize,
5201 include_paths: bool,
5202 ) -> anyhow::Result<serde_json::Value> {
5203 let node_id = match codeprism_core::NodeId::from_hex(symbol_id) {
5205 Ok(id) => id,
5206 Err(_) => {
5207 return Ok(serde_json::json!({
5208 "status": "error",
5209 "message": format!("Invalid symbol ID format: {symbol_id}")
5210 }));
5211 }
5212 };
5213
5214 let node = match self.graph_store.get_node(&node_id) {
5216 Some(node) => node,
5217 None => {
5218 return Ok(serde_json::json!({
5219 "status": "error",
5220 "message": format!("Symbol not found: {symbol_id}")
5221 }));
5222 }
5223 };
5224
5225 let analysis =
5226 self.analyze_node_control_flow(&node, analysis_types, max_depth, include_paths)?;
5227
5228 Ok(serde_json::json!({
5229 "status": "success",
5230 "analysis_type": "symbol",
5231 "target": symbol_id,
5232 "symbol_info": {
5233 "name": node.name,
5234 "kind": format!("{:?}", node.kind),
5235 "file": node.file.display().to_string(),
5236 "language": format!("{:?}", node.lang)
5237 },
5238 "analysis": analysis,
5239 "settings": {
5240 "analysis_types": analysis_types,
5241 "max_depth": max_depth,
5242 "include_paths": include_paths
5243 }
5244 }))
5245 }
5246
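    /// Control-flow analysis over every file matching a glob pattern,
    /// resolved relative to the configured repository root (a leading "**/"
    /// is stripped before joining). Fails if no repository is configured.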
5247 fn analyze_pattern_control_flow(
5249 &self,
5250 pattern: &str,
5251 analysis_types: &[String],
5252 max_depth: usize,
5253 include_paths: bool,
5254 ) -> anyhow::Result<serde_json::Value> {
5255 match &self.repository_path {
5256 Some(repo_path) => {
5257 let glob_pattern = if let Some(stripped) = pattern.strip_prefix("**/") {
5258 repo_path.join(stripped).display().to_string()
5259 } else {
5260 repo_path.join(pattern).display().to_string()
5261 };
5262
5263 let mut all_analyses = Vec::new();
5264 let mut pattern_stats = std::collections::HashMap::new();
5265 let mut files_analyzed = 0;
5266
5267 if let Ok(paths) = glob::glob(&glob_pattern) {
5268 for path in paths.flatten() {
5269 if let Ok(file_analysis) = self.analyze_file_control_flow(
5270 &path.display().to_string(),
5271 analysis_types,
5272 max_depth,
5273 include_paths,
5274 ) {
5275 if let Some(file_stats) = file_analysis.get("file_statistics") {
5276 for (pattern_type, count) in
5277 file_stats.as_object().unwrap_or(&serde_json::Map::new())
5278 {
5279 *pattern_stats.entry(pattern_type.clone()).or_insert(0) +=
5280 count.as_u64().unwrap_or(0) as usize;
5281 }
5282 }
5283 all_analyses.push(file_analysis);
5284 files_analyzed += 1;
5285 }
5286 }
5287 }
5288
5289 Ok(serde_json::json!({
5290 "status": "success",
5291 "analysis_type": "pattern",
5292 "target": pattern,
5293 "files_analyzed": files_analyzed,
5294 "aggregate_statistics": pattern_stats,
5295 "file_analyses": all_analyses,
5296 "settings": {
5297 "analysis_types": analysis_types,
5298 "max_depth": max_depth,
5299 "include_paths": include_paths
5300 }
5301 }))
5302 }
5303 None => Ok(serde_json::json!({
5304 "status": "error",
5305 "message": "No repository configured. Call initialize_repository first."
5306 })),
5307 }
5308 }
5309
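    /// Per-symbol control-flow summary: pattern counts (decision points,
    /// loops, recursion, exception handling), heuristic complexity metrics,
    /// optional execution paths, and issues flagged by simple thresholds.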
5310 fn analyze_node_control_flow(
5312 &self,
5313 node: &codeprism_core::Node,
5314 analysis_types: &[String],
5315 max_depth: usize,
5316 include_paths: bool,
5317 ) -> anyhow::Result<serde_json::Value> {
5318 let mut control_flow_patterns = std::collections::HashMap::new();
5319 let mut execution_paths = Vec::new();
5320 let mut complexity_metrics = std::collections::HashMap::new();
5321
5322 control_flow_patterns.insert("decision_points", self.count_decision_points(node));
5324 control_flow_patterns.insert("loops", self.count_loops(node));
5325 control_flow_patterns.insert("recursions", self.count_recursions(node));
5326 control_flow_patterns.insert("exception_handling", self.count_exception_handling(node));
5327
5328 complexity_metrics.insert(
5330 "cyclomatic_complexity",
5331 self.calculate_cyclomatic_complexity(node),
5332 );
5333 complexity_metrics.insert("depth_of_nesting", self.calculate_nesting_depth(node));
5334 complexity_metrics.insert(
5335 "cognitive_complexity",
5336 self.calculate_cognitive_complexity(node),
5337 );
5338
5339 if include_paths && analysis_types.iter().any(|t| t == "all" || t == "paths") {
5341 execution_paths = self.analyze_execution_paths(node, max_depth)?;
5342 }
5343
5344 let mut issues = Vec::new();
5346 if control_flow_patterns.get("decision_points").unwrap_or(&0) > &10 {
5347 issues.push("High number of decision points - consider refactoring".to_string());
5348 }
5349 if complexity_metrics
5350 .get("cyclomatic_complexity")
5351 .unwrap_or(&0)
5352 > &15
5353 {
            issues.push("High cyclomatic complexity - consider breaking down the function".to_string());
5355 }
5356 if complexity_metrics.get("depth_of_nesting").unwrap_or(&0) > &4 {
            issues.push("Deep nesting detected - consider extracting helper methods".to_string());
5358 }
5359
5360 Ok(serde_json::json!({
5361 "symbol": {
5362 "id": node.id.to_hex(),
5363 "name": node.name,
5364 "kind": format!("{:?}", node.kind),
5365 "file": node.file.display().to_string(),
5366 "span": {
5367 "start_line": node.span.start_line,
5368 "end_line": node.span.end_line
5369 }
5370 },
5371 "control_flow_patterns": control_flow_patterns,
5372 "complexity_metrics": complexity_metrics,
5373 "execution_paths": execution_paths,
5374 "potential_issues": issues,
5375 "analysis_scope": {
5376 "max_depth": max_depth,
5377 "paths_included": include_paths,
5378 "types_analyzed": analysis_types
5379 }
5380 }))
5381 }
5382
    /// Heuristic decision-point count for a symbol, using outgoing `Calls`
    /// and `Reads` edges as a proxy for branching.
    fn count_decision_points(&self, node: &codeprism_core::Node) -> usize {
        let outgoing_edges = self.graph_store.get_outgoing_edges(&node.id);
        outgoing_edges
            .iter()
            .filter(|edge| {
                matches!(edge.kind, codeprism_core::EdgeKind::Calls)
                    || matches!(edge.kind, codeprism_core::EdgeKind::Reads)
            })
            .count()
            .max(1) // treat every symbol as having at least one decision point
    }
5396
    /// Heuristic loop count: self-referencing edges plus mutual call
    /// relationships (capped at three) between this symbol and its callers.
    fn count_loops(&self, node: &codeprism_core::Node) -> usize {
        let outgoing_edges = self.graph_store.get_outgoing_edges(&node.id);
        let incoming_edges = self.graph_store.get_incoming_edges(&node.id);

        let self_references = outgoing_edges
            .iter()
            .filter(|edge| edge.target == node.id)
            .count();
        let mutual_calls = outgoing_edges
            .iter()
            .filter(|out_edge| {
                incoming_edges
                    .iter()
                    .any(|in_edge| in_edge.source == out_edge.target)
            })
            .count();

        self_references + mutual_calls.min(3) // cap mutual-call cycles to keep the estimate bounded
    }
5419
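    /// Count direct self-recursion: outgoing `Calls` edges whose target is
    /// the node itself. Indirect (mutual) recursion is not detected here.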
5420 fn count_recursions(&self, node: &codeprism_core::Node) -> usize {
5422 let outgoing_edges = self.graph_store.get_outgoing_edges(&node.id);
5424 outgoing_edges
5425 .iter()
5426 .filter(|edge| {
5427 edge.target == node.id && matches!(edge.kind, codeprism_core::EdgeKind::Calls)
5428 })
5429 .count()
5430 }
5431
    fn count_exception_handling(&self, _node: &codeprism_core::Node) -> usize {
        // Exception-handling constructs are not derived from the graph here;
        // this is a placeholder that always reports zero.
        0
    }
5438
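    /// Cyclomatic complexity approximation: 1 plus the decision-point
    /// estimate for the symbol.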
5439 fn calculate_cyclomatic_complexity(&self, node: &codeprism_core::Node) -> usize {
5441 1 + self.count_decision_points(node)
5443 }
5444
    /// Rough nesting-depth estimate based on the symbol's line span and its
    /// decision-point count, clamped to a maximum of 10.
    fn calculate_nesting_depth(&self, node: &codeprism_core::Node) -> usize {
        let span_lines = node.span.end_line.saturating_sub(node.span.start_line);
        let complexity = self.count_decision_points(node);

        ((span_lines / 10) + complexity / 3).min(10)
    }
5454
    /// Approximate cognitive complexity: cyclomatic complexity plus a penalty
    /// of two per estimated nesting level.
    fn calculate_cognitive_complexity(&self, node: &codeprism_core::Node) -> usize {
        let cyclomatic = self.calculate_cyclomatic_complexity(node);
        let nesting = self.calculate_nesting_depth(node);

        cyclomatic + (nesting * 2)
    }
5463
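    /// Collect execution paths reachable from `node` by walking outgoing
    /// edges up to `max_depth`, using a visited set to avoid cycles.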
5464 fn analyze_execution_paths(
5466 &self,
5467 node: &codeprism_core::Node,
5468 max_depth: usize,
5469 ) -> anyhow::Result<Vec<serde_json::Value>> {
5470 let mut paths = Vec::new();
5471 let mut visited = std::collections::HashSet::new();
5472
5473 self.find_execution_paths_recursive(&node.id, &mut paths, &mut visited, max_depth, 0)?;
5475
5476 Ok(paths)
5477 }
5478
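    /// Depth-first walk over outgoing edges that records terminal nodes and
    /// path continuations. Only the first five outgoing edges of each node
    /// are followed to keep the output bounded.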
5479 fn find_execution_paths_recursive(
5481 &self,
5482 node_id: &codeprism_core::NodeId,
5483 paths: &mut Vec<serde_json::Value>,
5484 visited: &mut std::collections::HashSet<codeprism_core::NodeId>,
5485 max_depth: usize,
5486 current_depth: usize,
5487 ) -> anyhow::Result<()> {
5488 if current_depth >= max_depth || visited.contains(node_id) {
5489 return Ok(());
5490 }
5491
5492 visited.insert(*node_id);
5493
5494 let outgoing_edges = self.graph_store.get_outgoing_edges(node_id);
5495
5496 if outgoing_edges.is_empty() {
5497 if let Some(node) = self.graph_store.get_node(node_id) {
5499 paths.push(serde_json::json!({
5500 "path_type": "terminal",
5501 "endpoint": {
5502 "id": node.id.to_hex(),
5503 "name": node.name,
5504 "kind": format!("{:?}", node.kind)
5505 },
5506 "depth": current_depth
5507 }));
5508 }
5509 } else {
5510 for edge in outgoing_edges.iter().take(5) {
5512 if let Some(target_node) = self.graph_store.get_node(&edge.target) {
5514 paths.push(serde_json::json!({
5515 "path_type": "continuation",
5516 "from": node_id.to_hex(),
5517 "to": target_node.id.to_hex(),
5518 "edge_kind": format!("{:?}", edge.kind),
5519 "target_name": target_node.name,
5520 "depth": current_depth
5521 }));
5522
5523 let mut new_visited = visited.clone();
5525 self.find_execution_paths_recursive(
5526 &edge.target,
5527 paths,
5528 &mut new_visited,
5529 max_depth,
5530 current_depth + 1,
5531 )?;
5532 }
5533 }
5534 }
5535
5536 visited.remove(node_id);
5537 Ok(())
5538 }
5539}
5540
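// MCP protocol handler. Tool dispatch is generated by the #[tool_handler]
// macro; resources, prompts, and resource templates are not implemented and
// return empty lists or invalid-params errors.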
5541#[tool_handler]
5542impl ServerHandler for CodePrismMcpServer {
5543 fn get_info(&self) -> ServerInfo {
5544 ServerInfo {
5545 protocol_version: ProtocolVersion::V_2024_11_05,
5546 capabilities: ServerCapabilities::builder().enable_tools().build(),
5547 server_info: Implementation {
5548 name: self.config.server().name.clone(),
5549 version: self.config.server().version.clone(),
5550 },
5551 instructions: Some(
5552 "CodePrism MCP Server - Advanced code analysis and navigation tools".to_string(),
5553 ),
5554 }
5555 }
5556
5557 async fn initialize(
5558 &self,
5559 _request: InitializeRequestParam,
5560 _context: RequestContext<RoleServer>,
5561 ) -> std::result::Result<InitializeResult, McpError> {
5562 info!("MCP server initialized");
5563 Ok(self.get_info())
5564 }
5565
5566 async fn list_resources(
5567 &self,
5568 _request: Option<PaginatedRequestParam>,
5569 _context: RequestContext<RoleServer>,
5570 ) -> std::result::Result<ListResourcesResult, McpError> {
5571 warn!("Resources not implemented");
5572 Ok(ListResourcesResult {
5573 resources: vec![],
5574 next_cursor: None,
5575 })
5576 }
5577
5578 async fn read_resource(
5579 &self,
5580 _request: ReadResourceRequestParam,
5581 _context: RequestContext<RoleServer>,
5582 ) -> std::result::Result<ReadResourceResult, McpError> {
5583 warn!("Resource reading not implemented");
5584 Err(McpError::invalid_params(
5585 "Resource reading not implemented",
5586 None,
5587 ))
5588 }
5589
5590 async fn list_prompts(
5591 &self,
5592 _request: Option<PaginatedRequestParam>,
5593 _context: RequestContext<RoleServer>,
5594 ) -> std::result::Result<ListPromptsResult, McpError> {
5595 warn!("Prompts not implemented");
5596 Ok(ListPromptsResult {
5597 prompts: vec![],
5598 next_cursor: None,
5599 })
5600 }
5601
5602 async fn get_prompt(
5603 &self,
5604 _request: GetPromptRequestParam,
5605 _context: RequestContext<RoleServer>,
5606 ) -> std::result::Result<GetPromptResult, McpError> {
5607 warn!("Prompt retrieval not implemented");
5608 Err(McpError::invalid_params(
5609 "Prompt retrieval not implemented",
5610 None,
5611 ))
5612 }
5613
5614 async fn list_resource_templates(
5615 &self,
5616 _request: Option<PaginatedRequestParam>,
5617 _context: RequestContext<RoleServer>,
5618 ) -> std::result::Result<ListResourceTemplatesResult, McpError> {
5619 warn!("Resource templates not implemented");
5620 Ok(ListResourceTemplatesResult {
5621 resource_templates: vec![],
5622 next_cursor: None,
5623 })
5624 }
5625}