use std::path::PathBuf;
use std::sync::Arc;
use crate::config::Settings;
use crate::indexing::facade::IndexFacade;
/// CLI arguments for the `serve` subcommand.
pub struct ServeArgs {
    /// Enable index watching / hot-reload while serving.
    pub watch: bool,
    /// Hot-reload poll interval in seconds.
    /// NOTE(review): `run` treats the value 5 as "left at default" and falls
    /// back to `config.server.watch_interval` — confirm 5 matches the CLI default.
    pub watch_interval: u64,
    /// Serve over HTTP instead of the default stdio transport.
    pub http: bool,
    /// Serve over HTTPS; takes precedence over `http` in `run`.
    pub https: bool,
    /// Bind address for HTTP/HTTPS.
    /// NOTE(review): `run` treats "127.0.0.1:8080" as "left at default" and
    /// falls back to config (or the 8443 HTTPS default) — confirm against the CLI.
    pub bind: String,
}
/// Entry point for the `serve` subcommand: resolve effective transport,
/// bind address, and watch interval from CLI args and config, then dispatch
/// to the matching server runner (HTTPS, HTTP, or stdio).
pub async fn run(
    args: ServeArgs,
    config: Settings,
    settings: Arc<Settings>,
    facade: IndexFacade,
    index_path: PathBuf,
) {
    let ServeArgs {
        watch,
        watch_interval,
        http,
        https,
        bind,
    } = args;

    // Transport precedence: explicit --https wins, then --http or a
    // configured "http" mode, otherwise the default stdio transport.
    let server_mode = match (https, http || config.server.mode == "http") {
        (true, _) => "https",
        (false, true) => "http",
        (false, false) => "stdio",
    };

    // A bind value still at its CLI default falls back to the HTTPS default
    // port or the configured address; anything else is user-chosen and wins.
    let bind_address = if bind == "127.0.0.1:8080" {
        if https {
            "127.0.0.1:8443".to_string()
        } else {
            config.server.bind.clone()
        }
    } else {
        bind
    };

    // Same default-vs-config resolution for the watch interval.
    let actual_watch_interval = if watch_interval == 5 {
        config.server.watch_interval
    } else {
        watch_interval
    };

    match server_mode {
        "https" => run_https_server(&config, watch, bind_address).await,
        "http" => run_http_server(config, watch, bind_address).await,
        _ => {
            run_stdio_server(
                config,
                settings,
                facade,
                index_path,
                watch,
                actual_watch_interval,
            )
            .await
        }
    }
}
/// Launch the HTTPS transport, or exit with an error if the binary was
/// built without the `https-server` feature.
async fn run_https_server(config: &Settings, watch: bool, bind_address: String) {
    tracing::info!(target: "mcp", "starting HTTPS server on {bind_address}");

    let watching = watch || config.file_watch.enabled;
    if watching {
        tracing::debug!(
            target: "mcp",
            "file watching enabled with {}ms debounce",
            config.file_watch.debounce_ms
        );
    }

    #[cfg(feature = "https-server")]
    {
        use crate::mcp::https_server::serve_https;
        match serve_https(config.clone(), watch, bind_address).await {
            Ok(_) => {}
            Err(e) => {
                eprintln!("HTTPS server error: {e}");
                std::process::exit(1);
            }
        }
    }

    #[cfg(not(feature = "https-server"))]
    {
        eprintln!("HTTPS server support is not compiled in.");
        eprintln!("Please rebuild with: cargo build --features https-server");
        std::process::exit(1);
    }
}
/// Launch the HTTP transport; exits the process on server error.
async fn run_http_server(config: Settings, watch: bool, bind_address: String) {
    use crate::mcp::http_server::serve_http;

    eprintln!("Starting MCP server in HTTP mode");
    eprintln!("Bind address: {bind_address}");

    let watching = watch || config.file_watch.enabled;
    if watching {
        eprintln!(
            "File watching: ENABLED (event-driven with {}ms debounce)",
            config.file_watch.debounce_ms
        );
    }

    match serve_http(config, watch, bind_address).await {
        Ok(_) => {}
        Err(e) => {
            eprintln!("HTTP server error: {e}");
            std::process::exit(1);
        }
    }
}
/// Run the MCP server over the stdio transport.
///
/// Before serving, optionally starts background tasks:
/// - a `HotReloadWatcher` polling the index every `actual_watch_interval`
///   seconds (when `watch` is set), and
/// - a `UnifiedWatcher` with code/config/document handlers plus a
///   notification listener (when `watch` or `config.file_watch.enabled`).
///
/// Exits the process with status 1 if the server fails to start or errors
/// while running.
async fn run_stdio_server(
    config: Settings,
    settings: Arc<Settings>,
    facade: IndexFacade,
    index_path: PathBuf,
    watch: bool,
    actual_watch_interval: u64,
) {
    eprintln!("Starting MCP server on stdio transport");
    if watch {
        eprintln!("Index watching enabled (interval: {actual_watch_interval}s)");
    }
    eprintln!("To test: npx @modelcontextprotocol/inspector cargo run -- serve");
    tracing::debug!(
        target: "mcp",
        "creating server with facade - symbols: {}, semantic: {}",
        facade.symbol_count(),
        facade.has_semantic_search()
    );
    let server = crate::mcp::CodeIntelligenceServer::new(facade);

    // Attach a document store when one is configured; the Arc is kept so the
    // unified watcher below can share the same store.
    let document_store_arc = crate::documents::load_from_settings(&config);
    let server = if let Some(ref store_arc) = document_store_arc {
        tracing::debug!(target: "mcp", "attaching document store to server");
        server.with_document_store_arc(store_arc.clone())
    } else {
        server
    };

    // Interval-based index hot-reload, detached onto the runtime.
    if watch {
        use crate::watcher::HotReloadWatcher;
        use std::time::Duration;
        let facade_arc = server.get_facade_arc();
        let watcher = HotReloadWatcher::new(
            facade_arc,
            settings.clone(),
            Duration::from_secs(actual_watch_interval),
        );
        tokio::spawn(async move {
            watcher.watch().await;
        });
        eprintln!("Hot-reload watcher started");
    }

    // Event-driven file watching: code, config, and (optionally) document
    // handlers feeding a notification broadcaster.
    if watch || config.file_watch.enabled {
        use crate::mcp::notifications::NotificationBroadcaster;
        use crate::watcher::UnifiedWatcher;
        use crate::watcher::handlers::{CodeFileHandler, ConfigFileHandler, DocumentFileHandler};
        let broadcaster = Arc::new(NotificationBroadcaster::new(100));
        // Fall back to the current directory (then ".") when no workspace
        // root is configured.
        let workspace_root = config
            .workspace_root
            .clone()
            .unwrap_or_else(|| std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")));
        let settings_path = workspace_root.join(".codanna/settings.toml");
        let debounce_ms = config.file_watch.debounce_ms;
        let facade_arc = server.get_facade_arc();
        let mut builder = UnifiedWatcher::builder()
            .broadcaster(broadcaster.clone())
            .indexer(facade_arc.clone())
            .index_path(index_path.clone())
            .workspace_root(workspace_root.clone())
            .debounce_ms(debounce_ms);
        builder = builder.handler(CodeFileHandler::new(
            facade_arc.clone(),
            workspace_root.clone(),
        ));
        // A failed config handler is non-fatal: the watcher still runs with
        // the remaining handlers.
        match ConfigFileHandler::new(settings_path.clone()) {
            Ok(config_handler) => {
                builder = builder.handler(config_handler);
            }
            Err(e) => {
                eprintln!("Failed to create config handler: {e}");
            }
        }
        if let Some(store_arc) = document_store_arc {
            tracing::debug!(target: "mcp", "adding document handler to watcher");
            builder = builder
                .document_store(store_arc.clone())
                .chunking_config(config.documents.defaults.clone())
                .handler(DocumentFileHandler::new(store_arc, workspace_root.clone()));
        }
        // Subscribe before spawning so no notifications are missed.
        let notification_receiver = broadcaster.subscribe();
        let notification_server = server.clone();
        match builder.build() {
            Ok(unified_watcher) => {
                tokio::spawn(async move {
                    if let Err(e) = unified_watcher.watch().await {
                        eprintln!("Unified watcher error: {e}");
                    }
                });
                eprintln!(
                    "Unified watcher started (debounce: {debounce_ms}ms, config: {})",
                    settings_path.display()
                );
                tokio::spawn(async move {
                    notification_server
                        .start_notification_listener(notification_receiver)
                        .await;
                });
            }
            Err(e) => {
                eprintln!("Failed to start unified watcher: {e}");
            }
        }
    }

    use rmcp::{ServiceExt, transport::stdio};
    // `unwrap_or_else` with a diverging `exit` replaces the previous
    // `.map_err(|e| { ...; exit(1); }).unwrap()` pattern, which only worked
    // because the closure never returned, leaving a dead `unwrap`.
    let service = server.serve(stdio()).await.unwrap_or_else(|e| {
        eprintln!("Failed to start MCP server: {e}");
        std::process::exit(1);
    });
    // Block until the server shuts down; treat a transport error as fatal.
    if let Err(e) = service.waiting().await {
        eprintln!("MCP server error: {e}");
        std::process::exit(1);
    }
}
/// Spawn an MCP server binary and exercise it with the test client.
///
/// Uses the currently running executable as the server when `server_binary`
/// is not given. Exits the process with status 1 if the test fails.
pub async fn run_mcp_test(
    server_binary: Option<PathBuf>,
    cli_config: Option<PathBuf>,
    tool: Option<String>,
    args: Option<String>,
    delay: Option<u64>,
) {
    use crate::mcp::client::CodeIntelligenceClient;

    let server_path = match server_binary {
        Some(path) => path,
        None => std::env::current_exe().expect("Failed to get current executable path"),
    };

    match CodeIntelligenceClient::test_server(server_path, cli_config, tool, args, delay).await {
        Ok(_) => {}
        Err(e) => {
            eprintln!("MCP test failed: {e}");
            std::process::exit(1);
        }
    }
}