pub mod audit;
pub mod cache_warming;
pub mod rate_limiter;
#[cfg(test)]
mod tests;
pub mod tool_definitions;
pub mod tool_definitions_extended;
pub mod tools;
use crate::cache::QueryCache;
use crate::monitoring::{MonitoringConfig, MonitoringEndpoints, MonitoringSystem};
use crate::server::audit::{AuditConfig, AuditLogger};
use crate::server::rate_limiter::{ClientId, OperationType, RateLimiter};
use crate::server::tools::registry::ToolRegistry;
use crate::types::{ExecutionStats, SandboxConfig};
use anyhow::Result;
use do_memory_core::SelfLearningMemory;
use parking_lot::RwLock;
use serde_json::Value;
use std::collections::HashMap;
use std::sync::Arc;
use tracing::info;
/// Central MCP server state: the tool registry plus the runtime
/// subsystems (stats, monitoring, caching, audit logging, and rate
/// limiting) that are wired together in `MemoryMCPServer::new`.
// NOTE(review): `dead_code` is allowed because some fields appear to be
// held only for ownership/lifetime (e.g. `monitoring_endpoints`) rather
// than read here — confirm before removing the attribute.
#[allow(dead_code)] pub struct MemoryMCPServer {
    /// Registry of the tools this server exposes; core tools are loaded
    /// up front and the rest on demand (see the startup log in `new`).
    tool_registry: Arc<ToolRegistry>,
    /// Shared execution statistics, guarded for concurrent updates.
    stats: Arc<RwLock<ExecutionStats>>,
    /// Per-tool counters keyed by tool name — presumably invocation
    /// counts; confirm against the call sites that update it.
    tool_usage: Arc<RwLock<HashMap<String, usize>>>,
    /// Handle to the underlying self-learning memory backend.
    memory: Arc<SelfLearningMemory>,
    /// Monitoring subsystem, created with its default configuration.
    monitoring: Arc<MonitoringSystem>,
    /// Endpoint layer built on top of `monitoring`.
    monitoring_endpoints: Arc<MonitoringEndpoints>,
    /// Query cache; optionally pre-warmed at startup when cache warming
    /// is enabled via the environment.
    cache: Arc<QueryCache>,
    /// Audit logger, configured from the environment at construction.
    audit_logger: Arc<AuditLogger>,
    /// Per-client rate limiter, configured from the environment.
    rate_limiter: RateLimiter,
}
impl MemoryMCPServer {
    /// Builds a fully wired server: tool registry, monitoring, audit
    /// logging, query cache, and rate limiter.
    ///
    /// The `_config` sandbox settings are currently unused. When cache
    /// warming is enabled via the environment, the query cache is
    /// pre-warmed; a warming failure is logged and startup continues.
    ///
    /// # Errors
    /// Returns an error only if the audit logger cannot be constructed.
    pub async fn new(_config: SandboxConfig, memory: Arc<SelfLearningMemory>) -> Result<Self> {
        let registry = Arc::new(tools::registry::create_default_registry());
        let monitoring_sys = Self::initialize_monitoring();
        let endpoints = Arc::new(MonitoringEndpoints::new(Arc::clone(&monitoring_sys)));
        let logger = Arc::new(AuditLogger::new(AuditConfig::from_env()).await?);

        // Capture the counts before the registry is moved into the struct.
        let core_count = registry.get_core_tools().len();
        let total_count = registry.total_tool_count();

        let server = Self {
            tool_registry: registry,
            stats: Arc::new(RwLock::new(ExecutionStats::default())),
            tool_usage: Arc::new(RwLock::new(HashMap::new())),
            memory,
            monitoring: monitoring_sys,
            monitoring_endpoints: endpoints,
            cache: Arc::new(QueryCache::new()),
            audit_logger: logger,
            rate_limiter: RateLimiter::from_env(),
        };

        info!(
            "MCP server initialized with {} core tools ({} total tools available)",
            core_count, total_count
        );
        info!("Tools loaded on-demand to reduce token usage (lazy loading enabled)");
        info!(
            "Monitoring system initialized (enabled: {})",
            server.monitoring.config().enabled
        );
        info!("Audit logging system initialized");
        info!(
            "Rate limiter initialized (enabled: {})",
            server.rate_limiter.is_enabled()
        );

        if cache_warming::is_cache_warming_enabled() {
            info!("Starting cache warming process...");
            let warm_config = cache_warming::CacheWarmingConfig::from_env();
            // Warming is best-effort: a failure must not abort startup.
            match cache_warming::warm_cache(&server.memory, &warm_config).await {
                Ok(_) => info!("Cache warming completed successfully"),
                Err(e) => tracing::warn!(
                    "Cache warming failed, but continuing with server startup: {}",
                    e
                ),
            }
        } else {
            info!("Cache warming disabled, skipping");
        }

        Ok(server)
    }

    /// Creates the monitoring subsystem with its default configuration.
    fn initialize_monitoring() -> Arc<MonitoringSystem> {
        Arc::new(MonitoringSystem::new(MonitoringConfig::default()))
    }

    /// Returns a shared handle to the self-learning memory backend.
    pub fn memory(&self) -> Arc<SelfLearningMemory> {
        Arc::clone(&self.memory)
    }

    /// Returns a shared handle to the audit logger.
    pub fn audit_logger(&self) -> Arc<AuditLogger> {
        Arc::clone(&self.audit_logger)
    }

    /// Borrows the server's rate limiter.
    pub fn rate_limiter(&self) -> &RateLimiter {
        &self.rate_limiter
    }

    /// Extracts the caller's identity from a tool-call argument object.
    ///
    /// Reads the `"client_id"` string field; a missing, non-string, or
    /// empty value maps to `ClientId::Unknown`.
    pub fn client_id_from_args(&self, args: &Value) -> ClientId {
        match args.get("client_id").and_then(Value::as_str) {
            Some(id) if !id.is_empty() => ClientId::from_string(id),
            _ => ClientId::Unknown,
        }
    }

    /// Checks whether `client_id` may perform `operation` right now.
    pub fn check_rate_limit(
        &self,
        client_id: &ClientId,
        operation: OperationType,
    ) -> crate::server::rate_limiter::RateLimitResult {
        self.rate_limiter.check_rate_limit(client_id, operation)
    }

    /// Rate-limit response headers describing a check result.
    pub fn rate_limit_headers(
        &self,
        result: &crate::server::rate_limiter::RateLimitResult,
    ) -> Vec<(String, String)> {
        self.rate_limiter.get_headers(result)
    }

    /// Headers to attach to a response that was rate limited.
    pub fn rate_limited_headers(
        &self,
        result: &crate::server::rate_limiter::RateLimitResult,
    ) -> Vec<(String, String)> {
        self.rate_limiter.get_rate_limited_headers(result)
    }
}