// do_memory_mcp/server/mod.rs

pub mod audit;
pub mod cache_warming;
pub mod rate_limiter;
pub mod sandbox;
#[cfg(test)]
mod tests;
pub mod tool_definitions;
pub mod tool_definitions_extended;
pub mod tools;

use crate::cache::QueryCache;
use crate::monitoring::{MonitoringConfig, MonitoringEndpoints, MonitoringSystem};
use crate::server::audit::{AuditConfig, AuditLogger};
use crate::server::rate_limiter::{ClientId, OperationType, RateLimiter};
use crate::server::tools::registry::ToolRegistry;
use crate::types::{ExecutionStats, SandboxConfig};
use crate::unified_sandbox::UnifiedSandbox;
use anyhow::Result;
use do_memory_core::SelfLearningMemory;
use parking_lot::RwLock;
use serde_json::Value;
use std::collections::HashMap;
use std::sync::Arc;
use tracing::info;

/// MCP server wiring: sandboxed tool execution plus the cross-cutting
/// subsystems it depends on (monitoring, audit logging, caching, and
/// rate limiting). Constructed via [`MemoryMCPServer::new`].
pub struct MemoryMCPServer {
    /// Sandbox in which tool code is executed; the backend is selected
    /// at startup (see `sandbox::determine_sandbox_backend`).
    sandbox: Arc<UnifiedSandbox>,
    /// Registry of available tools (core + extended); log output in `new`
    /// indicates tools are exposed lazily to reduce token usage.
    tool_registry: Arc<ToolRegistry>,
    /// Aggregate execution statistics, shared behind a `parking_lot` RwLock.
    stats: Arc<RwLock<ExecutionStats>>,
    /// Per-tool usage counts (name -> count) — presumably incremented on
    /// each invocation; confirm against the call sites in `tools`.
    tool_usage: Arc<RwLock<HashMap<String, usize>>>,
    /// Shared handle to the self-learning memory system backing the tools.
    memory: Arc<SelfLearningMemory>,
    /// Monitoring subsystem (metrics/health; see `MonitoringSystem`).
    monitoring: Arc<MonitoringSystem>,
    /// Endpoint layer exposing the monitoring system to callers.
    monitoring_endpoints: Arc<MonitoringEndpoints>,
    /// Query cache. Not read anywhere in this module (hence the allow);
    /// retained for wiring done elsewhere or planned — verify before removing.
    #[allow(dead_code)]
    cache: Arc<QueryCache>,
    /// Audit trail for server/tool activity; initialized from env config.
    audit_logger: Arc<AuditLogger>,
    /// Per-client operation rate limiter, configured from the environment
    /// at construction time (`RateLimiter::from_env`).
    rate_limiter: RateLimiter,
}
85
impl MemoryMCPServer {
    /// Builds a fully wired server: sandbox, tool registry, monitoring,
    /// audit logging, rate limiting, and an (optionally pre-warmed) cache.
    ///
    /// # Errors
    /// Fails if the sandbox or the audit logger cannot be initialized.
    /// A cache-warming failure is deliberately NOT fatal — it is logged
    /// as a warning and startup continues.
    pub async fn new(config: SandboxConfig, memory: Arc<SelfLearningMemory>) -> Result<Self> {
        // Backend selection is delegated; the sandbox gets its own clone of
        // the config so the caller's copy stays untouched.
        let sandbox_backend = sandbox::determine_sandbox_backend();
        let sandbox = Arc::new(UnifiedSandbox::new(config.clone(), sandbox_backend).await?);

        let tool_registry = Arc::new(tools::registry::create_default_registry());

        let monitoring = Self::initialize_monitoring();
        let monitoring_endpoints = Arc::new(MonitoringEndpoints::new(Arc::clone(&monitoring)));

        // Audit configuration is environment-derived (per `from_env`).
        let audit_config = AuditConfig::from_env();
        let audit_logger = Arc::new(AuditLogger::new(audit_config).await?);

        // Snapshot the counts now: `tool_registry` is moved into `server` below.
        let core_count = tool_registry.get_core_tools().len();
        let total_count = tool_registry.total_tool_count();

        let server = Self {
            sandbox,
            tool_registry,
            stats: Arc::new(RwLock::new(ExecutionStats::default())),
            tool_usage: Arc::new(RwLock::new(HashMap::new())),
            memory,
            monitoring,
            monitoring_endpoints,
            cache: Arc::new(QueryCache::new()),
            audit_logger,
            rate_limiter: RateLimiter::from_env(),
        };

        info!(
            "MCP server initialized with {} core tools ({} total tools available)",
            core_count, total_count
        );
        info!("Tools loaded on-demand to reduce token usage (lazy loading enabled)");
        info!(
            "Monitoring system initialized (enabled: {})",
            server.monitoring.config().enabled
        );
        info!("Audit logging system initialized");
        info!(
            "Rate limiter initialized (enabled: {})",
            server.rate_limiter.is_enabled()
        );

        // Best-effort warm-up: an Err here downgrades to a warning so the
        // server never fails to start because of the cache.
        if cache_warming::is_cache_warming_enabled() {
            info!("Starting cache warming process...");
            if let Err(e) = cache_warming::warm_cache(
                &server.memory,
                &cache_warming::CacheWarmingConfig::from_env(),
            )
            .await
            {
                tracing::warn!(
                    "Cache warming failed, but continuing with server startup: {}",
                    e
                );
            } else {
                info!("Cache warming completed successfully");
            }
        } else {
            info!("Cache warming disabled, skipping");
        }

        Ok(server)
    }

    /// Builds the monitoring subsystem with its default configuration.
    fn initialize_monitoring() -> Arc<MonitoringSystem> {
        let monitoring_config = MonitoringConfig::default();
        Arc::new(MonitoringSystem::new(monitoring_config))
    }

    /// Returns a new handle (refcount bump, no deep copy) to the shared
    /// self-learning memory system.
    pub fn memory(&self) -> Arc<SelfLearningMemory> {
        Arc::clone(&self.memory)
    }

    /// Returns a new handle to the shared audit logger.
    pub fn audit_logger(&self) -> Arc<AuditLogger> {
        Arc::clone(&self.audit_logger)
    }

    /// Borrows the server's rate limiter.
    pub fn rate_limiter(&self) -> &RateLimiter {
        &self.rate_limiter
    }

    /// Extracts the caller identity from a tool-call argument object.
    ///
    /// Reads the optional `"client_id"` string field of `args`; a missing,
    /// non-string, or empty value maps to `ClientId::Unknown`.
    pub fn client_id_from_args(&self, args: &Value) -> ClientId {
        args.get("client_id")
            .and_then(|v| v.as_str())
            .filter(|s| !s.is_empty())
            .map(ClientId::from_string)
            .unwrap_or(ClientId::Unknown)
    }

    /// Runs one operation through the client's rate limit and returns the
    /// limiter's verdict (thin delegation to `RateLimiter::check_rate_limit`).
    pub fn check_rate_limit(
        &self,
        client_id: &ClientId,
        operation: OperationType,
    ) -> crate::server::rate_limiter::RateLimitResult {
        self.rate_limiter.check_rate_limit(client_id, operation)
    }

    /// Response headers describing the current rate-limit state for a
    /// successful check (delegates to `RateLimiter::get_headers`).
    pub fn rate_limit_headers(
        &self,
        result: &crate::server::rate_limiter::RateLimitResult,
    ) -> Vec<(String, String)> {
        self.rate_limiter.get_headers(result)
    }

    /// Response headers for a request rejected by the rate limiter
    /// (delegates to `RateLimiter::get_rate_limited_headers`).
    pub fn rate_limited_headers(
        &self,
        result: &crate::server::rate_limiter::RateLimitResult,
    ) -> Vec<(String, String)> {
        self.rate_limiter.get_rate_limited_headers(result)
    }
}