// bamboo_agent/server/app_state.rs

//! Unified application state management for the Bamboo server
//!
//! This module provides the central AppState struct that consolidates all
//! server state including sessions, storage, LLM providers, tools, and metrics.
//!
//! # Architecture
//!
//! The AppState uses a unified design that eliminates the proxy pattern where
//! web_service created an AgentAppState that called back via HTTP. Instead, it
//! provides direct access to all components.
//!
//! ```text
//! ┌────────────────────────────────────────────────────┐
//! │              AppState (Unified)                    │
//! │                                                    │
//! │  ┌──────────────┐      ┌──────────────┐          │
//! │  │   Config     │      │   Provider   │          │
//! │  │  (Hot-reload)│◄────►│   (LLM)      │          │
//! │  └──────────────┘      └──────────────┘          │
//! │                                                    │
//! │  ┌──────────────┐      ┌──────────────┐          │
//! │  │   Sessions   │      │   Storage    │          │
//! │  │  (In-memory) │      │  (Persistent)│          │
//! │  └──────────────┘      └──────────────┘          │
//! │                                                    │
//! │  ┌──────────────┐      ┌──────────────┐          │
//! │  │    Tools     │      │    Skills    │          │
//! │  │ (Builtin+MCP)│      │   Manager    │          │
//! │  └──────────────┘      └──────────────┘          │
//! │                                                    │
//! │  ┌──────────────┐      ┌──────────────┐          │
//! │  │     MCP      │      │   Metrics    │          │
//! │  │   Manager    │      │   Service    │          │
//! │  └──────────────┘      └──────────────┘          │
//! └────────────────────────────────────────────────────┘
//! ```
//!
//! # Key Features
//!
//! - **Hot-reloadable configuration**: Config and provider can be reloaded at runtime
//! - **Direct provider access**: No HTTP proxy overhead
//! - **Session management**: In-memory session cache with persistent storage
//! - **Tool composition**: Combines built-in and MCP tools
//! - **Metrics collection**: Integrated metrics and event tracking
//!
//! # Usage Example
//!
//! ```rust,no_run
//! use bamboo_agent::server::app_state::AppState;
//! use std::path::PathBuf;
//!
//! #[tokio::main]
//! async fn main() {
//!     // Initialize app state
//!     let app_data_dir = PathBuf::from("/path/to/.bamboo");
//!     let state = AppState::new(app_data_dir).await;
//!
//!     // Access components
//!     let provider = state.get_provider().await;
//!     let schemas = state.get_all_tool_schemas();
//!
//!     // Hot reload configuration
//!     state.reload_config().await;
//!     state.reload_provider().await.ok();
//! }
//! ```

use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;

use chrono::{DateTime, Utc};
use tokio::sync::{broadcast, RwLock};
use tokio_util::sync::CancellationToken;

use crate::agent::core::storage::JsonlStorage;
use crate::agent::core::tools::ToolExecutor;
use crate::agent::core::AgentEvent;
use crate::agent::llm::LLMProvider;
use crate::agent::mcp::McpServerManager;
use crate::agent::skill::{SkillManager, SkillStoreConfig};
use crate::core::Config;
use crate::process::ProcessRegistry;
use crate::server::metrics_service::MetricsService;

/// Default system prompt for agent interactions
///
/// Used as the base instruction when no session-specific prompt is supplied.
pub const DEFAULT_BASE_PROMPT: &str =
    "You are a helpful AI assistant with access to various tools and skills.";

/// Guidance for workspace-based interactions
///
/// Appended to prompts to steer file lookups toward the workspace before
/// falling back to the `~/.bamboo` data directory.
pub const WORKSPACE_PROMPT_GUIDANCE: &str =
    "If you need to inspect files, check the workspace first, then ~/.bamboo.";

/// Status of an agent execution runner
///
/// Represents the lifecycle state of an agent run from initialization
/// through completion or error.
///
/// Derives `PartialEq`/`Eq` so callers can compare statuses directly
/// (e.g. `status == AgentStatus::Running`) instead of matching on
/// every variant by hand.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AgentStatus {
    /// Agent is initialized but not yet running
    Pending,

    /// Agent is currently executing
    Running,

    /// Agent completed successfully
    Completed,

    /// Agent execution was cancelled by user
    Cancelled,

    /// Agent execution failed with an error message
    Error(String),
}

/// Runner that manages agent execution for a session
///
/// Each active agent run has an associated AgentRunner that coordinates
/// event broadcasting, cancellation, and status tracking.
///
/// # Event Broadcasting
///
/// Uses a broadcast channel to support multiple subscribers watching
/// the same agent run simultaneously.
///
/// # Cancellation
///
/// Provides a cancellation token that can be used to gracefully stop
/// an in-progress agent execution.
#[derive(Debug, Clone)]
pub struct AgentRunner {
    /// Broadcast sender for agent events
    ///
    /// Allows multiple clients to subscribe to agent events
    /// via `event_sender.subscribe()`.
    pub event_sender: broadcast::Sender<AgentEvent>,

    /// Cancellation token for graceful shutdown
    ///
    /// When triggered, the agent should stop execution at the
    /// next safe point.
    pub cancel_token: CancellationToken,

    /// Current status of the agent run
    ///
    /// Drives the cleanup policy: `Running` runners are never evicted;
    /// any other status starts the retention TTL.
    pub status: AgentStatus,

    /// Timestamp when the run was started (set at construction)
    pub started_at: DateTime<Utc>,

    /// Timestamp when the run completed (if finished)
    ///
    /// `None` while the run is still pending or in progress.
    pub completed_at: Option<DateTime<Utc>>,

    /// Last token budget event to replay for new subscribers
    ///
    /// When a new client subscribes to an ongoing run, this
    /// allows them to receive the most recent token usage info.
    pub last_budget_event: Option<AgentEvent>,
}

impl Default for AgentRunner {
    fn default() -> Self {
        // Delegate to `new()` so both construction paths share one implementation.
        Self::new()
    }
}

167impl AgentRunner {
168    /// Create a new agent runner with default settings
169    ///
170    /// Initializes a broadcast channel with capacity for 1000 events,
171    /// a fresh cancellation token, and Pending status.
172    pub fn new() -> Self {
173        let (event_sender, _) = broadcast::channel(1000);
174        Self {
175            event_sender,
176            cancel_token: CancellationToken::new(),
177            status: AgentStatus::Pending,
178            started_at: Utc::now(),
179            completed_at: None,
180            last_budget_event: None,
181        }
182    }
183}
184
/// Unified application state consolidating web_service and agent/server state
///
/// This struct holds all the state needed to run the Bamboo server, including
/// configuration, LLM providers, sessions, storage, tools, skills, and metrics.
///
/// # Design Goals
///
/// - **Direct access**: Components are directly accessible without HTTP proxies
/// - **Hot reload**: Configuration and providers can be reloaded at runtime
/// - **Thread safety**: Uses Arc<RwLock> for concurrent access
/// - **Persistence**: Integrates with JsonlStorage for session persistence
///
/// # Component Overview
///
/// | Component | Purpose | Thread-Safe |
/// |-----------|---------|-------------|
/// | `config` | Application configuration | Yes (RwLock) |
/// | `provider` | Hot-reloadable LLM provider | Yes (RwLock) |
/// | `sessions` | Active conversation sessions | Yes (RwLock) |
/// | `storage` | Persistent session storage | Yes (Arc) |
/// | `tools` | Tool execution (builtin + MCP) | Yes (Arc) |
/// | `skill_manager` | Skill registry and execution | Yes (Arc) |
/// | `mcp_manager` | MCP server lifecycle | Yes (Arc) |
/// | `metrics_service` | Usage metrics collection | Yes (Arc) |
/// | `agent_runners` | Active agent executions | Yes (RwLock) |
pub struct AppState {
    /// Application data directory (typically ~/.bamboo)
    pub app_data_dir: PathBuf,

    /// Hot-reloadable application configuration
    ///
    /// Can be reloaded from disk at runtime using `reload_config()`.
    pub config: Arc<RwLock<Config>>,

    /// Hot-reloadable LLM provider with direct access
    ///
    /// This eliminates the proxy pattern where we created an AgentAppState
    /// that called back to web_service via HTTP. Now we have direct provider access.
    pub provider: Arc<RwLock<Arc<dyn LLMProvider>>>,

    /// Active conversation sessions (in-memory cache)
    ///
    /// Maps session IDs to Session objects. Persisted to storage
    /// via the `storage` field.
    pub sessions: Arc<RwLock<HashMap<String, crate::agent::core::Session>>>,

    /// Persistent storage backend for sessions
    ///
    /// Uses JSONL format for append-only event logging.
    pub storage: JsonlStorage,

    /// Direct LLM provider reference
    ///
    /// Captured once at construction for convenience and to avoid lock
    /// overhead. NOTE(review): unlike `provider`, this field is NOT updated
    /// by `reload_provider()`, so it can go stale after a hot reload —
    /// prefer `get_provider()` when reloads matter.
    pub llm: Arc<dyn LLMProvider>,

    /// Composite tool executor (builtin + MCP tools)
    ///
    /// Combines built-in tools (file ops, code execution) with
    /// MCP-provided tools from configured servers.
    pub tools: Arc<dyn ToolExecutor>,

    /// Cancellation tokens for in-flight requests
    ///
    /// Maps request/session IDs to their cancellation tokens,
    /// allowing graceful shutdown of long-running operations.
    pub cancel_tokens: Arc<RwLock<HashMap<String, CancellationToken>>>,

    /// Skill manager for prompt-based skill execution
    ///
    /// Manages the skill registry and handles skill lookup,
    /// validation, and execution.
    pub skill_manager: Arc<SkillManager>,

    /// MCP server manager for external tool servers
    ///
    /// Handles lifecycle of Model Context Protocol servers,
    /// including initialization, tool discovery, and shutdown.
    pub mcp_manager: Arc<McpServerManager>,

    /// Metrics collection and persistence service
    ///
    /// Tracks token usage, costs, and performance metrics
    /// across all sessions.
    pub metrics_service: Arc<MetricsService>,

    /// Default model name for LLM requests
    ///
    /// Read from configuration (Anthropic provider entry), used as
    /// fallback when not specified in individual requests.
    pub model_name: String,

    /// Active agent runners indexed by session ID
    ///
    /// Each runner manages event broadcasting and cancellation
    /// for an active agent execution.
    pub agent_runners: Arc<RwLock<HashMap<String, AgentRunner>>>,

    /// Registry for tracking external processes (e.g., Claude Code CLI sessions)
    pub process_registry: Arc<ProcessRegistry>,

    /// Discovered Claude Code CLI binary path (if installed)
    pub claude_cli_path: Option<String>,

    /// Active Claude Code CLI runners indexed by Claude session ID
    ///
    /// These are streamed to clients via SSE under the `/v1/agent/...` endpoints.
    pub claude_runners: Arc<RwLock<HashMap<String, AgentRunner>>>,

    /// Maps client-provided session ids (aliases) to real Claude UUID session ids.
    ///
    /// Claude Code requires session ids to be UUIDs, but some clients/tests use
    /// human-readable strings. We accept those as aliases and generate a UUID.
    pub claude_session_aliases: Arc<RwLock<HashMap<String, String>>>,

    /// Optional metrics bus for event streaming
    ///
    /// When enabled, allows subscribing to metrics events
    /// in real-time. Set by the server after construction if needed.
    pub metrics_bus: Option<crate::agent::metrics::MetricsBus>,
}

308impl AppState {
309    /// Create unified app state with direct provider access
310    ///
311    /// This eliminates the proxy pattern where we created an AgentAppState
312    /// that called back to web_service via HTTP. Now we have direct provider access.
313    ///
314    /// # Arguments
315    ///
316    /// * `bamboo_home_dir` - Bamboo home directory containing all application data.
317    ///                        This is the root directory (e.g., ~/.bamboo) that contains:
318    ///                        - config.json: Configuration file
319    ///                        - sessions/: Conversation history
320    ///                        - skills/: Skill definitions
321    ///                        - workflows/: Workflow definitions
322    ///                        - cache/: Cached data
323    ///                        - runtime/: Runtime files
324    ///
325    /// # Returns
326    ///
327    /// A fully initialized AppState with all components ready for use.
328    ///
329    /// # Panics
330    ///
331    /// Panics if storage initialization fails (critical error).
332    ///
333    /// # Example
334    ///
335    /// ```rust,no_run
336    /// use bamboo_agent::server::app_state::AppState;
337    /// use std::path::PathBuf;
338    ///
339    /// #[tokio::main]
340    /// async fn main() {
341    ///     let state = AppState::new(PathBuf::from("/path/to/.bamboo")).await;
342    ///     println!("Initialized with model: {}", state.model_name);
343    /// }
344    /// ```
345    pub async fn new(bamboo_home_dir: PathBuf) -> Self {
346        // Load config from the specified data directory
347        let config = Config::from_data_dir(Some(bamboo_home_dir.clone()));
348
349        // Create provider with direct access (no HTTP proxy)
350        let provider =
351            match crate::agent::llm::create_provider_with_dir(&config, bamboo_home_dir.clone())
352                .await
353            {
354                Ok(p) => p,
355                Err(e) => {
356                    log::error!(
357                        "Failed to create provider: {}. Using OpenAI as fallback.",
358                        e
359                    );
360                    Arc::new(crate::agent::llm::OpenAIProvider::new(
361                        "sk-test".to_string(),
362                    ))
363                }
364            };
365
366        Self::new_with_provider(bamboo_home_dir, config, provider).await
367    }
368
369    /// Create unified app state with a specific provider
370    ///
371    /// Allows injecting a custom LLM provider instead of creating
372    /// one from configuration. Useful for testing and custom deployments.
373    ///
374    /// # Arguments
375    ///
376    /// * `bamboo_home_dir` - Bamboo home directory containing all application data
377    /// * `config` - Application configuration
378    /// * `provider` - Pre-configured LLM provider implementation
379    ///
380    /// # Returns
381    ///
382    /// A fully initialized AppState with the provided provider.
383    ///
384    /// # Initialization Steps
385    ///
386    /// 1. Initialize JSONL storage in `{bamboo_home_dir}/sessions`
387    /// 4. Load built-in tools
388    /// 5. Initialize MCP manager and load configured servers
389    /// 6. Create composite tool executor (builtin + MCP)
390    /// 7. Initialize skill manager
391    /// 8. Initialize metrics service with SQLite backend
392    /// 9. Start runner cleanup task (removes completed runners after 5 minutes)
393    ///
394    /// # Panics
395    ///
396    /// Panics if storage or metrics initialization fails.
397    pub async fn new_with_provider(
398        bamboo_home_dir: PathBuf,
399        config: Config,
400        provider: Arc<dyn LLMProvider>,
401    ) -> Self {
402        let data_dir = bamboo_home_dir.clone();
403        let sessions_dir = data_dir.join("sessions");
404
405        log::info!("Initializing storage at: {:?}", sessions_dir);
406        let storage = JsonlStorage::new(&sessions_dir);
407        if let Err(e) = storage.init().await {
408            log::error!("Failed to init storage at {:?}: {}", sessions_dir, e);
409            panic!("Failed to init storage: {}", e);
410        }
411        log::info!("Storage initialized successfully at: {:?}", sessions_dir);
412
413        // Discover Claude Code CLI once at startup. This is optional.
414        // We do it early so we can conditionally register the `claude_code` tool.
415        let claude_cli_path =
416            tokio::task::spawn_blocking(|| crate::claude::try_find_claude_binary())
417                .await
418                .ok()
419                .flatten();
420
421        if let Some(ref path) = claude_cli_path {
422            log::info!("Claude Code CLI enabled (found at: {})", path);
423        } else {
424            log::warn!("Claude Code CLI not found; Claude integration disabled");
425        }
426
427        // Wrap config early so tools can reference the hot-reloadable config (e.g. proxy settings).
428        let model_name = config
429            .providers
430            .anthropic
431            .as_ref()
432            .and_then(|p| p.model.as_ref())
433            .cloned()
434            .unwrap_or_else(|| "claude-3-5-sonnet-20241022".to_string());
435
436        let config = Arc::new(RwLock::new(config));
437
438        // Initialize built-in tools (with optional Claude Code tool)
439        let builtin_executor =
440            crate::agent::tools::BuiltinToolExecutor::new_with_config(config.clone());
441        if let Some(ref path) = claude_cli_path {
442            if let Err(e) = builtin_executor.register_tool(
443                crate::agent::tools::tools::ClaudeCodeTool::new(path.clone()),
444            ) {
445                log::warn!("Failed to register claude_code tool: {}", e);
446            }
447        }
448        let builtin_tools: Arc<dyn ToolExecutor> = Arc::new(builtin_executor);
449
450        // Initialize MCP manager
451        let mcp_manager = Arc::new(McpServerManager::new());
452
453        // Try to load MCP config and initialize servers
454        let mcp_config = config.read().await.mcp.clone();
455        mcp_manager.initialize_from_config(&mcp_config).await;
456
457        // Create composite tool executor (builtin + MCP)
458        let mcp_tools = Arc::new(crate::agent::mcp::McpToolExecutor::new(
459            mcp_manager.clone(),
460            mcp_manager.tool_index(),
461        ));
462        let tools: Arc<dyn ToolExecutor> = Arc::new(crate::agent::mcp::CompositeToolExecutor::new(
463            builtin_tools,
464            mcp_tools,
465        ));
466
467        // Initialize skill manager
468        let skill_manager = Arc::new(SkillManager::with_config(SkillStoreConfig {
469            skills_dir: data_dir.join("skills"),
470        }));
471        if let Err(error) = skill_manager.initialize().await {
472            log::warn!("Failed to initialize skill manager: {}", error);
473        }
474
475        // Initialize metrics service
476        let metrics_service = Arc::new(
477            MetricsService::new(data_dir.join("metrics.db"))
478                .await
479                .unwrap_or_else(|error| {
480                    log::error!("Failed to initialize metrics storage: {}", error);
481                    panic!("Failed to init metrics storage: {}", error);
482                }),
483        );
484
485        // Initialize agent runners with cleanup task
486        let agent_runners: Arc<RwLock<HashMap<String, AgentRunner>>> =
487            Arc::new(RwLock::new(HashMap::new()));
488
489        // Start runner cleanup task
490        {
491            let runners = agent_runners.clone();
492            tokio::spawn(async move {
493                loop {
494                    tokio::time::sleep(Duration::from_secs(60)).await;
495
496                    let mut runners_guard = runners.write().await;
497                    let now = Utc::now();
498
499                    runners_guard.retain(|session_id, runner| {
500                        let should_keep = match &runner.status {
501                            AgentStatus::Running => true,
502                            _ => {
503                                let age = now.signed_duration_since(
504                                    runner.completed_at.unwrap_or(runner.started_at),
505                                );
506                                age.num_seconds() < 300 // 5 minute TTL
507                            }
508                        };
509
510                        if !should_keep {
511                            log::debug!("[{}] Cleaning up completed runner", session_id);
512                        }
513
514                        should_keep
515                    });
516                }
517            });
518        }
519
520        // Initialize Claude runners with cleanup task
521        let claude_runners: Arc<RwLock<HashMap<String, AgentRunner>>> =
522            Arc::new(RwLock::new(HashMap::new()));
523
524        {
525            let runners = claude_runners.clone();
526            tokio::spawn(async move {
527                loop {
528                    tokio::time::sleep(Duration::from_secs(60)).await;
529
530                    let mut runners_guard = runners.write().await;
531                    let now = Utc::now();
532
533                    runners_guard.retain(|session_id, runner| {
534                        let should_keep = match &runner.status {
535                            AgentStatus::Running => true,
536                            _ => {
537                                let age = now.signed_duration_since(
538                                    runner.completed_at.unwrap_or(runner.started_at),
539                                );
540                                age.num_seconds() < 300 // 5 minute TTL
541                            }
542                        };
543
544                        if !should_keep {
545                            log::debug!("[claude:{}] Cleaning up completed runner", session_id);
546                        }
547
548                        should_keep
549                    });
550                }
551            });
552        }
553
554        // Initialize process registry (external process lifecycle)
555        let process_registry = Arc::new(ProcessRegistry::new());
556
557        Self {
558            app_data_dir: bamboo_home_dir,
559            config,
560            provider: Arc::new(RwLock::new(provider.clone())),
561            sessions: Arc::new(RwLock::new(HashMap::new())),
562            storage,
563            llm: provider,
564            tools,
565            cancel_tokens: Arc::new(RwLock::new(HashMap::new())),
566            skill_manager,
567            mcp_manager,
568            metrics_service,
569            model_name,
570            agent_runners,
571            process_registry,
572            claude_cli_path,
573            claude_runners,
574            claude_session_aliases: Arc::new(RwLock::new(HashMap::new())),
575            metrics_bus: None, // Will be set by server if needed
576        }
577    }
578
579    /// Reload the provider based on current configuration
580    ///
581    /// Re-reads the configuration and creates a new LLM provider
582    /// instance, allowing runtime switching of providers or models.
583    ///
584    /// # Returns
585    ///
586    /// `Ok(())` if the provider was successfully reloaded.
587    ///
588    /// # Errors
589    ///
590    /// Returns an error if:
591    /// - Configuration cannot be read
592    /// - Provider initialization fails (e.g., invalid API key)
593    ///
594    /// # Example
595    ///
596    /// ```rust,no_run
597    /// use bamboo_agent::server::app_state::AppState;
598    /// use std::path::PathBuf;
599    ///
600    /// #[tokio::main]
601    /// async fn main() {
602    ///     let state = AppState::new(PathBuf::from("/path/to/.bamboo")).await;
603    ///
604    ///     // User updated config file...
605    ///     state.reload_provider().await.expect("Provider reload failed");
606    /// }
607    /// ```
608    pub async fn reload_provider(&self) -> Result<(), crate::agent::llm::LLMError> {
609        let config = self.config.read().await.clone();
610
611        log::info!(
612            "Reloading provider: type={}, model={:?}",
613            config.provider,
614            config
615                .providers
616                .anthropic
617                .as_ref()
618                .and_then(|p| p.model.as_ref())
619        );
620
621        let new_provider =
622            crate::agent::llm::create_provider_with_dir(&config, self.app_data_dir.clone()).await?;
623
624        let mut provider = self.provider.write().await;
625        *provider = new_provider;
626
627        log::info!("Provider reloaded successfully to: {}", config.provider);
628        Ok(())
629    }
630
631    /// Reload the configuration from file
632    ///
633    /// Reads the configuration file again and updates the in-memory
634    /// config. Note: This does NOT automatically reload the provider;
635    /// call `reload_provider()` afterwards if needed.
636    ///
637    /// # Returns
638    ///
639    /// The newly loaded configuration.
640    ///
641    /// # Example
642    ///
643    /// ```rust,no_run
644    /// use bamboo_agent::server::app_state::AppState;
645    /// use std::path::PathBuf;
646    ///
647    /// #[tokio::main]
648    /// async fn main() {
649    ///     let state = AppState::new(PathBuf::from("/path/to/.bamboo")).await;
650    ///
651    ///     // Reload config from disk
652    ///     let new_config = state.reload_config().await;
653    ///
654    ///     // Optionally reload provider with new config
655    ///     state.reload_provider().await.ok();
656    /// }
657    /// ```
658    pub async fn reload_config(&self) -> Config {
659        let new_config = Config::from_data_dir(Some(self.app_data_dir.clone()));
660        let mut config = self.config.write().await;
661        *config = new_config.clone();
662        new_config
663    }
664
665    /// Persist the current in-memory config to disk (`{app_data_dir}/config.json`).
666    ///
667    /// This is the single "exit" for configuration writes in the server runtime.
668    pub async fn persist_config(&self) -> anyhow::Result<()> {
669        let config = self.config.read().await.clone();
670        tokio::task::spawn_blocking(move || config.save())
671            .await
672            .map_err(|e| anyhow::anyhow!("Config save task failed: {e}"))??;
673        Ok(())
674    }
675
676    /// Get a clone of the current provider
677    ///
678    /// Returns a thread-safe reference to the current LLM provider.
679    /// This is the preferred way to access the provider for making requests.
680    ///
681    /// # Returns
682    ///
683    /// An Arc reference to the current provider implementation.
684    ///
685    /// # Example
686    ///
687    /// ```rust,no_run
688    /// use bamboo_agent::server::app_state::AppState;
689    /// use std::path::PathBuf;
690    ///
691    /// #[tokio::main]
692    /// async fn main() {
693    ///     let state = AppState::new(PathBuf::from("/path/to/.bamboo")).await;
694    ///     let provider = state.get_provider().await;
695    ///
696    ///     // Use provider to make LLM requests...
697    /// }
698    /// ```
699    pub async fn get_provider(&self) -> Arc<dyn LLMProvider> {
700        self.provider.read().await.clone()
701    }
702
703    /// Shutdown all MCP servers gracefully
704    ///
705    /// Sends shutdown signals to all running MCP server processes
706    /// and waits for them to terminate cleanly.
707    ///
708    /// This should be called during application shutdown to ensure
709    /// MCP servers are not left running as orphaned processes.
710    #[allow(dead_code)]
711    pub async fn shutdown(&self) {
712        log::info!("Shutting down MCP servers...");
713        self.mcp_manager.shutdown_all().await;
714        log::info!("MCP servers shut down complete");
715    }
716
717    /// Save an agent event to persistent storage
718    ///
719    /// Appends the event to the session's event log in JSONL format.
720    ///
721    /// # Arguments
722    ///
723    /// * `session_id` - Session identifier
724    /// * `event` - Event to save
725    #[allow(dead_code)]
726    pub async fn save_event(&self, session_id: &str, event: &AgentEvent) {
727        let _ = self.storage.append_event(session_id, event).await;
728    }
729
730    /// Save a complete session to persistent storage
731    ///
732    /// Writes the session metadata and all events to the storage backend.
733    ///
734    /// # Arguments
735    ///
736    /// * `session` - Session object to save
737    pub async fn save_session(&self, session: &crate::agent::core::Session) {
738        let _ = self.storage.save_session(session).await;
739    }
740
741    /// Get all tool schemas from the composite tool executor
742    ///
743    /// Returns schemas for both built-in tools and MCP-provided tools.
744    /// These schemas are used to inform the LLM about available tools.
745    ///
746    /// # Returns
747    ///
748    /// Vector of tool schemas in Anthropic's tool definition format.
749    pub fn get_all_tool_schemas(&self) -> Vec<crate::agent::core::tools::ToolSchema> {
750        self.tools.list_tools()
751    }
752}
753
#[cfg(test)]
mod tests {
    use super::*;

    /// Smoke test: a freshly constructed AppState has an empty session
    /// cache and a non-empty default model name.
    #[tokio::test]
    async fn test_app_state_creation() {
        let data_dir = tempfile::tempdir().unwrap();
        let state = AppState::new(data_dir.path().to_path_buf()).await;

        let sessions = state.sessions.read().await;
        assert!(sessions.is_empty());
        assert!(!state.model_name.is_empty());
    }
}