// walrus_daemon/daemon/builder.rs
//! Daemon construction and lifecycle methods.
//!
//! This module provides the [`Daemon`] builder and reload logic as private
//! `impl Daemon` methods. [`Daemon::build`] constructs a fully-configured
//! daemon from a [`DaemonConfig`]. [`Daemon::reload`] rebuilds the runtime
//! in-place from disk without restarting transports.

use crate::{
    Daemon, DaemonConfig,
    daemon::event::{DaemonEvent, DaemonEventSender},
    hook::{self, DaemonHook, task::TaskRegistry},
};
use anyhow::Result;
use compact_str::CompactString;
use model::ProviderManager;
use std::{path::Path, sync::Arc};
use tokio::sync::{Mutex, RwLock};
use wcore::{AgentConfig, Runtime, ToolRequest};

/// System prompt for the built-in `walrus` agent, embedded at compile time
/// from the repository's prompts directory.
const SYSTEM_AGENT: &str = include_str!("../../prompts/walrus.md");
21
impl Daemon {
    /// Build a fully-configured [`Daemon`] from the given config, config
    /// directory, and event sender.
    pub(crate) async fn build(
        config: &DaemonConfig,
        config_dir: &Path,
        event_tx: DaemonEventSender,
    ) -> Result<Self> {
        let runtime = Self::build_runtime(config, config_dir, &event_tx).await?;
        Ok(Self {
            // Double Arc: the outer RwLock lets `reload` swap the runtime
            // atomically, while the inner Arc lets in-flight requests keep
            // the old runtime alive after a swap.
            runtime: Arc::new(RwLock::new(Arc::new(runtime))),
            config_dir: config_dir.to_path_buf(),
            event_tx,
            agents_config: config.agents.clone(),
        })
    }

    /// Rebuild the runtime from disk and swap it in atomically.
    ///
    /// In-flight requests that already hold a reference to the old runtime
    /// complete normally. New requests after the swap see the new runtime.
    pub async fn reload(&self) -> Result<()> {
        // Re-read the config file from disk so edits made after startup apply.
        let config = DaemonConfig::load(&self.config_dir.join("walrus.toml"))?;
        // Build the new runtime fully *before* taking the write lock, so the
        // lock is held only for the duration of the pointer swap.
        let new_runtime = Self::build_runtime(&config, &self.config_dir, &self.event_tx).await?;
        *self.runtime.write().await = Arc::new(new_runtime);
        tracing::info!("daemon reloaded");
        Ok(())
    }

    /// Construct a fresh [`Runtime`] from config. Used by both [`build`] and [`reload`].
    ///
    /// Assembly order: providers, then the hook (memory/skills/MCP/tasks),
    /// then the tool-call bridge, and finally the agents are registered on
    /// the new runtime.
    async fn build_runtime(
        config: &DaemonConfig,
        config_dir: &Path,
        event_tx: &DaemonEventSender,
    ) -> Result<Runtime<ProviderManager, DaemonHook>> {
        let manager = Self::build_providers(config).await?;
        let hook = Self::build_hook(config, config_dir, event_tx).await?;
        let tool_tx = Self::build_tool_sender(event_tx);
        let mut runtime = Runtime::new(manager, hook, Some(tool_tx)).await;
        Self::load_agents(&mut runtime, config_dir, config)?;
        Ok(runtime)
    }

    /// Construct the provider manager from config.
    ///
    /// Loads a single local model from the registry (if local feature enabled)
    /// and any remote providers from config. Only one local model is active
    /// at a time to avoid memory pressure.
    async fn build_providers(config: &DaemonConfig) -> Result<ProviderManager> {
        // `walrus.model` names the active model; it is the only required key.
        let active_model = config
            .walrus
            .model
            .clone()
            .ok_or_else(|| anyhow::anyhow!("walrus.model is required in walrus.toml"))?;
        let manager = ProviderManager::new(active_model.clone());

        // Add the active local model — try registry first, then custom config.
        #[cfg(feature = "local")]
        {
            // NOTE(review): `find` vs `find_by_key` semantics are defined in
            // model::local::registry — presumably full name vs short key;
            // confirm there.
            if let Some(entry) = model::local::registry::find(&active_model) {
                let local = model::local::registry::build_local(entry);
                manager.add_provider(active_model.clone(), model::Provider::Local(local))?;
            } else if let Some(entry) = model::local::registry::find_by_key(&active_model) {
                let local = model::local::registry::build_local(entry);
                manager.add_provider(active_model.clone(), model::Provider::Local(local))?;
            } else if let Some(hf) = config.model.models.get(active_model.as_str()) {
                // Custom HuggingFace model from `[model.models]`; loaded
                // lazily so daemon startup does not pay the model load cost.
                let local = model::local::Local::lazy(
                    &hf.model_id,
                    hf.loader,
                    None,
                    hf.chat_template.clone(),
                    hf.gguf_file.as_deref(),
                );
                manager.add_provider(active_model.clone(), model::Provider::Local(local))?;
            }
            // NOTE(review): if none of the branches match, no local provider is
            // registered here — presumably the active model is then expected to
            // come from a remote provider below; confirm ProviderManager's
            // behavior for an unresolvable active model.
        }

        // Add remote providers from config.
        for config in config.model.providers.values() {
            manager.add_config(config).await?;
        }

        tracing::info!(
            "provider manager initialized — active model: {}",
            manager.active_model_name().unwrap_or_default()
        );
        Ok(manager)
    }

    /// Build the daemon hook with all backends (memory, skills, MCP, tasks).
    async fn build_hook(
        config: &DaemonConfig,
        config_dir: &Path,
        event_tx: &DaemonEventSender,
    ) -> Result<DaemonHook> {
        let memory_dir = config_dir.join("memory");
        let memory = hook::memory::MemoryHook::open(memory_dir, &config.memory).await?;
        tracing::info!("memory hook initialized (LanceDB graph)");

        // Skill load failures are non-fatal: warn and fall back to an empty
        // handler so the daemon still starts.
        let skills_dir = config_dir.join(wcore::paths::SKILLS_DIR);
        let skills = hook::skill::SkillHandler::load(skills_dir).unwrap_or_else(|e| {
            tracing::warn!("failed to load skills: {e}");
            hook::skill::SkillHandler::default()
        });

        let mcp_servers = config.mcps.values().cloned().collect::<Vec<_>>();
        let mcp_handler = hook::mcp::McpHandler::load(&mcp_servers).await;

        // Task registry is shared (Arc<Mutex<..>>) with background task
        // workers; `task_timeout` is configured in seconds.
        let tasks = Arc::new(Mutex::new(TaskRegistry::new(
            config.tasks.max_concurrent,
            config.tasks.viewable_window,
            std::time::Duration::from_secs(config.tasks.task_timeout),
            event_tx.clone(),
        )));

        let sandboxed = detect_sandbox();
        if sandboxed {
            tracing::info!("sandbox mode active — OS tools bypass permission check");
        }

        Ok(DaemonHook::new(
            memory,
            skills,
            mcp_handler,
            tasks,
            config.permissions.clone(),
            sandboxed,
        ))
    }

    /// Build a [`ToolSender`] that forwards [`ToolRequest`]s into the daemon
    /// event loop as [`DaemonEvent::ToolCall`] variants.
    ///
    /// Spawns a lightweight bridge task relaying from the tool channel into
    /// the main daemon event channel.
    fn build_tool_sender(event_tx: &DaemonEventSender) -> wcore::ToolSender {
        let (tool_tx, mut tool_rx) = tokio::sync::mpsc::unbounded_channel::<ToolRequest>();
        let event_tx = event_tx.clone();
        tokio::spawn(async move {
            // The bridge ends when either side closes: `recv` returns None once
            // all tool senders drop, and a failed `send` means the daemon event
            // loop is gone.
            while let Some(req) = tool_rx.recv().await {
                if event_tx.send(DaemonEvent::ToolCall(req)).is_err() {
                    break;
                }
            }
        });
        tool_tx
    }

    /// Load agents and add them to the runtime.
    ///
    /// The built-in walrus agent is always registered first. Sub-agents are
    /// loaded by iterating TOML `[agents.*]` entries and matching each to a
    /// `.md` prompt file from the agents directory.
    fn load_agents(
        runtime: &mut Runtime<ProviderManager, DaemonHook>,
        config_dir: &Path,
        config: &DaemonConfig,
    ) -> Result<()> {
        // Load prompt files from disk: (filename_stem, text).
        let prompts = crate::config::load_agents_dir(&config_dir.join(wcore::paths::AGENTS_DIR))?;
        let prompt_map: std::collections::BTreeMap<String, String> = prompts.into_iter().collect();

        // Built-in walrus agent.
        // Its name and system prompt are always forced to the built-ins, even
        // if the TOML overrode them; other `[walrus]` settings pass through.
        let mut walrus_config = config.walrus.clone();
        walrus_config.name = CompactString::from("walrus");
        walrus_config.system_prompt = SYSTEM_AGENT.to_owned();
        runtime.add_agent(walrus_config);

        // Sub-agents from TOML — each must have a matching .md file.
        for (name, agent_config) in &config.agents {
            let Some(prompt) = prompt_map.get(name) else {
                tracing::warn!("agent '{name}' in TOML has no matching .md file, skipping");
                continue;
            };
            let mut agent = agent_config.clone();
            agent.name = CompactString::from(name.as_str());
            agent.system_prompt = prompt.clone();
            tracing::info!("registered agent '{name}' (thinking={})", agent.thinking);
            runtime.add_agent(agent);
        }

        // Also register agents that have .md files but no TOML entry (defaults).
        // These inherit the walrus agent's `thinking` setting.
        let default_think = config.walrus.thinking;
        for (stem, prompt) in &prompt_map {
            if config.agents.contains_key(stem) {
                continue;
            }
            let mut agent = AgentConfig::new(stem.as_str());
            agent.system_prompt = prompt.clone();
            agent.thinking = default_think;
            tracing::info!("registered agent '{stem}' (defaults, thinking={default_think})");
            runtime.add_agent(agent);
        }

        // Populate per-agent scope maps for dispatch enforcement.
        // NOTE(review): `agent_config` is already a reference here, so
        // `&agent_config` is a double reference relying on auto-deref;
        // consider dropping the extra `&`.
        for agent_config in runtime.agents() {
            runtime
                .hook
                .register_scope(agent_config.name.clone(), &agent_config);
        }

        Ok(())
    }
}
/// Detect sandbox mode: returns `true` when the current process appears to
/// run as a user named `walrus`, judged by `$USER` (or `$LOGNAME` when
/// `$USER` is unset or non-Unicode).
fn detect_sandbox() -> bool {
    let user = std::env::var("USER").or_else(|_| std::env::var("LOGNAME"));
    matches!(user.as_deref(), Ok("walrus"))
}