Skip to main content

walrus_daemon/daemon/
builder.rs

1//! Daemon construction and lifecycle methods.
2//!
3//! This module provides the [`Daemon`] builder and reload logic as private
4//! `impl Daemon` methods. [`Daemon::build`] constructs a fully-configured
5//! daemon from a [`DaemonConfig`]. [`Daemon::reload`] rebuilds the runtime
6//! in-place from disk without restarting transports.
7
8use crate::{
9    Daemon, DaemonConfig,
10    daemon::event::{DaemonEvent, DaemonEventSender},
11    ext::hub::DownloadRegistry,
12    hook::{self, DaemonHook, task::TaskRegistry},
13};
14use anyhow::Result;
15use compact_str::CompactString;
16use model::ProviderManager;
17use std::{path::Path, sync::Arc};
18use tokio::sync::{Mutex, RwLock};
19use wcore::{AgentConfig, Runtime, ToolRequest};
20
/// Built-in system prompt for the primary `walrus` agent, embedded into the
/// binary at compile time from the repository's prompts directory.
const SYSTEM_AGENT: &str = include_str!("../../prompts/walrus.md");
22
impl Daemon {
    /// Build a fully-configured [`Daemon`] from the given config, config
    /// directory, and event sender.
    ///
    /// The runtime is stored as `RwLock<Arc<Runtime>>` so that [`Daemon::reload`]
    /// can swap in a fresh runtime atomically while in-flight requests keep
    /// their own `Arc` handle to the old one.
    pub(crate) async fn build(
        config: &DaemonConfig,
        config_dir: &Path,
        event_tx: DaemonEventSender,
    ) -> Result<Self> {
        let runtime = Self::build_runtime(config, config_dir, &event_tx).await?;
        Ok(Self {
            runtime: Arc::new(RwLock::new(Arc::new(runtime))),
            config_dir: config_dir.to_path_buf(),
            event_tx,
            // NOTE(review): agents config is snapshotted here and is NOT
            // refreshed by `reload` (which takes `&self`) — confirm whether a
            // stale `agents_config` after reload is acceptable.
            agents_config: config.agents.clone(),
        })
    }

    /// Rebuild the runtime from disk and swap it in atomically.
    ///
    /// In-flight requests that already hold a reference to the old runtime
    /// complete normally. New requests after the swap see the new runtime.
    ///
    /// # Errors
    /// Fails if `walrus.toml` cannot be loaded or the new runtime cannot be
    /// constructed; in that case the old runtime remains active untouched.
    pub async fn reload(&self) -> Result<()> {
        let config = DaemonConfig::load(&self.config_dir.join("walrus.toml"))?;
        // Build fully before taking the write lock so a failed reload never
        // disturbs the running runtime.
        let new_runtime = Self::build_runtime(&config, &self.config_dir, &self.event_tx).await?;
        *self.runtime.write().await = Arc::new(new_runtime);
        tracing::info!("daemon reloaded");
        Ok(())
    }

    /// Construct a fresh [`Runtime`] from config. Used by both [`build`] and [`reload`].
    ///
    /// Ordering: providers first, then the hook (which performs async I/O such
    /// as the embeddings pre-download), then the tool-sender bridge, and
    /// finally agent registration against the assembled runtime.
    async fn build_runtime(
        config: &DaemonConfig,
        config_dir: &Path,
        event_tx: &DaemonEventSender,
    ) -> Result<Runtime<ProviderManager, DaemonHook>> {
        let manager = Self::build_providers(config).await?;
        let hook = Self::build_hook(config, config_dir, event_tx).await?;
        let tool_tx = Self::build_tool_sender(event_tx);
        let mut runtime = Runtime::new(manager, hook, Some(tool_tx)).await;
        Self::load_agents(&mut runtime, config_dir, config)?;
        Ok(runtime)
    }

    /// Construct the provider manager from config.
    ///
    /// Loads a single local model from the registry (if local feature enabled)
    /// and any remote providers from config. Only one local model is active
    /// at a time to avoid memory pressure.
    ///
    /// # Errors
    /// Fails if `walrus.model` is missing from `walrus.toml`, or if adding a
    /// provider to the manager fails.
    async fn build_providers(config: &DaemonConfig) -> Result<ProviderManager> {
        // The active model name is mandatory; everything else is optional.
        let active_model = config
            .walrus
            .model
            .clone()
            .ok_or_else(|| anyhow::anyhow!("walrus.model is required in walrus.toml"))?;
        let manager = ProviderManager::new(active_model.clone());

        // Add the active local model — try registry first, then custom config.
        #[cfg(feature = "local")]
        {
            // Resolution order: registry by name, registry by key, then a
            // user-defined HF model from `[model.models]` built lazily.
            // NOTE(review): if none of the three match, no local provider is
            // added and no warning is logged — presumably a remote provider
            // then serves the active model; confirm this silent fall-through
            // is intended.
            if let Some(entry) = model::local::registry::find(&active_model) {
                let local = model::local::registry::build_local(entry);
                manager.add_provider(active_model.clone(), model::Provider::Local(local))?;
            } else if let Some(entry) = model::local::registry::find_by_key(&active_model) {
                let local = model::local::registry::build_local(entry);
                manager.add_provider(active_model.clone(), model::Provider::Local(local))?;
            } else if let Some(hf) = config.model.models.get(active_model.as_str()) {
                let local = model::local::Local::lazy(
                    &hf.model_id,
                    hf.loader,
                    None,
                    hf.chat_template.clone(),
                    hf.gguf_file.as_deref(),
                );
                manager.add_provider(active_model.clone(), model::Provider::Local(local))?;
            }
        }

        // Add remote providers from config.
        // (The loop variable deliberately shadows the outer `config`.)
        for config in config.model.providers.values() {
            manager.add_config(config).await?;
        }

        tracing::info!(
            "provider manager initialized — active model: {}",
            manager.active_model_name().unwrap_or_default()
        );
        Ok(manager)
    }

    /// Build the daemon hook with all backends (memory, skills, MCP, tasks, downloads).
    ///
    /// Failure policy varies by backend: memory, search, and fetch-client
    /// failures abort construction; embeddings pre-download and skill loading
    /// degrade gracefully with a warning.
    async fn build_hook(
        config: &DaemonConfig,
        config_dir: &Path,
        event_tx: &DaemonEventSender,
    ) -> Result<DaemonHook> {
        let downloads = Arc::new(Mutex::new(DownloadRegistry::new()));

        // Pre-download embeddings model files so MemoryHook::open() finds them cached.
        // Best-effort: a failure degrades memory quality but does not abort startup.
        if let Err(e) = crate::ext::hub::embeddings::pre_download(&downloads).await {
            tracing::warn!("embeddings pre-download failed (memory may be degraded): {e}");
        }

        let memory_dir = config_dir.join("memory");
        let memory = hook::memory::MemoryHook::open(memory_dir, &config.memory).await?;
        tracing::info!("memory hook initialized (LanceDB graph)");

        // Skills are optional: a load failure falls back to an empty handler.
        let skills_dir = config_dir.join(wcore::paths::SKILLS_DIR);
        let skills = hook::skill::SkillHandler::load(skills_dir).unwrap_or_else(|e| {
            tracing::warn!("failed to load skills: {e}");
            hook::skill::SkillHandler::default()
        });

        let mcp_servers = config.mcps.values().cloned().collect::<Vec<_>>();
        let mcp_handler = hook::mcp::McpHandler::load(&mcp_servers).await;

        // Task registry shares the daemon event channel so task lifecycle
        // events flow through the main event loop.
        let tasks = Arc::new(Mutex::new(TaskRegistry::new(
            config.tasks.max_concurrent,
            config.tasks.viewable_window,
            std::time::Duration::from_secs(config.tasks.task_timeout),
            event_tx.clone(),
        )));

        let sandboxed = detect_sandbox();
        if sandboxed {
            tracing::info!("sandbox mode active — OS tools bypass permission check");
        }

        let aggregator = wsearch::aggregator::Aggregator::new(config.search.clone())
            .map_err(|e| anyhow::anyhow!("search init failed: {e}"))?;
        let fetch_client = wsearch::browser::fetch::default_client()
            .map_err(|e| anyhow::anyhow!("fetch client init failed: {e}"))?;
        tracing::info!("search tools initialized");

        Ok(DaemonHook::new(
            memory,
            skills,
            mcp_handler,
            tasks,
            downloads,
            config.permissions.clone(),
            sandboxed,
            aggregator,
            fetch_client,
        ))
    }

    /// Build a [`ToolSender`] that forwards [`ToolRequest`]s into the daemon
    /// event loop as [`DaemonEvent::ToolCall`] variants.
    ///
    /// Spawns a lightweight bridge task relaying from the tool channel into
    /// the main daemon event channel.
    ///
    /// The bridge exits when either end closes: `recv()` returns `None` once
    /// all tool senders drop, and a failed `send` means the daemon event loop
    /// is gone.
    fn build_tool_sender(event_tx: &DaemonEventSender) -> wcore::ToolSender {
        let (tool_tx, mut tool_rx) = tokio::sync::mpsc::unbounded_channel::<ToolRequest>();
        let event_tx = event_tx.clone();
        tokio::spawn(async move {
            while let Some(req) = tool_rx.recv().await {
                if event_tx.send(DaemonEvent::ToolCall(req)).is_err() {
                    break;
                }
            }
        });
        tool_tx
    }

    /// Load agents and add them to the runtime.
    ///
    /// The built-in walrus agent is always registered first. Sub-agents are
    /// loaded by iterating TOML `[agents.*]` entries and matching each to a
    /// `.md` prompt file from the agents directory.
    ///
    /// # Errors
    /// Fails only if the agents directory cannot be read; individual agent
    /// mismatches (TOML entry without a prompt file) are warned and skipped.
    fn load_agents(
        runtime: &mut Runtime<ProviderManager, DaemonHook>,
        config_dir: &Path,
        config: &DaemonConfig,
    ) -> Result<()> {
        // Load prompt files from disk: (filename_stem, text).
        let prompts = crate::config::load_agents_dir(&config_dir.join(wcore::paths::AGENTS_DIR))?;
        let prompt_map: std::collections::BTreeMap<String, String> = prompts.into_iter().collect();

        // Built-in walrus agent. Its system prompt is the compiled-in default,
        // ignoring any prompt file; the rest comes from `[walrus]` config.
        let mut walrus_config = config.walrus.clone();
        walrus_config.name = CompactString::from("walrus");
        walrus_config.system_prompt = SYSTEM_AGENT.to_owned();
        runtime.add_agent(walrus_config);

        // Sub-agents from TOML — each must have a matching .md file.
        for (name, agent_config) in &config.agents {
            let Some(prompt) = prompt_map.get(name) else {
                tracing::warn!("agent '{name}' in TOML has no matching .md file, skipping");
                continue;
            };
            let mut agent = agent_config.clone();
            agent.name = CompactString::from(name.as_str());
            agent.system_prompt = prompt.clone();
            tracing::info!("registered agent '{name}' (thinking={})", agent.thinking);
            runtime.add_agent(agent);
        }

        // Also register agents that have .md files but no TOML entry (defaults).
        // These inherit the walrus agent's `thinking` setting.
        let default_think = config.walrus.thinking;
        for (stem, prompt) in &prompt_map {
            if config.agents.contains_key(stem) {
                continue;
            }
            let mut agent = AgentConfig::new(stem.as_str());
            agent.system_prompt = prompt.clone();
            agent.thinking = default_think;
            tracing::info!("registered agent '{stem}' (defaults, thinking={default_think})");
            runtime.add_agent(agent);
        }

        // Populate per-agent scope maps for dispatch enforcement.
        // Done in a second pass so every registered agent — built-in, TOML,
        // and default — gets a scope entry.
        for agent_config in runtime.agents() {
            runtime
                .hook
                .register_scope(agent_config.name.clone(), &agent_config);
        }

        Ok(())
    }
}
243
244/// Detect sandbox mode by checking if the current process is running as
245/// a user named `walrus`.
/// Detect sandbox mode by checking whether the current process runs under a
/// user account named `walrus`, as reported by the `USER` env var (with
/// `LOGNAME` as a fallback). Returns `false` when neither variable is set.
fn detect_sandbox() -> bool {
    let current_user = std::env::var("USER").or_else(|_| std::env::var("LOGNAME"));
    match current_user {
        Ok(name) => name == "walrus",
        Err(_) => false,
    }
}