// aether_core/core/agent_builder.rs
1use super::agent::{AgentConfig, AutoContinue};
2use crate::agent_spec::AgentSpec;
3use crate::context::CompactionConfig;
4use crate::core::{Agent, Prompt, Result};
5use crate::events::{AgentMessage, UserMessage};
6use crate::mcp::run_mcp_task::McpCommand;
7use llm::parser::ModelProviderParser;
8use llm::types::IsoString;
9use llm::{ChatMessage, Context, StreamingModelProvider, ToolDefinition};
10use std::sync::Arc;
11use std::time::Duration;
12use tokio::sync::mpsc::{self, Receiver, Sender};
13use tokio::task::JoinHandle;
14
/// Handle for communicating with a running Agent
pub struct AgentHandle {
    // Join handle of the spawned agent task; used to abort, poll, or await it.
    handle: JoinHandle<()>,
}
19
20impl AgentHandle {
21    /// Abort the agent task immediately.
22    pub fn abort(&self) {
23        self.handle.abort();
24    }
25
26    /// Returns `true` if the agent task has finished.
27    pub fn is_finished(&self) -> bool {
28        self.handle.is_finished()
29    }
30
31    /// Wait for the agent task to complete.
32    pub async fn await_completion(self) {
33        let _ = self.handle.await;
34    }
35}
36
/// Builder for configuring and spawning an [`Agent`] task.
pub struct AgentBuilder {
    // Streaming LLM provider the agent will talk to.
    llm: Arc<dyn StreamingModelProvider>,
    // System prompts; concatenated in order at spawn time.
    prompts: Vec<Prompt>,
    // Tool definitions exposed to the model.
    tool_definitions: Vec<ToolDefinition>,
    // Conversation history inserted after the system prompt (e.g. restored session).
    initial_messages: Vec<ChatMessage>,
    // Channel for dispatching tool calls to the MCP runner, if tools are configured.
    mcp_tx: Option<Sender<McpCommand>>,
    // Capacity of the user-message and agent-message channels.
    channel_capacity: usize,
    // Per-tool execution timeout before a call is marked failed.
    tool_timeout: Duration,
    // Context compaction settings; `None` disables compaction entirely.
    compaction_config: Option<CompactionConfig>,
    // Cap on auto-continue attempts when the model stops without tool calls.
    max_auto_continues: u32,
    // Stable key (typically a session UUID) for provider prompt-cache routing.
    prompt_cache_key: Option<String>,
}
49
50impl AgentBuilder {
51    pub fn new(llm: Arc<dyn StreamingModelProvider>) -> Self {
52        Self {
53            llm,
54            prompts: Vec::new(),
55            tool_definitions: Vec::new(),
56            initial_messages: Vec::new(),
57            mcp_tx: None,
58            channel_capacity: 1000,
59            tool_timeout: Duration::from_secs(60 * 20),
60            compaction_config: Some(CompactionConfig::default()),
61            max_auto_continues: 3,
62            prompt_cache_key: None,
63        }
64    }
65
66    /// Create a builder from a resolved `AgentSpec`.
67    ///
68    /// The LLM provider is derived from `spec.model` via `ModelProviderParser`.
69    /// `base_prompts` are prepended before the spec's own prompts.
70    pub fn from_spec(spec: &AgentSpec, base_prompts: Vec<Prompt>) -> Result<Self> {
71        let (provider, _) = ModelProviderParser::default().parse(&spec.model)?;
72        let mut builder = Self::new(Arc::from(provider));
73        for prompt in base_prompts {
74            builder = builder.system_prompt(prompt);
75        }
76        for prompt in &spec.prompts {
77            builder = builder.system_prompt(prompt.clone());
78        }
79        Ok(builder)
80    }
81
82    /// Add a prompt to the system prompt.
83    ///
84    /// Multiple prompts are concatenated with double newlines.
85    pub fn system_prompt(mut self, prompt: Prompt) -> Self {
86        self.prompts.push(prompt);
87        self
88    }
89
90    pub fn tools(mut self, tx: Sender<McpCommand>, tools: Vec<ToolDefinition>) -> Self {
91        self.tool_definitions = tools;
92        self.mcp_tx = Some(tx);
93        self
94    }
95
96    /// Set the timeout for tool execution
97    ///
98    /// If a tool does not return a result within this duration, it will be marked as failed
99    /// and the agent will continue processing.
100    ///
101    /// Default: 20 minutes
102    pub fn tool_timeout(mut self, timeout: Duration) -> Self {
103        self.tool_timeout = timeout;
104        self
105    }
106
107    /// Configure context compaction settings.
108    ///
109    /// By default, agents automatically compact context when token usage exceeds
110    /// 85% of the context window, preventing overflow during long-running tasks.
111    ///
112    /// # Examples
113    /// ```ignore
114    /// // Custom threshold
115    /// agent(llm).compaction(CompactionConfig::with_threshold(0.9))
116    ///
117    /// // Disable compaction entirely
118    /// agent(llm).compaction(CompactionConfig::disabled())
119    ///
120    /// // Full customization
121    /// agent(llm).compaction(
122    ///     CompactionConfig::with_threshold(0.85)
123    ///         .keep_recent_tool_results(3)
124    ///         .min_messages(20)
125    /// )
126    /// ```
127    pub fn compaction(mut self, config: CompactionConfig) -> Self {
128        self.compaction_config = Some(config);
129        self
130    }
131
132    /// Disable context compaction entirely.
133    ///
134    /// Overflow errors from the model will be surfaced directly to callers.
135    pub fn disable_compaction(mut self) -> Self {
136        self.compaction_config = None;
137        self
138    }
139
140    /// Configure the maximum number of auto-continue attempts.
141    ///
142    /// When the LLM stops without making tool calls, the agent may inject a
143    /// continuation prompt and restart the LLM stream for resumable stop
144    /// reasons (for example, token length limits).
145    ///
146    /// This setting limits how many times the agent will attempt to continue
147    /// before giving up and returning `AgentMessage::Done`.
148    ///
149    /// Default: 3
150    ///
151    /// # Example
152    /// ```ignore
153    /// // Allow up to 5 auto-continue attempts
154    /// agent(llm).max_auto_continues(5)
155    ///
156    /// // Disable auto-continue entirely
157    /// agent(llm).max_auto_continues(0)
158    /// ```
159    pub fn max_auto_continues(mut self, max: u32) -> Self {
160        self.max_auto_continues = max;
161        self
162    }
163
164    /// Set a prompt cache key for LLM provider request routing.
165    ///
166    /// This is typically a session ID (UUID) that remains stable across all
167    /// turns within a conversation, improving prompt cache hit rates.
168    pub fn prompt_cache_key(mut self, key: String) -> Self {
169        self.prompt_cache_key = Some(key);
170        self
171    }
172
173    /// Pre-populate the context with conversation history (e.g. from a restored session).
174    ///
175    /// These messages are inserted after the system prompt.
176    pub fn messages(mut self, messages: Vec<ChatMessage>) -> Self {
177        self.initial_messages = messages;
178        self
179    }
180
181    pub async fn spawn(self) -> Result<(Sender<UserMessage>, Receiver<AgentMessage>, AgentHandle)> {
182        let mut messages = Vec::new();
183
184        if !self.prompts.is_empty() {
185            let system_content = Prompt::build_all(&self.prompts).await?;
186            if !system_content.is_empty() {
187                messages.push(ChatMessage::System {
188                    content: system_content,
189                    timestamp: IsoString::now(),
190                });
191            }
192        }
193
194        messages.extend(self.initial_messages);
195
196        let (user_message_tx, user_message_rx) =
197            mpsc::channel::<UserMessage>(self.channel_capacity);
198
199        let (message_tx, agent_message_rx) = mpsc::channel::<AgentMessage>(self.channel_capacity);
200
201        let mut context = Context::new(messages, self.tool_definitions);
202        context.set_prompt_cache_key(self.prompt_cache_key);
203
204        let config = AgentConfig {
205            llm: self.llm,
206            context,
207            mcp_command_tx: self.mcp_tx,
208            tool_timeout: self.tool_timeout,
209            compaction_config: self.compaction_config,
210            auto_continue: AutoContinue::new(self.max_auto_continues),
211        };
212
213        let agent = Agent::new(config, user_message_rx, message_tx);
214
215        let agent_handle = tokio::spawn(agent.run());
216
217        Ok((
218            user_message_tx,
219            agent_message_rx,
220            AgentHandle {
221                handle: agent_handle,
222            },
223        ))
224    }
225}
226
#[cfg(test)]
mod tests {
    use super::*;
    use crate::agent_spec::{AgentSpecExposure, ToolFilter};

    #[tokio::test]
    async fn test_agent_handle_is_finished() {
        let handle = AgentHandle {
            handle: tokio::spawn(async {}),
        };
        // Poll until the trivial task completes so the test actually exercises
        // `is_finished` (the original version never asserted on it).
        while !handle.is_finished() {
            tokio::time::sleep(Duration::from_millis(1)).await;
        }
        assert!(handle.is_finished());
        handle.await_completion().await;
    }

    #[tokio::test]
    async fn test_agent_handle_abort() {
        let handle = AgentHandle {
            handle: tokio::spawn(async {
                tokio::time::sleep(Duration::from_secs(60)).await;
            }),
        };
        assert!(!handle.is_finished());
        handle.abort();
        // Poll with a bounded loop instead of a single fixed sleep so the test
        // is not flaky on slow or heavily loaded runners.
        for _ in 0..200 {
            if handle.is_finished() {
                break;
            }
            tokio::time::sleep(Duration::from_millis(5)).await;
        }
        assert!(handle.is_finished());
    }

    #[tokio::test]
    async fn system_prompt_preserves_add_order() {
        let builder = AgentBuilder::new(Arc::new(llm::testing::FakeLlmProvider::new(vec![])))
            .system_prompt(Prompt::text("first"))
            .system_prompt(Prompt::text("second"))
            .system_prompt(Prompt::text("third"));

        let rendered = Prompt::build_all(&builder.prompts).await.unwrap();

        assert_eq!(rendered, "first\n\nsecond\n\nthird");
    }

    #[test]
    fn from_spec_accepts_alloy_model_specs() {
        let spec = AgentSpec {
            name: "alloy".to_string(),
            description: "alloy".to_string(),
            model: "ollama:llama3.2,llamacpp:local".to_string(),
            reasoning_effort: None,
            prompts: vec![],
            mcp_config_path: None,
            exposure: AgentSpecExposure::both(),
            tools: ToolFilter::default(),
        };

        let builder = AgentBuilder::from_spec(&spec, vec![]);
        assert!(builder.is_ok());
    }
}