// aether_core/core/agent_builder.rs

1use super::agent::{AgentConfig, AutoContinue};
2use crate::agent_spec::AgentSpec;
3use crate::context::CompactionConfig;
4use crate::core::{Agent, Prompt, Result};
5use crate::events::{AgentMessage, UserMessage};
6use crate::mcp::run_mcp_task::McpCommand;
7use llm::parser::ModelProviderParser;
8use llm::types::IsoString;
9use llm::{ChatMessage, Context, StreamingModelProvider, ToolDefinition};
10use std::sync::Arc;
11use std::time::Duration;
12use tokio::sync::mpsc::{self, Receiver, Sender};
13use tokio::task::JoinHandle;
14
/// Handle for communicating with a running Agent
pub struct AgentHandle {
    // Join handle for the spawned agent task; used to abort, poll
    // completion, or await the task (see the impl below).
    handle: JoinHandle<()>,
}
19
20impl AgentHandle {
21    /// Abort the agent task immediately.
22    pub fn abort(&self) {
23        self.handle.abort();
24    }
25
26    /// Returns `true` if the agent task has finished.
27    pub fn is_finished(&self) -> bool {
28        self.handle.is_finished()
29    }
30
31    /// Wait for the agent task to complete.
32    pub async fn await_completion(self) {
33        let _ = self.handle.await;
34    }
35}
36
/// Builder for configuring and spawning an [`Agent`].
pub struct AgentBuilder {
    // LLM provider the agent will stream completions from.
    llm: Arc<dyn StreamingModelProvider>,
    // System prompts; concatenated (in order) into one system message at spawn.
    prompts: Vec<Prompt>,
    // Tool definitions exposed to the model.
    tool_definitions: Vec<ToolDefinition>,
    // Conversation history inserted after the system prompt (session restore).
    initial_messages: Vec<ChatMessage>,
    // Channel used to dispatch tool calls to the MCP task, if tools are set.
    mcp_tx: Option<Sender<McpCommand>>,
    // Capacity of both the user-message and agent-message channels.
    channel_capacity: usize,
    // Per-tool execution timeout before a call is marked failed.
    tool_timeout: Duration,
    // Context compaction settings; `None` disables compaction.
    compaction_config: Option<CompactionConfig>,
    // Maximum auto-continue attempts when the LLM stops without tool calls.
    max_auto_continues: u32,
    // Stable key (typically a session ID) to improve prompt cache hit rates.
    prompt_cache_key: Option<String>,
}
49
50impl AgentBuilder {
51    pub fn new(llm: Arc<dyn StreamingModelProvider>) -> Self {
52        Self {
53            llm,
54            prompts: Vec::new(),
55            tool_definitions: Vec::new(),
56            initial_messages: Vec::new(),
57            mcp_tx: None,
58            channel_capacity: 1000,
59            tool_timeout: Duration::from_secs(60 * 20),
60            compaction_config: Some(CompactionConfig::default()),
61            max_auto_continues: 3,
62            prompt_cache_key: None,
63        }
64    }
65
66    /// Create a builder from a resolved `AgentSpec`.
67    ///
68    /// The LLM provider is derived from `spec.model` via `ModelProviderParser`.
69    /// `base_prompts` are prepended before the spec's own prompts.
70    pub async fn from_spec(spec: &AgentSpec, base_prompts: Vec<Prompt>) -> Result<Self> {
71        let (provider, _) = ModelProviderParser::default().parse(&spec.model).await?;
72        let mut builder = Self::new(Arc::from(provider));
73        for prompt in base_prompts {
74            builder = builder.system_prompt(prompt);
75        }
76        for prompt in &spec.prompts {
77            builder = builder.system_prompt(prompt.clone());
78        }
79        Ok(builder)
80    }
81
82    /// Add a prompt to the system prompt.
83    ///
84    /// Multiple prompts are concatenated with double newlines.
85    pub fn system_prompt(mut self, prompt: Prompt) -> Self {
86        self.prompts.push(prompt);
87        self
88    }
89
90    pub fn tools(mut self, tx: Sender<McpCommand>, tools: Vec<ToolDefinition>) -> Self {
91        self.tool_definitions = tools;
92        self.mcp_tx = Some(tx);
93        self
94    }
95
96    /// Set the timeout for tool execution
97    ///
98    /// If a tool does not return a result within this duration, it will be marked as failed
99    /// and the agent will continue processing.
100    ///
101    /// Default: 20 minutes
102    pub fn tool_timeout(mut self, timeout: Duration) -> Self {
103        self.tool_timeout = timeout;
104        self
105    }
106
107    /// Configure context compaction settings.
108    ///
109    /// By default, agents automatically compact context when token usage exceeds
110    /// 85% of the context window, preventing overflow during long-running tasks.
111    ///
112    /// # Examples
113    /// ```ignore
114    /// // Custom threshold
115    /// agent(llm).compaction(CompactionConfig::with_threshold(0.9))
116    ///
117    /// // Disable compaction entirely
118    /// agent(llm).compaction(CompactionConfig::disabled())
119    ///
120    /// // Full customization
121    /// agent(llm).compaction(
122    ///     CompactionConfig::with_threshold(0.85)
123    ///         .keep_recent_tool_results(3)
124    ///         .min_messages(20)
125    /// )
126    /// ```
127    pub fn compaction(mut self, config: CompactionConfig) -> Self {
128        self.compaction_config = Some(config);
129        self
130    }
131
132    /// Disable context compaction entirely.
133    ///
134    /// Overflow errors from the model will be surfaced directly to callers.
135    pub fn disable_compaction(mut self) -> Self {
136        self.compaction_config = None;
137        self
138    }
139
140    /// Configure the maximum number of auto-continue attempts.
141    ///
142    /// When the LLM stops without making tool calls, the agent may inject a
143    /// continuation prompt and restart the LLM stream for resumable stop
144    /// reasons (for example, token length limits).
145    ///
146    /// This setting limits how many times the agent will attempt to continue
147    /// before giving up and returning `AgentMessage::Done`.
148    ///
149    /// Default: 3
150    ///
151    /// # Example
152    /// ```ignore
153    /// // Allow up to 5 auto-continue attempts
154    /// agent(llm).max_auto_continues(5)
155    ///
156    /// // Disable auto-continue entirely
157    /// agent(llm).max_auto_continues(0)
158    /// ```
159    pub fn max_auto_continues(mut self, max: u32) -> Self {
160        self.max_auto_continues = max;
161        self
162    }
163
164    /// Set a prompt cache key for LLM provider request routing.
165    ///
166    /// This is typically a session ID (UUID) that remains stable across all
167    /// turns within a conversation, improving prompt cache hit rates.
168    pub fn prompt_cache_key(mut self, key: String) -> Self {
169        self.prompt_cache_key = Some(key);
170        self
171    }
172
173    /// Pre-populate the context with conversation history (e.g. from a restored session).
174    ///
175    /// These messages are inserted after the system prompt.
176    pub fn messages(mut self, messages: Vec<ChatMessage>) -> Self {
177        self.initial_messages = messages;
178        self
179    }
180
181    pub async fn spawn(self) -> Result<(Sender<UserMessage>, Receiver<AgentMessage>, AgentHandle)> {
182        let mut messages = Vec::new();
183
184        if !self.prompts.is_empty() {
185            let system_content = Prompt::build_all(&self.prompts).await?;
186            if !system_content.is_empty() {
187                messages.push(ChatMessage::System { content: system_content, timestamp: IsoString::now() });
188            }
189        }
190
191        messages.extend(self.initial_messages);
192
193        let (user_message_tx, user_message_rx) = mpsc::channel::<UserMessage>(self.channel_capacity);
194
195        let (message_tx, agent_message_rx) = mpsc::channel::<AgentMessage>(self.channel_capacity);
196
197        let mut context = Context::new(messages, self.tool_definitions);
198        context.set_prompt_cache_key(self.prompt_cache_key);
199
200        let config = AgentConfig {
201            llm: self.llm,
202            context,
203            mcp_command_tx: self.mcp_tx,
204            tool_timeout: self.tool_timeout,
205            compaction_config: self.compaction_config,
206            auto_continue: AutoContinue::new(self.max_auto_continues),
207        };
208
209        let agent = Agent::new(config, user_message_rx, message_tx);
210
211        let agent_handle = tokio::spawn(agent.run());
212
213        Ok((user_message_tx, agent_message_rx, AgentHandle { handle: agent_handle }))
214    }
215}
216
#[cfg(test)]
mod tests {
    use super::*;
    use crate::agent_spec::{AgentSpecExposure, ToolFilter};

    // `is_finished` must report true once the spawned task has completed,
    // and `await_completion` must return for an already-trivial task.
    #[tokio::test]
    async fn test_agent_handle_is_finished() {
        let handle = AgentHandle { handle: tokio::spawn(async {}) };
        // Yield until the no-op task has actually run to completion; this is
        // deterministic, unlike a fixed sleep.
        while !handle.is_finished() {
            tokio::task::yield_now().await;
        }
        assert!(handle.is_finished());
        handle.await_completion().await;
    }

    // Aborting a long-running task must eventually mark the handle finished.
    #[tokio::test]
    async fn test_agent_handle_abort() {
        let handle = AgentHandle {
            handle: tokio::spawn(async {
                tokio::time::sleep(Duration::from_secs(60)).await;
            }),
        };
        assert!(!handle.is_finished());
        handle.abort();
        // Abort is asynchronous; yield until the cancellation is observed
        // instead of relying on a fixed sleep (flaky under scheduler load).
        while !handle.is_finished() {
            tokio::task::yield_now().await;
        }
        assert!(handle.is_finished());
    }

    // Prompts must be rendered in the order they were added, joined by
    // double newlines.
    #[tokio::test]
    async fn system_prompt_preserves_add_order() {
        let builder = AgentBuilder::new(Arc::new(llm::testing::FakeLlmProvider::new(vec![])))
            .system_prompt(Prompt::text("first"))
            .system_prompt(Prompt::text("second"))
            .system_prompt(Prompt::text("third"));

        let rendered = Prompt::build_all(&builder.prompts).await.unwrap();

        assert_eq!(rendered, "first\n\nsecond\n\nthird");
    }

    // A comma-separated "alloy" model spec must parse into a valid builder.
    #[tokio::test]
    async fn from_spec_accepts_alloy_model_specs() {
        let spec = AgentSpec {
            name: "alloy".to_string(),
            description: "alloy".to_string(),
            model: "ollama:llama3.2,llamacpp:local".to_string(),
            reasoning_effort: None,
            prompts: vec![],
            mcp_config_paths: Vec::new(),
            exposure: AgentSpecExposure::both(),
            tools: ToolFilter::default(),
        };

        let builder = AgentBuilder::from_spec(&spec, vec![]).await;
        assert!(builder.is_ok());
    }
}