// ares/agents/configurable.rs
//! Configurable Agent implementation
//!
//! This module provides a generic agent that can be configured via TOML.
//! It replaces the hardcoded agent implementations with a flexible,
//! configuration-driven approach.

use crate::agents::{Agent, AgentResponse, ExecutionMetadata};
use crate::llm::coordinator::ConversationMessage;
use crate::llm::LLMClient;
use crate::tools::registry::ToolRegistry;
use crate::types::{AgentContext, AgentType, Result, ToolDefinition};
use crate::utils::toml_config::AgentConfig;
use async_trait::async_trait;
use std::sync::Arc;
15
/// A configurable agent that derives its behavior from TOML configuration
///
/// Constructed via [`ConfigurableAgent::new`] from an `AgentConfig` (ares.toml)
/// or via `with_params` when the caller wires everything explicitly.
pub struct ConfigurableAgent {
    /// The agent's name/type identifier
    name: String,
    /// The agent type enum value
    agent_type: AgentType,
    /// The LLM client to use for generation
    llm: Box<dyn LLMClient>,
    /// The system prompt from configuration
    system_prompt: String,
    /// Tools available to this agent
    tool_registry: Option<Arc<ToolRegistry>>,
    /// List of tool names this agent is allowed to use
    allowed_tools: Vec<String>,
    /// Maximum tool calling iterations
    max_tool_iterations: usize,
    /// Whether to execute tools in parallel
    // NOTE(review): this flag is stored and exposed via `parallel_tools()`,
    // but the tool-calling loop appears to run sequentially — confirm intent.
    parallel_tools: bool,
}
35
36impl ConfigurableAgent {
37    /// Create a new configurable agent from TOML config
38    ///
39    /// # Arguments
40    ///
41    /// * `name` - The agent name (used to determine AgentType)
42    /// * `config` - The agent configuration from ares.toml
43    /// * `llm` - The LLM client (already created from the model config)
44    /// * `tool_registry` - Optional tool registry for tool calling
45    pub fn new(
46        name: &str,
47        config: &AgentConfig,
48        llm: Box<dyn LLMClient>,
49        tool_registry: Option<Arc<ToolRegistry>>,
50    ) -> Self {
51        let agent_type = Self::name_to_type(name);
52        let system_prompt = config
53            .system_prompt
54            .clone()
55            .unwrap_or_else(|| Self::default_system_prompt(name));
56
57        Self {
58            name: name.to_string(),
59            agent_type,
60            llm,
61            system_prompt,
62            tool_registry,
63            allowed_tools: config.tools.clone(),
64            max_tool_iterations: config.max_tool_iterations,
65            parallel_tools: config.parallel_tools,
66        }
67    }
68
69    /// Create a new configurable agent with explicit parameters
70    #[allow(clippy::too_many_arguments)]
71    pub fn with_params(
72        name: &str,
73        agent_type: AgentType,
74        llm: Box<dyn LLMClient>,
75        system_prompt: String,
76        tool_registry: Option<Arc<ToolRegistry>>,
77        allowed_tools: Vec<String>,
78        max_tool_iterations: usize,
79        parallel_tools: bool,
80    ) -> Self {
81        Self {
82            name: name.to_string(),
83            agent_type,
84            llm,
85            system_prompt,
86            tool_registry,
87            allowed_tools,
88            max_tool_iterations,
89            parallel_tools,
90        }
91    }
92
93    /// Convert agent name to AgentType
94    fn name_to_type(name: &str) -> AgentType {
95        AgentType::from_string(name)
96    }
97
98    /// Get default system prompt for an agent type
99    fn default_system_prompt(name: &str) -> String {
100        match name.to_lowercase().as_str() {
101            "router" => r#"You are a routing agent that classifies user queries.
102Available agents: product, invoice, sales, finance, hr, orchestrator.
103Respond with ONLY the agent name (one word, lowercase)."#
104                .to_string(),
105
106            "orchestrator" => r#"You are an orchestrator agent for complex queries.
107Break down requests, delegate to specialists, and synthesize results."#
108                .to_string(),
109
110            "product" => r#"You are a Product Agent for product-related queries.
111Handle catalog, specifications, inventory, and pricing questions."#
112                .to_string(),
113
114            "invoice" => r#"You are an Invoice Agent for billing queries.
115Handle invoices, payments, and billing history."#
116                .to_string(),
117
118            "sales" => r#"You are a Sales Agent for sales analytics.
119Handle performance metrics, revenue, and customer data."#
120                .to_string(),
121
122            "finance" => r#"You are a Finance Agent for financial analysis.
123Handle statements, budgets, and expense management."#
124                .to_string(),
125
126            "hr" => r#"You are an HR Agent for human resources.
127Handle employee info, policies, and benefits."#
128                .to_string(),
129
130            _ => format!("You are a {} agent.", name),
131        }
132    }
133
134    /// Get the agent name
135    pub fn name(&self) -> &str {
136        &self.name
137    }
138
139    /// Get the max tool iterations setting
140    pub fn max_tool_iterations(&self) -> usize {
141        self.max_tool_iterations
142    }
143
144    /// Get the parallel tools setting
145    pub fn parallel_tools(&self) -> bool {
146        self.parallel_tools
147    }
148
149    /// Check if this agent has tools configured
150    pub fn has_tools(&self) -> bool {
151        !self.allowed_tools.is_empty() && self.tool_registry.is_some()
152    }
153
154    /// Get the tool registry (if any)
155    pub fn tool_registry(&self) -> Option<&Arc<ToolRegistry>> {
156        self.tool_registry.as_ref()
157    }
158
159    /// Get the list of allowed tool names for this agent
160    pub fn allowed_tools(&self) -> &[String] {
161        &self.allowed_tools
162    }
163
164    /// Get tool definitions for only this agent's allowed tools
165    ///
166    /// This filters the tool registry to only return tools that:
167    /// 1. Are in this agent's allowed tools list
168    /// 2. Are enabled in the tool registry
169    pub fn get_filtered_tool_definitions(&self) -> Vec<ToolDefinition> {
170        match &self.tool_registry {
171            Some(registry) => {
172                let allowed: Vec<&str> = self.allowed_tools.iter().map(|s| s.as_str()).collect();
173                registry.get_tool_definitions_for(&allowed)
174            }
175            None => Vec::new(),
176        }
177    }
178
179    /// Check if a specific tool is allowed for this agent
180    pub fn can_use_tool(&self, tool_name: &str) -> bool {
181        self.allowed_tools.contains(&tool_name.to_string())
182            && self
183                .tool_registry
184                .as_ref()
185                .map(|r| r.is_enabled(tool_name))
186                .unwrap_or(false)
187    }
188
189    /// Execute the agent with tool-calling support (multi-turn loop).
190    async fn execute_with_tools(
191        &self,
192        input: &str,
193        context: &AgentContext,
194    ) -> Result<AgentResponse> {
195        use crate::llm::client::TokenUsage;
196
197        let tools = self.get_filtered_tool_definitions();
198        tracing::debug!(
199            agent = %self.name,
200            allowed_tools = ?self.allowed_tools,
201            tool_count = tools.len(),
202            "execute_with_tools: tool definitions loaded"
203        );
204        let registry = self.tool_registry.as_ref().unwrap();
205
206        let mut messages: Vec<ConversationMessage> = Vec::new();
207
208        // Inject external context if a ContextProvider is configured
209        // OSS: NoOpContextProvider returns None. Managed: ErukaContextProvider returns knowledge states.
210        #[cfg(feature = "eruka-context")]
211        let effective_prompt = match crate::middleware::eruka_context::get_current_eruka_context() {
212            Some(eruka_ctx) if !eruka_ctx.is_empty() => {
213                tracing::debug!(agent = %self.name, ctx_len = eruka_ctx.len(), "External context injected into system prompt");
214                format!("{}\n\n{}\n\nWhen referencing facts above, cite [E1], [E2] etc.", eruka_ctx, self.system_prompt)
215            }
216            _ => self.system_prompt.clone(),
217        };
218        #[cfg(not(feature = "eruka-context"))]
219        let effective_prompt = self.system_prompt.clone();
220        messages.push(ConversationMessage::system(&effective_prompt));
221
222        // Add recent conversation history (last 5 messages)
223        for msg in context.conversation_history.iter().rev().take(5).rev() {
224            let cm = match msg.role {
225                crate::types::MessageRole::User => ConversationMessage::user(&msg.content),
226                crate::types::MessageRole::Assistant => {
227                    ConversationMessage::assistant(&msg.content, vec![])
228                }
229                _ => ConversationMessage::system(&msg.content),
230            };
231            messages.push(cm);
232        }
233
234        messages.push(ConversationMessage::user(input));
235
236        let mut total_usage = TokenUsage::default();
237
238        for _ in 0..self.max_tool_iterations {
239            let response = self
240                .llm
241                .generate_with_tools_and_history(&messages, &tools)
242                .await?;
243
244            if let Some(usage) = &response.usage {
245                total_usage = TokenUsage::new(
246                    total_usage.prompt_tokens + usage.prompt_tokens,
247                    total_usage.completion_tokens + usage.completion_tokens,
248                );
249            }
250
251            if response.tool_calls.is_empty() {
252                return Ok(AgentResponse {
253                    content: response.content,
254                    usage: Some(total_usage),
255                    metadata: Some(ExecutionMetadata {
256                        model_name: self.llm.model_name().to_string(),
257                        provider_name: "openai".to_string(),
258                    }),
259                });
260            }
261
262            // Add assistant message with tool calls
263            messages.push(ConversationMessage::assistant(
264                &response.content,
265                response.tool_calls.clone(),
266            ));
267
268            // Execute each tool call and add results
269            for tc in &response.tool_calls {
270                let result = registry.execute(&tc.name, tc.arguments.clone()).await;
271                let result_value = match result {
272                    Ok(v) => v,
273                    Err(e) => serde_json::json!({"error": e.to_string()}),
274                };
275                messages.push(ConversationMessage::tool_result(&tc.id, &result_value));
276            }
277        }
278
279        // Max iterations reached — make ONE final LLM call without tools to get synthesis
280        // Bug #7 fix: the last assistant message has empty content (it was a tool-call message).
281        // We need the LLM to synthesize a final response from all the tool results.
282        tracing::warn!(
283            agent = %self.name,
284            "Max tool iterations ({}) reached — making final synthesis call",
285            self.max_tool_iterations
286        );
287        let final_response = self
288            .llm
289            .generate_with_tools_and_history(&messages, &[])
290            .await;
291
292        let content = match final_response {
293            Ok(resp) if !resp.content.is_empty() => resp.content,
294            Ok(_) => {
295                // Final call also returned empty — find any non-empty assistant content
296                messages
297                    .iter()
298                    .rev()
299                    .find(|m| m.role == crate::llm::coordinator::MessageRole::Assistant && !m.content.is_empty())
300                    .map(|m| m.content.clone())
301                    .unwrap_or_else(|| "Agent completed tool calls but could not generate a final response.".to_string())
302            }
303            Err(e) => {
304                tracing::error!(error = %e, "Final synthesis call failed");
305                // Still try to return something useful
306                messages
307                    .iter()
308                    .rev()
309                    .find(|m| m.role == crate::llm::coordinator::MessageRole::Assistant && !m.content.is_empty())
310                    .map(|m| m.content.clone())
311                    .unwrap_or_else(|| format!("Agent completed but synthesis failed: {}", e))
312            }
313        };
314
315        Ok(AgentResponse {
316            content,
317            usage: Some(total_usage),
318            metadata: Some(ExecutionMetadata {
319                model_name: self.llm.model_name().to_string(),
320                provider_name: "openai".to_string(),
321            }),
322        })
323    }
324}
325
#[async_trait]
impl Agent for ConfigurableAgent {
    /// Execute the agent: routes to the multi-turn tool-calling loop when
    /// tools are configured, otherwise performs a single LLM call built from
    /// the system prompt, optional user memory, and recent history.
    async fn execute(&self, input: &str, context: &AgentContext) -> Result<AgentResponse> {
        if self.has_tools() {
            tracing::debug!(agent = %self.name, "execute: using tool-calling path");
            return self.execute_with_tools(input, context).await;
        }
        tracing::debug!(agent = %self.name, "execute: no tools, using simple path");

        // Build context with conversation history if available
        // Inject external context if a ContextProvider is configured
        #[cfg(feature = "eruka-context")]
        let effective_prompt = match crate::middleware::eruka_context::get_current_eruka_context() {
            Some(eruka_ctx) if !eruka_ctx.is_empty() => {
                tracing::debug!(agent = %self.name, ctx_len = eruka_ctx.len(), "External context injected into system prompt (simple path)");
                format!("{}\n\n{}\n\nWhen referencing facts above, cite [E1], [E2] etc.", eruka_ctx, self.system_prompt)
            }
            _ => self.system_prompt.clone(),
        };
        #[cfg(not(feature = "eruka-context"))]
        let effective_prompt = self.system_prompt.clone();
        let mut messages = vec![("system".to_string(), effective_prompt)];

        // Add user memory if available (as an extra system message)
        if let Some(memory) = &context.user_memory {
            let memory_context = format!(
                "User preferences: {}",
                memory
                    .preferences
                    .iter()
                    .map(|p| format!("{}: {}", p.key, p.value))
                    .collect::<Vec<_>>()
                    .join(", ")
            );
            messages.push(("system".to_string(), memory_context));
        }

        // Add recent conversation history (last 5 messages)
        // rev().take(5).rev() keeps the 5 MOST RECENT messages in original order.
        for msg in context.conversation_history.iter().rev().take(5).rev() {
            let role = match msg.role {
                crate::types::MessageRole::User => "user",
                crate::types::MessageRole::Assistant => "assistant",
                _ => "system",
            };
            messages.push((role.to_string(), msg.content.clone()));
        }

        messages.push(("user".to_string(), input.to_string()));

        let llm_response = self.llm.generate_with_history(&messages).await?;
        Ok(AgentResponse {
            content: llm_response.content,
            usage: llm_response.usage,
            metadata: Some(ExecutionMetadata {
                model_name: self.llm.model_name().to_string(),
                // NOTE(review): provider is hardcoded — confirm it should
                // reflect the actual LLM backend in use.
                provider_name: "openai".to_string(),
            }),
        })
    }

    /// The configured system prompt (without any injected external context).
    fn system_prompt(&self) -> String {
        self.system_prompt.clone()
    }

    /// The agent's type as derived from its configured name.
    fn agent_type(&self) -> AgentType {
        self.agent_type.clone()
    }
}
394
#[cfg(test)]
mod tests {
    use super::*;
    use crate::llm::LLMResponse;
    use crate::utils::toml_config::AgentConfig;
    use std::collections::HashMap;

    /// Stub LLM client shared by the tests below: every generation method
    /// returns a canned "mock" response and every stream is empty.
    /// (Previously duplicated verbatim in two tests.)
    struct MockLLM;

    /// Canned non-tool-calling response used by all mock generation methods.
    fn mock_response() -> LLMResponse {
        LLMResponse {
            content: "mock".to_string(),
            tool_calls: vec![],
            finish_reason: "stop".to_string(),
            usage: None,
        }
    }

    #[async_trait]
    impl LLMClient for MockLLM {
        async fn generate(&self, _: &str) -> Result<String> {
            Ok("mock".to_string())
        }
        async fn generate_with_system(&self, _: &str, _: &str) -> Result<String> {
            Ok("mock".to_string())
        }
        async fn generate_with_history(&self, _: &[(String, String)]) -> Result<LLMResponse> {
            Ok(mock_response())
        }
        async fn generate_with_tools(
            &self,
            _: &str,
            _: &[ToolDefinition],
        ) -> Result<LLMResponse> {
            Ok(mock_response())
        }
        async fn stream(
            &self,
            _: &str,
        ) -> Result<Box<dyn futures::Stream<Item = Result<String>> + Send + Unpin>> {
            Ok(Box::new(futures::stream::empty()))
        }
        async fn stream_with_system(
            &self,
            _: &str,
            _: &str,
        ) -> Result<Box<dyn futures::Stream<Item = Result<String>> + Send + Unpin>> {
            Ok(Box::new(futures::stream::empty()))
        }
        async fn stream_with_history(
            &self,
            _: &[(String, String)],
        ) -> Result<Box<dyn futures::Stream<Item = Result<String>> + Send + Unpin>> {
            Ok(Box::new(futures::stream::empty()))
        }
        fn model_name(&self) -> &str {
            "mock"
        }
        async fn generate_with_tools_and_history(
            &self,
            _: &[crate::llm::coordinator::ConversationMessage],
            _: &[ToolDefinition],
        ) -> Result<LLMResponse> {
            Ok(mock_response())
        }
    }

    /// Build an `AgentConfig` with the given tool list and default settings.
    fn config_with_tools(tools: Vec<String>) -> AgentConfig {
        AgentConfig {
            model: "default".to_string(),
            system_prompt: None,
            tools,
            max_tool_iterations: 5,
            parallel_tools: false,
            extra: HashMap::new(),
        }
    }

    #[test]
    fn test_name_to_type() {
        assert!(matches!(
            ConfigurableAgent::name_to_type("router"),
            AgentType::Router
        ));
        assert!(matches!(
            ConfigurableAgent::name_to_type("PRODUCT"),
            AgentType::Product
        ));
        // Unknown types now return Custom variant
        assert!(matches!(
            ConfigurableAgent::name_to_type("unknown"),
            AgentType::Custom(_)
        ));
        // Verify the custom name is preserved
        if let AgentType::Custom(name) = ConfigurableAgent::name_to_type("my-custom-agent") {
            assert_eq!(name, "my-custom-agent");
        } else {
            panic!("Expected Custom variant");
        }
    }

    #[test]
    fn test_default_system_prompt() {
        let prompt = ConfigurableAgent::default_system_prompt("router");
        assert!(prompt.contains("routing"));

        let prompt = ConfigurableAgent::default_system_prompt("product");
        assert!(prompt.contains("Product"));
    }

    #[test]
    fn test_allowed_tools() {
        let config =
            config_with_tools(vec!["calculator".to_string(), "web_search".to_string()]);

        let agent = ConfigurableAgent::new(
            "orchestrator",
            &config,
            Box::new(MockLLM),
            None, // No registry for this test
        );

        assert_eq!(agent.allowed_tools().len(), 2);
        assert!(agent.allowed_tools().contains(&"calculator".to_string()));
        assert!(agent.allowed_tools().contains(&"web_search".to_string()));
    }

    #[test]
    fn test_has_tools_requires_both_config_and_registry() {
        // Agent with tools config but no registry
        let config = config_with_tools(vec!["calculator".to_string()]);
        let agent = ConfigurableAgent::new("orchestrator", &config, Box::new(MockLLM), None);
        assert!(!agent.has_tools()); // No registry

        // Agent with empty tools
        let config_empty = config_with_tools(vec![]);
        let agent_empty = ConfigurableAgent::new(
            "product",
            &config_empty,
            Box::new(MockLLM),
            Some(Arc::new(ToolRegistry::new())),
        );
        assert!(!agent_empty.has_tools()); // Empty tools list
    }
}