use crate::agents::Agent;
use crate::llm::LLMClient;
use crate::tools::registry::ToolRegistry;
use crate::types::{AgentContext, AgentType, Result, ToolDefinition};
use crate::utils::toml_config::AgentConfig;
use async_trait::async_trait;
use std::sync::Arc;
/// An agent whose identity, prompt, and tool access are driven by
/// configuration (see [`AgentConfig`]) rather than hard-coded per agent type.
pub struct ConfigurableAgent {
    // Configured agent name; also used to derive `agent_type` and the default system prompt.
    name: String,
    // Typed classification derived from `name` via `AgentType::from_string`.
    agent_type: AgentType,
    // Backing LLM client used for all generation calls.
    llm: Box<dyn LLMClient>,
    // System prompt sent as the first message on every `execute` call.
    system_prompt: String,
    // Shared tool registry; `None` means this agent has no tool access at all.
    tool_registry: Option<Arc<ToolRegistry>>,
    // Tool names this agent may call (a tool must also be enabled in the registry).
    allowed_tools: Vec<String>,
    // Exposed via `max_tool_iterations()`; presumably caps the caller's tool-call loop — TODO confirm at call sites.
    max_tool_iterations: usize,
    // Exposed via `parallel_tools()`; presumably lets callers dispatch tool calls concurrently — TODO confirm at call sites.
    parallel_tools: bool,
}
impl ConfigurableAgent {
    /// Builds an agent from a TOML-derived [`AgentConfig`].
    ///
    /// The agent type is derived from `name`, and a built-in system prompt is
    /// used when the config does not supply one.
    pub fn new(
        name: &str,
        config: &AgentConfig,
        llm: Box<dyn LLMClient>,
        tool_registry: Option<Arc<ToolRegistry>>,
    ) -> Self {
        let agent_type = Self::name_to_type(name);
        let system_prompt = config
            .system_prompt
            .clone()
            .unwrap_or_else(|| Self::default_system_prompt(name));
        Self {
            name: name.to_string(),
            agent_type,
            llm,
            system_prompt,
            tool_registry,
            allowed_tools: config.tools.clone(),
            max_tool_iterations: config.max_tool_iterations,
            parallel_tools: config.parallel_tools,
        }
    }

    /// Fully explicit constructor for callers that bypass config files.
    #[allow(clippy::too_many_arguments)] // mirrors the struct fields one-to-one
    pub fn with_params(
        name: &str,
        agent_type: AgentType,
        llm: Box<dyn LLMClient>,
        system_prompt: String,
        tool_registry: Option<Arc<ToolRegistry>>,
        allowed_tools: Vec<String>,
        max_tool_iterations: usize,
        parallel_tools: bool,
    ) -> Self {
        Self {
            name: name.to_string(),
            agent_type,
            llm,
            system_prompt,
            tool_registry,
            allowed_tools,
            max_tool_iterations,
            parallel_tools,
        }
    }

    /// Maps an agent name to its typed classification.
    fn name_to_type(name: &str) -> AgentType {
        AgentType::from_string(name)
    }

    /// Fallback system prompt for well-known agent names (case-insensitive);
    /// unknown names get a generic one-liner.
    fn default_system_prompt(name: &str) -> String {
        match name.to_lowercase().as_str() {
            "router" => r#"You are a routing agent that classifies user queries.
Available agents: product, invoice, sales, finance, hr, orchestrator.
Respond with ONLY the agent name (one word, lowercase)."#
                .to_string(),
            "orchestrator" => r#"You are an orchestrator agent for complex queries.
Break down requests, delegate to specialists, and synthesize results."#
                .to_string(),
            "product" => r#"You are a Product Agent for product-related queries.
Handle catalog, specifications, inventory, and pricing questions."#
                .to_string(),
            "invoice" => r#"You are an Invoice Agent for billing queries.
Handle invoices, payments, and billing history."#
                .to_string(),
            "sales" => r#"You are a Sales Agent for sales analytics.
Handle performance metrics, revenue, and customer data."#
                .to_string(),
            "finance" => r#"You are a Finance Agent for financial analysis.
Handle statements, budgets, and expense management."#
                .to_string(),
            "hr" => r#"You are an HR Agent for human resources.
Handle employee info, policies, and benefits."#
                .to_string(),
            _ => format!("You are a {} agent.", name),
        }
    }

    /// Configured agent name.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Maximum number of tool-call iterations allowed per request.
    pub fn max_tool_iterations(&self) -> usize {
        self.max_tool_iterations
    }

    /// Whether tool calls may be dispatched in parallel.
    pub fn parallel_tools(&self) -> bool {
        self.parallel_tools
    }

    /// True only when the agent both allows at least one tool AND has a registry.
    pub fn has_tools(&self) -> bool {
        !self.allowed_tools.is_empty() && self.tool_registry.is_some()
    }

    /// Shared tool registry, if this agent has one.
    pub fn tool_registry(&self) -> Option<&Arc<ToolRegistry>> {
        self.tool_registry.as_ref()
    }

    /// Names of the tools this agent is permitted to use.
    pub fn allowed_tools(&self) -> &[String] {
        &self.allowed_tools
    }

    /// Tool definitions from the registry, restricted to this agent's allow-list.
    /// Empty when the agent has no registry.
    pub fn get_filtered_tool_definitions(&self) -> Vec<ToolDefinition> {
        match &self.tool_registry {
            Some(registry) => {
                let allowed: Vec<&str> = self.allowed_tools.iter().map(|s| s.as_str()).collect();
                registry.get_tool_definitions_for(&allowed)
            }
            None => Vec::new(),
        }
    }

    /// True when `tool_name` is on the allow-list AND enabled in the registry.
    pub fn can_use_tool(&self, tool_name: &str) -> bool {
        // Compare as &str instead of `contains(&tool_name.to_string())`, which
        // allocated a fresh String on every lookup (clippy: cmp_owned).
        self.allowed_tools.iter().any(|t| t == tool_name)
            && self
                .tool_registry
                .as_ref()
                .map(|r| r.is_enabled(tool_name))
                .unwrap_or(false)
    }
}
#[async_trait]
impl Agent for ConfigurableAgent {
    /// Runs one generation turn: assembles the system prompt, an optional
    /// user-memory summary, up to the 5 most recent history messages (in
    /// order), and the new user input, then delegates to the LLM.
    async fn execute(&self, input: &str, context: &AgentContext) -> Result<String> {
        let mut messages: Vec<(String, String)> =
            vec![("system".to_string(), self.system_prompt.clone())];

        // Surface stored user preferences as an extra system message.
        if let Some(memory) = &context.user_memory {
            let prefs: Vec<String> = memory
                .preferences
                .iter()
                .map(|p| format!("{}: {}", p.key, p.value))
                .collect();
            messages.push((
                "system".to_string(),
                format!("User preferences: {}", prefs.join(", ")),
            ));
        }

        // Last 5 history entries, kept in chronological order.
        let recent = context.conversation_history.iter().rev().take(5).rev();
        for msg in recent {
            let role = match msg.role {
                crate::types::MessageRole::User => "user",
                crate::types::MessageRole::Assistant => "assistant",
                _ => "system",
            };
            messages.push((role.to_string(), msg.content.clone()));
        }

        messages.push(("user".to_string(), input.to_string()));
        self.llm.generate_with_history(&messages).await
    }

    /// Returns a copy of the configured system prompt.
    fn system_prompt(&self) -> String {
        self.system_prompt.clone()
    }

    /// Returns the agent's typed classification.
    fn agent_type(&self) -> AgentType {
        self.agent_type.clone()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::llm::LLMResponse;
    use crate::utils::toml_config::AgentConfig;
    use std::collections::HashMap;

    /// Stub LLM returning canned responses. Hoisted to module level so the
    /// ~60-line trait impl is no longer duplicated verbatim in every test.
    struct MockLLM;

    /// Canned tool-call response shared by the two tool-capable trait methods.
    fn mock_response() -> LLMResponse {
        LLMResponse {
            content: "mock".to_string(),
            tool_calls: vec![],
            finish_reason: "stop".to_string(),
            usage: None,
        }
    }

    #[async_trait]
    impl LLMClient for MockLLM {
        async fn generate(&self, _: &str) -> Result<String> {
            Ok("mock".to_string())
        }
        async fn generate_with_system(&self, _: &str, _: &str) -> Result<String> {
            Ok("mock".to_string())
        }
        async fn generate_with_history(&self, _: &[(String, String)]) -> Result<String> {
            Ok("mock".to_string())
        }
        async fn generate_with_tools(
            &self,
            _: &str,
            _: &[ToolDefinition],
        ) -> Result<LLMResponse> {
            Ok(mock_response())
        }
        async fn generate_with_tools_and_history(
            &self,
            _: &[crate::llm::coordinator::ConversationMessage],
            _: &[ToolDefinition],
        ) -> Result<LLMResponse> {
            Ok(mock_response())
        }
        async fn stream(
            &self,
            _: &str,
        ) -> Result<Box<dyn futures::Stream<Item = Result<String>> + Send + Unpin>> {
            Ok(Box::new(futures::stream::empty()))
        }
        async fn stream_with_system(
            &self,
            _: &str,
            _: &str,
        ) -> Result<Box<dyn futures::Stream<Item = Result<String>> + Send + Unpin>> {
            Ok(Box::new(futures::stream::empty()))
        }
        async fn stream_with_history(
            &self,
            _: &[(String, String)],
        ) -> Result<Box<dyn futures::Stream<Item = Result<String>> + Send + Unpin>> {
            Ok(Box::new(futures::stream::empty()))
        }
        fn model_name(&self) -> &str {
            "mock"
        }
    }

    /// Builds a minimal AgentConfig varying only in its tool list.
    fn config_with_tools(tools: Vec<String>) -> AgentConfig {
        AgentConfig {
            model: "default".to_string(),
            system_prompt: None,
            tools,
            max_tool_iterations: 5,
            parallel_tools: false,
            extra: HashMap::new(),
        }
    }

    #[test]
    fn test_name_to_type() {
        assert!(matches!(
            ConfigurableAgent::name_to_type("router"),
            AgentType::Router
        ));
        // Name matching is case-insensitive.
        assert!(matches!(
            ConfigurableAgent::name_to_type("PRODUCT"),
            AgentType::Product
        ));
        assert!(matches!(
            ConfigurableAgent::name_to_type("unknown"),
            AgentType::Custom(_)
        ));
        // Custom variants preserve the original name.
        if let AgentType::Custom(name) = ConfigurableAgent::name_to_type("my-custom-agent") {
            assert_eq!(name, "my-custom-agent");
        } else {
            panic!("Expected Custom variant");
        }
    }

    #[test]
    fn test_default_system_prompt() {
        let prompt = ConfigurableAgent::default_system_prompt("router");
        assert!(prompt.contains("routing"));
        let prompt = ConfigurableAgent::default_system_prompt("product");
        assert!(prompt.contains("Product"));
    }

    #[test]
    fn test_allowed_tools() {
        let config =
            config_with_tools(vec!["calculator".to_string(), "web_search".to_string()]);
        let agent = ConfigurableAgent::new("orchestrator", &config, Box::new(MockLLM), None);
        assert_eq!(agent.allowed_tools().len(), 2);
        assert!(agent.allowed_tools().contains(&"calculator".to_string()));
        assert!(agent.allowed_tools().contains(&"web_search".to_string()));
    }

    #[test]
    fn test_has_tools_requires_both_config_and_registry() {
        // Tools configured but no registry: has_tools() must be false.
        let config = config_with_tools(vec!["calculator".to_string()]);
        let agent = ConfigurableAgent::new("orchestrator", &config, Box::new(MockLLM), None);
        assert!(!agent.has_tools());

        // Registry present but no tools configured: also false.
        let config_empty = config_with_tools(vec![]);
        let agent_empty = ConfigurableAgent::new(
            "product",
            &config_empty,
            Box::new(MockLLM),
            Some(Arc::new(ToolRegistry::new())),
        );
        assert!(!agent_empty.has_tools());
    }
}