use crate::agents::{Agent, AgentResponse, ExecutionMetadata};
use crate::llm::coordinator::ConversationMessage;
use crate::llm::LLMClient;
use crate::tools::registry::ToolRegistry;
use crate::types::{AgentContext, AgentType, Result, ToolDefinition};
use crate::utils::toml_config::AgentConfig;
use async_trait::async_trait;
use std::sync::Arc;
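/// An agent whose identity, system prompt, and tool access come entirely from
/// configuration (`AgentConfig`) rather than a bespoke implementation.
///
/// A minimal construction sketch; `MyLLMClient` is a placeholder for any
/// `LLMClient` implementation and the field values are illustrative:
///
/// ```ignore
/// let config = AgentConfig {
///     model: "default".to_string(),
///     system_prompt: None, // falls back to the built-in prompt for "product"
///     tools: vec!["calculator".to_string()],
///     max_tool_iterations: 5,
///     parallel_tools: false,
///     extra: HashMap::new(),
/// };
/// let agent = ConfigurableAgent::new("product", &config, Box::new(MyLLMClient), None);
/// ```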
pub struct ConfigurableAgent {
name: String,
agent_type: AgentType,
llm: Box<dyn LLMClient>,
system_prompt: String,
tool_registry: Option<Arc<ToolRegistry>>,
allowed_tools: Vec<String>,
max_tool_iterations: usize,
parallel_tools: bool,
}
impl ConfigurableAgent {
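    /// Builds an agent from its configured name and `AgentConfig`, falling
    /// back to a built-in system prompt when none is supplied.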
pub fn new(
name: &str,
config: &AgentConfig,
llm: Box<dyn LLMClient>,
tool_registry: Option<Arc<ToolRegistry>>,
) -> Self {
let agent_type = Self::name_to_type(name);
let system_prompt = config
.system_prompt
.clone()
.unwrap_or_else(|| Self::default_system_prompt(name));
Self {
name: name.to_string(),
agent_type,
llm,
system_prompt,
tool_registry,
allowed_tools: config.tools.clone(),
max_tool_iterations: config.max_tool_iterations,
parallel_tools: config.parallel_tools,
}
}
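    /// Fully explicit constructor for callers that have already resolved
    /// every field and do not hold an `AgentConfig`.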
#[allow(clippy::too_many_arguments)]
pub fn with_params(
name: &str,
agent_type: AgentType,
llm: Box<dyn LLMClient>,
system_prompt: String,
tool_registry: Option<Arc<ToolRegistry>>,
allowed_tools: Vec<String>,
max_tool_iterations: usize,
parallel_tools: bool,
) -> Self {
Self {
name: name.to_string(),
agent_type,
llm,
system_prompt,
tool_registry,
allowed_tools,
max_tool_iterations,
parallel_tools,
}
}
    /// Resolves the system prompt for a single call, prepending externally
    /// injected context when the `eruka-context` feature is enabled.
    fn effective_system_prompt(&self) -> String {
        #[cfg(feature = "eruka-context")]
        if let Some(eruka_ctx) = crate::middleware::eruka_context::get_current_eruka_context() {
            if !eruka_ctx.is_empty() {
                tracing::debug!(agent = %self.name, ctx_len = eruka_ctx.len(), "External context injected into system prompt");
                return format!(
                    "{}\n\n{}\n\nWhen referencing facts above, cite [E1], [E2] etc.",
                    eruka_ctx, self.system_prompt
                );
            }
        }
        self.system_prompt.clone()
    }
fn name_to_type(name: &str) -> AgentType {
AgentType::from_string(name)
}
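    /// Built-in fallback prompts keyed by agent name (case-insensitive);
    /// unrecognized names get a generic one-line prompt.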
fn default_system_prompt(name: &str) -> String {
match name.to_lowercase().as_str() {
"router" => r#"You are a routing agent that classifies user queries.
Available agents: product, invoice, sales, finance, hr, orchestrator.
Respond with ONLY the agent name (one word, lowercase)."#
.to_string(),
"orchestrator" => r#"You are an orchestrator agent for complex queries.
Break down requests, delegate to specialists, and synthesize results."#
.to_string(),
"product" => r#"You are a Product Agent for product-related queries.
Handle catalog, specifications, inventory, and pricing questions."#
.to_string(),
"invoice" => r#"You are an Invoice Agent for billing queries.
Handle invoices, payments, and billing history."#
.to_string(),
"sales" => r#"You are a Sales Agent for sales analytics.
Handle performance metrics, revenue, and customer data."#
.to_string(),
"finance" => r#"You are a Finance Agent for financial analysis.
Handle statements, budgets, and expense management."#
.to_string(),
"hr" => r#"You are an HR Agent for human resources.
Handle employee info, policies, and benefits."#
.to_string(),
_ => format!("You are a {} agent.", name),
}
}
pub fn name(&self) -> &str {
&self.name
}
pub fn max_tool_iterations(&self) -> usize {
self.max_tool_iterations
}
pub fn parallel_tools(&self) -> bool {
self.parallel_tools
}
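    /// True only when at least one tool is allowed *and* a registry is
    /// attached; `execute` uses this to choose between paths.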
pub fn has_tools(&self) -> bool {
!self.allowed_tools.is_empty() && self.tool_registry.is_some()
}
pub fn tool_registry(&self) -> Option<&Arc<ToolRegistry>> {
self.tool_registry.as_ref()
}
pub fn allowed_tools(&self) -> &[String] {
&self.allowed_tools
}
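    /// Returns the registry's tool definitions restricted to this agent's
    /// allowlist, or an empty list when no registry is attached.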
pub fn get_filtered_tool_definitions(&self) -> Vec<ToolDefinition> {
match &self.tool_registry {
Some(registry) => {
let allowed: Vec<&str> = self.allowed_tools.iter().map(|s| s.as_str()).collect();
registry.get_tool_definitions_for(&allowed)
}
None => Vec::new(),
}
}
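    /// A tool is usable only if it is on this agent's allowlist and currently
    /// enabled in the registry.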
    pub fn can_use_tool(&self, tool_name: &str) -> bool {
        self.allowed_tools.iter().any(|t| t == tool_name)
            && self
                .tool_registry
                .as_ref()
                .map(|r| r.is_enabled(tool_name))
                .unwrap_or(false)
    }
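    /// Runs the tool-calling loop: each round sends the transcript plus the
    /// filtered tool definitions to the LLM, executes any requested tools via
    /// the registry, and appends the results. After `max_tool_iterations`
    /// rounds without a final answer, one last call is made with no tools to
    /// synthesize a response from the transcript.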
async fn execute_with_tools(
&self,
input: &str,
context: &AgentContext,
) -> Result<AgentResponse> {
use crate::llm::client::TokenUsage;
let tools = self.get_filtered_tool_definitions();
tracing::debug!(
agent = %self.name,
allowed_tools = ?self.allowed_tools,
tool_count = tools.len(),
"execute_with_tools: tool definitions loaded"
);
        // This path is only reached when `has_tools()` is true, so a registry
        // is guaranteed to be present.
        let registry = self
            .tool_registry
            .as_ref()
            .expect("execute_with_tools called without a tool registry");
let mut messages: Vec<ConversationMessage> = Vec::new();
        let effective_prompt = self.effective_system_prompt();
messages.push(ConversationMessage::system(&effective_prompt));
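        // Bound prompt size by including only the five most recent history messages.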
for msg in context.conversation_history.iter().rev().take(5).rev() {
let cm = match msg.role {
crate::types::MessageRole::User => ConversationMessage::user(&msg.content),
crate::types::MessageRole::Assistant => {
ConversationMessage::assistant(&msg.content, vec![])
}
_ => ConversationMessage::system(&msg.content),
};
messages.push(cm);
}
messages.push(ConversationMessage::user(input));
let mut total_usage = TokenUsage::default();
for _ in 0..self.max_tool_iterations {
let response = self
.llm
.generate_with_tools_and_history(&messages, &tools)
.await?;
if let Some(usage) = &response.usage {
total_usage = TokenUsage::new(
total_usage.prompt_tokens + usage.prompt_tokens,
total_usage.completion_tokens + usage.completion_tokens,
);
}
if response.tool_calls.is_empty() {
return Ok(AgentResponse {
content: response.content,
usage: Some(total_usage),
metadata: Some(ExecutionMetadata {
model_name: self.llm.model_name().to_string(),
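                        // Provider name is currently hardcoded rather than
                        // derived from the client.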
provider_name: "openai".to_string(),
}),
});
}
messages.push(ConversationMessage::assistant(
&response.content,
response.tool_calls.clone(),
));
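            // Execute requested tool calls one at a time; failures are returned
            // to the model as JSON errors so it can recover. This path is
            // sequential even when `parallel_tools` is set.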
for tc in &response.tool_calls {
let result = registry.execute(&tc.name, tc.arguments.clone()).await;
let result_value = match result {
Ok(v) => v,
Err(e) => serde_json::json!({"error": e.to_string()}),
};
messages.push(ConversationMessage::tool_result(&tc.id, &result_value));
}
}
tracing::warn!(
agent = %self.name,
"Max tool iterations ({}) reached — making final synthesis call",
self.max_tool_iterations
);
let final_response = self
.llm
.generate_with_tools_and_history(&messages, &[])
.await;
        // Prefer the synthesis response; if it is empty or the call failed,
        // fall back to the last non-empty assistant message in the transcript.
        let last_assistant = || {
            messages
                .iter()
                .rev()
                .find(|m| {
                    m.role == crate::llm::coordinator::MessageRole::Assistant
                        && !m.content.is_empty()
                })
                .map(|m| m.content.clone())
        };
        let content = match final_response {
            Ok(resp) if !resp.content.is_empty() => resp.content,
            Ok(_) => last_assistant().unwrap_or_else(|| {
                "Agent completed tool calls but could not generate a final response.".to_string()
            }),
            Err(e) => {
                tracing::error!(error = %e, "Final synthesis call failed");
                last_assistant()
                    .unwrap_or_else(|| format!("Agent completed but synthesis failed: {}", e))
            }
        };
Ok(AgentResponse {
content,
usage: Some(total_usage),
metadata: Some(ExecutionMetadata {
model_name: self.llm.model_name().to_string(),
provider_name: "openai".to_string(),
}),
})
}
}
#[async_trait]
impl Agent for ConfigurableAgent {
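    /// Entry point: takes the tool-calling path when tools are configured and
    /// a registry is attached, otherwise performs a plain chat completion over
    /// the system prompt, optional user-memory context, and recent history.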
async fn execute(&self, input: &str, context: &AgentContext) -> Result<AgentResponse> {
if self.has_tools() {
tracing::debug!(agent = %self.name, "execute: using tool-calling path");
return self.execute_with_tools(input, context).await;
}
tracing::debug!(agent = %self.name, "execute: no tools, using simple path");
        let effective_prompt = self.effective_system_prompt();
let mut messages = vec![("system".to_string(), effective_prompt)];
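        // Surface stored user preferences as an additional system message.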
if let Some(memory) = &context.user_memory {
let memory_context = format!(
"User preferences: {}",
memory
.preferences
.iter()
.map(|p| format!("{}: {}", p.key, p.value))
.collect::<Vec<_>>()
.join(", ")
);
messages.push(("system".to_string(), memory_context));
}
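        // Bound prompt size by including only the five most recent history messages.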
for msg in context.conversation_history.iter().rev().take(5).rev() {
let role = match msg.role {
crate::types::MessageRole::User => "user",
crate::types::MessageRole::Assistant => "assistant",
_ => "system",
};
messages.push((role.to_string(), msg.content.clone()));
}
messages.push(("user".to_string(), input.to_string()));
let llm_response = self.llm.generate_with_history(&messages).await?;
Ok(AgentResponse {
content: llm_response.content,
usage: llm_response.usage,
metadata: Some(ExecutionMetadata {
model_name: self.llm.model_name().to_string(),
provider_name: "openai".to_string(),
}),
})
}
fn system_prompt(&self) -> String {
self.system_prompt.clone()
}
fn agent_type(&self) -> AgentType {
self.agent_type.clone()
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::llm::LLMResponse;
    use std::collections::HashMap;
#[test]
fn test_name_to_type() {
assert!(matches!(
ConfigurableAgent::name_to_type("router"),
AgentType::Router
));
assert!(matches!(
ConfigurableAgent::name_to_type("PRODUCT"),
AgentType::Product
));
assert!(matches!(
ConfigurableAgent::name_to_type("unknown"),
AgentType::Custom(_)
));
if let AgentType::Custom(name) = ConfigurableAgent::name_to_type("my-custom-agent") {
assert_eq!(name, "my-custom-agent");
} else {
panic!("Expected Custom variant");
}
}
#[test]
fn test_default_system_prompt() {
let prompt = ConfigurableAgent::default_system_prompt("router");
assert!(prompt.contains("routing"));
let prompt = ConfigurableAgent::default_system_prompt("product");
assert!(prompt.contains("Product"));
}
    /// Shared mock LLM for the tests below: every method returns a canned
    /// "mock" response with no tool calls. (Previously duplicated verbatim
    /// inside each test.)
    struct MockLLM;
    #[async_trait]
    impl LLMClient for MockLLM {
        async fn generate(&self, _: &str) -> Result<String> {
            Ok("mock".to_string())
        }
        async fn generate_with_system(&self, _: &str, _: &str) -> Result<String> {
            Ok("mock".to_string())
        }
        async fn generate_with_history(&self, _: &[(String, String)]) -> Result<LLMResponse> {
            Ok(LLMResponse {
                content: "mock".to_string(),
                tool_calls: vec![],
                finish_reason: "stop".to_string(),
                usage: None,
            })
        }
        async fn generate_with_tools(
            &self,
            _: &str,
            _: &[ToolDefinition],
        ) -> Result<LLMResponse> {
            Ok(LLMResponse {
                content: "mock".to_string(),
                tool_calls: vec![],
                finish_reason: "stop".to_string(),
                usage: None,
            })
        }
        async fn generate_with_tools_and_history(
            &self,
            _: &[crate::llm::coordinator::ConversationMessage],
            _: &[ToolDefinition],
        ) -> Result<LLMResponse> {
            Ok(LLMResponse {
                content: "mock".to_string(),
                tool_calls: vec![],
                finish_reason: "stop".to_string(),
                usage: None,
            })
        }
        async fn stream(
            &self,
            _: &str,
        ) -> Result<Box<dyn futures::Stream<Item = Result<String>> + Send + Unpin>> {
            Ok(Box::new(futures::stream::empty()))
        }
        async fn stream_with_system(
            &self,
            _: &str,
            _: &str,
        ) -> Result<Box<dyn futures::Stream<Item = Result<String>> + Send + Unpin>> {
            Ok(Box::new(futures::stream::empty()))
        }
        async fn stream_with_history(
            &self,
            _: &[(String, String)],
        ) -> Result<Box<dyn futures::Stream<Item = Result<String>> + Send + Unpin>> {
            Ok(Box::new(futures::stream::empty()))
        }
        fn model_name(&self) -> &str {
            "mock"
        }
    }
    /// Builds a minimal `AgentConfig` with the given tool allowlist.
    fn test_config(tools: Vec<String>) -> AgentConfig {
        AgentConfig {
            model: "default".to_string(),
            system_prompt: None,
            tools,
            max_tool_iterations: 5,
            parallel_tools: false,
            extra: HashMap::new(),
        }
    }
    #[test]
    fn test_allowed_tools() {
        let config = test_config(vec!["calculator".to_string(), "web_search".to_string()]);
        let agent = ConfigurableAgent::new("orchestrator", &config, Box::new(MockLLM), None);
        assert_eq!(agent.allowed_tools().len(), 2);
        assert!(agent.allowed_tools().contains(&"calculator".to_string()));
        assert!(agent.allowed_tools().contains(&"web_search".to_string()));
    }
    #[test]
    fn test_has_tools_requires_both_config_and_registry() {
        // Tools configured but no registry attached: has_tools() must be false.
        let agent = ConfigurableAgent::new(
            "orchestrator",
            &test_config(vec!["calculator".to_string()]),
            Box::new(MockLLM),
            None,
        );
        assert!(!agent.has_tools());
        // Registry attached but no tools configured: has_tools() must be false.
        let agent_empty = ConfigurableAgent::new(
            "product",
            &test_config(vec![]),
            Box::new(MockLLM),
            Some(Arc::new(ToolRegistry::new())),
        );
        assert!(!agent_empty.has_tools());
    }
}