pub mod builder;
pub mod config;
pub mod consumers;
pub mod errors;
pub mod history;
pub mod quick;
pub mod runtime;
pub mod runtime_agent;
pub mod streaming;
pub mod streaming_builder;
pub mod toml_agent;
use anyhow::Result;
use async_trait::async_trait;
use crate::llm::{ChatMessage, Role, ToolSpec};
use crate::tools::{ToolConcurrency, ToolContext};
#[async_trait]
pub trait Agent: Send + Sync {
fn name(&self) -> &str;
fn provider(&self) -> Option<crate::llm::LlmProvider> {
None }
fn apply_config_overrides(&self, _cfg: &mut crate::config::AppConfig) {
}
fn required_completion_tools(&self) -> Option<&Vec<String>> {
None
}
fn max_continuations(&self) -> usize {
2
}
fn continuation_message(&self) -> Option<&str> {
None
}
fn system_prompt(&self) -> Result<String>;
fn available_tools(&self) -> Result<Vec<ToolSpec>>;
fn execute_tool(&self, name: &str, _args: serde_json::Value) -> Result<serde_json::Value> {
Err(anyhow::anyhow!("Tool not found: {}", name))
}
async fn execute_tool_with_context(
&self,
name: &str,
_ctx: ToolContext,
args: serde_json::Value,
) -> Result<serde_json::Value> {
self.execute_tool(name, args)
}
fn tool_concurrency(&self, _name: &str) -> ToolConcurrency {
ToolConcurrency::SerialOnly
}
fn provider_parallel_tool_calls(&self) -> bool {
false
}
fn max_concurrent_tool_executions(&self) -> usize {
1
}
async fn run(&self, user_prompt: &str) -> Result<Session> {
runtime::default_run(self, user_prompt).await
}
async fn run_streaming(
&self,
user_prompt: &str,
consumer: Box<dyn streaming::StreamConsumer>,
) -> Result<Session> {
runtime::default_run_streaming(self, user_prompt, consumer).await
}
async fn run_with_consumers(
&self,
user_prompt: &str,
consumers: Vec<Box<dyn streaming::StreamConsumer>>,
) -> Result<Session> {
let multi = streaming::MultiConsumer::new();
let multi = consumers.into_iter().fold(multi, |m, c| m.add(c));
self.run_streaming(user_prompt, Box::new(multi)).await
}
fn initial_messages(&self, user_prompt: &str) -> Result<Vec<ChatMessage>> {
let system = self.system_prompt()?;
let now = chrono::Utc::now();
Ok(vec![
ChatMessage {
role: Role::System,
name: None,
tool_call_id: None,
content: Some(system),
tool_calls: None,
reasoning: None,
raw_content_blocks: None,
tool_metadata: None,
timestamp: Some(now),
id: None,
provider_response_id: None,
status: None,
},
ChatMessage {
role: Role::User,
name: None,
tool_call_id: None,
content: Some(user_prompt.to_string()),
tool_calls: None,
reasoning: None,
raw_content_blocks: None,
tool_metadata: None,
timestamp: Some(now),
id: None,
provider_response_id: None,
status: None,
},
])
}
async fn continue_session(&self, session_id: &str, user_prompt: &str) -> Result<Session> {
runtime::continue_session_run(self, session_id, user_prompt).await
}
async fn continue_session_streaming(
&self,
session_id: &str,
user_prompt: &str,
consumer: Box<dyn streaming::StreamConsumer>,
) -> Result<Session> {
runtime::continue_session_streaming(self, session_id, user_prompt, consumer).await
}
}
/// A recorded agent run: the full message transcript plus run metadata.
///
/// Serializable for persistence; the `#[serde(default)]` fields keep older
/// serialized sessions (written before those fields existed) deserializable.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct Session {
    /// Identifier for this session; accepted by [`Agent::continue_session`] to resume it.
    pub session_id: String,
    /// Name of the agent that produced this session.
    pub agent_name: String,
    /// Model identifier used for the run.
    pub model: String,
    /// Full conversation transcript, including system/user/tool messages.
    pub messages: Vec<ChatMessage>,
    /// When the run started, if recorded.
    #[serde(default)]
    pub started_at: Option<chrono::DateTime<chrono::Utc>>,
    /// When the run ended, if recorded.
    #[serde(default)]
    pub ended_at: Option<chrono::DateTime<chrono::Utc>>,
    /// Aggregated LLM usage for the run, when tracking was enabled.
    #[serde(default)]
    pub usage: Option<crate::llm::usage::AggregatedUsage>,
}
pub use builder::{AgentBuilder, ReasoningProvider};
pub use runtime_agent::RuntimeAgent;
pub use toml_agent::TomlAgent;