// tirea_agent_loop/runtime/loop_runner/config.rs
1use super::tool_exec::ParallelToolExecutor;
2use super::AgentLoopError;
3use crate::contracts::runtime::plugin::AgentPlugin;
4use crate::contracts::runtime::ToolExecutor;
5use crate::contracts::runtime::tool_call::{Tool, ToolDescriptor};
6use crate::contracts::RunContext;
7use async_trait::async_trait;
8use genai::chat::ChatOptions;
9use genai::Client;
10use std::collections::HashMap;
11use std::sync::Arc;
12
/// Retry strategy applied to LLM inference calls.
#[derive(Debug, Clone)]
pub struct LlmRetryPolicy {
    /// Maximum number of attempts for each model candidate; must be at least 1.
    pub max_attempts_per_model: usize,
    /// Backoff used before the first retry, in milliseconds.
    pub initial_backoff_ms: u64,
    /// Upper bound on backoff growth, in milliseconds.
    pub max_backoff_ms: u64,
    /// Whether stream startup failures (before any output is emitted) are retried.
    pub retry_stream_start: bool,
}
25
26impl Default for LlmRetryPolicy {
27    fn default() -> Self {
28        Self {
29            max_attempts_per_model: 2,
30            initial_backoff_ms: 250,
31            max_backoff_ms: 2_000,
32            retry_stream_start: true,
33        }
34    }
35}
36
37/// Input context passed to per-step tool providers.
38pub struct StepToolInput<'a> {
39    /// Current run context at step boundary.
40    pub state: &'a RunContext,
41}
42
43/// Tool snapshot resolved for one step.
44#[derive(Clone, Default)]
45pub struct StepToolSnapshot {
46    /// Concrete tool map used for this step.
47    pub tools: HashMap<String, Arc<dyn Tool>>,
48    /// Tool descriptors exposed to plugins/LLM for this step.
49    pub descriptors: Vec<ToolDescriptor>,
50}
51
52impl StepToolSnapshot {
53    /// Build a step snapshot from a concrete tool map.
54    pub fn from_tools(tools: HashMap<String, Arc<dyn Tool>>) -> Self {
55        let descriptors = tools
56            .values()
57            .map(|tool| tool.descriptor().clone())
58            .collect();
59        Self { tools, descriptors }
60    }
61}
62
63/// Provider that resolves the tool snapshot for each step.
64#[async_trait]
65pub trait StepToolProvider: Send + Sync {
66    /// Resolve tool map + descriptors for the current step.
67    async fn provide(&self, input: StepToolInput<'_>) -> Result<StepToolSnapshot, AgentLoopError>;
68}
69
70/// Boxed stream of LLM chat events.
71pub type LlmEventStream = std::pin::Pin<
72    Box<dyn futures::Stream<Item = Result<genai::chat::ChatStreamEvent, genai::Error>> + Send>,
73>;
74
75/// Abstraction over LLM inference backends.
76///
77/// The agent loop calls this trait for both non-streaming (`exec_chat_response`)
78/// and streaming (`exec_chat_stream_events`) inference.  The default
79/// implementation ([`GenaiLlmExecutor`]) delegates to `genai::Client`.
80#[async_trait]
81pub trait LlmExecutor: Send + Sync {
82    /// Run a non-streaming chat completion.
83    async fn exec_chat_response(
84        &self,
85        model: &str,
86        chat_req: genai::chat::ChatRequest,
87        options: Option<&genai::chat::ChatOptions>,
88    ) -> genai::Result<genai::chat::ChatResponse>;
89
90    /// Run a streaming chat completion, returning a boxed event stream.
91    async fn exec_chat_stream_events(
92        &self,
93        model: &str,
94        chat_req: genai::chat::ChatRequest,
95        options: Option<&genai::chat::ChatOptions>,
96    ) -> genai::Result<LlmEventStream>;
97
98    /// Stable label for logging / debug output.
99    fn name(&self) -> &'static str;
100}
101
102/// Default LLM executor backed by `genai::Client`.
103#[derive(Clone)]
104pub struct GenaiLlmExecutor {
105    client: Client,
106}
107
108impl GenaiLlmExecutor {
109    pub fn new(client: Client) -> Self {
110        Self { client }
111    }
112}
113
114impl std::fmt::Debug for GenaiLlmExecutor {
115    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
116        f.debug_struct("GenaiLlmExecutor").finish()
117    }
118}
119
120#[async_trait]
121impl LlmExecutor for GenaiLlmExecutor {
122    async fn exec_chat_response(
123        &self,
124        model: &str,
125        chat_req: genai::chat::ChatRequest,
126        options: Option<&ChatOptions>,
127    ) -> genai::Result<genai::chat::ChatResponse> {
128        self.client.exec_chat(model, chat_req, options).await
129    }
130
131    async fn exec_chat_stream_events(
132        &self,
133        model: &str,
134        chat_req: genai::chat::ChatRequest,
135        options: Option<&ChatOptions>,
136    ) -> genai::Result<LlmEventStream> {
137        let resp = self
138            .client
139            .exec_chat_stream(model, chat_req, options)
140            .await?;
141        Ok(Box::pin(resp.stream))
142    }
143
144    fn name(&self) -> &'static str {
145        "genai_client"
146    }
147}
148
149/// Static provider that always returns the same tool map.
150#[derive(Clone, Default)]
151pub struct StaticStepToolProvider {
152    tools: HashMap<String, Arc<dyn Tool>>,
153}
154
155impl StaticStepToolProvider {
156    pub fn new(tools: HashMap<String, Arc<dyn Tool>>) -> Self {
157        Self { tools }
158    }
159}
160
161#[async_trait]
162impl StepToolProvider for StaticStepToolProvider {
163    async fn provide(&self, _input: StepToolInput<'_>) -> Result<StepToolSnapshot, AgentLoopError> {
164        Ok(StepToolSnapshot::from_tools(self.tools.clone()))
165    }
166}
167
168/// Runtime configuration for the agent loop.
169#[derive(Clone)]
170pub struct AgentConfig {
171    /// Unique identifier for this agent.
172    pub id: String,
173    /// Model identifier (e.g., "gpt-4", "claude-3-opus").
174    pub model: String,
175    /// System prompt for the LLM.
176    pub system_prompt: String,
177    /// Optional loop-budget hint (core loop does not enforce this directly).
178    pub max_rounds: usize,
179    /// Tool execution strategy (parallel, sequential, or custom).
180    pub tool_executor: Arc<dyn ToolExecutor>,
181    /// Chat options for the LLM.
182    pub chat_options: Option<ChatOptions>,
183    /// Fallback model ids used when the primary model fails.
184    ///
185    /// Evaluated in order after `model`.
186    pub fallback_models: Vec<String>,
187    /// Retry policy for LLM inference failures.
188    pub llm_retry_policy: LlmRetryPolicy,
189    /// Plugins to run during the agent loop.
190    pub plugins: Vec<Arc<dyn AgentPlugin>>,
191    /// Optional per-step tool provider.
192    ///
193    /// When not set, the loop uses a static provider derived from the `tools`
194    /// map passed to `run_step` / `run_loop` / `run_loop_stream`.
195    pub step_tool_provider: Option<Arc<dyn StepToolProvider>>,
196    /// Optional LLM executor override.
197    ///
198    /// When not set, the loop uses [`GenaiLlmExecutor`] with `Client::default()`.
199    pub llm_executor: Option<Arc<dyn LlmExecutor>>,
200}
201
202impl Default for AgentConfig {
203    fn default() -> Self {
204        Self {
205            id: "default".to_string(),
206            model: "gpt-4o-mini".to_string(),
207            system_prompt: String::new(),
208            max_rounds: 10,
209            tool_executor: Arc::new(ParallelToolExecutor::streaming()),
210            chat_options: Some(
211                ChatOptions::default()
212                    .with_capture_usage(true)
213                    .with_capture_reasoning_content(true)
214                    .with_capture_tool_calls(true),
215            ),
216            fallback_models: Vec::new(),
217            llm_retry_policy: LlmRetryPolicy::default(),
218            plugins: Vec::new(),
219            step_tool_provider: None,
220            llm_executor: None,
221        }
222    }
223}
224
225impl std::fmt::Debug for AgentConfig {
226    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
227        f.debug_struct("AgentConfig")
228            .field("id", &self.id)
229            .field("model", &self.model)
230            .field(
231                "system_prompt",
232                &format!("[{} chars]", self.system_prompt.len()),
233            )
234            .field("max_rounds", &self.max_rounds)
235            .field("tool_executor", &self.tool_executor.name())
236            .field("chat_options", &self.chat_options)
237            .field("fallback_models", &self.fallback_models)
238            .field("llm_retry_policy", &self.llm_retry_policy)
239            .field("plugins", &format!("[{} plugins]", self.plugins.len()))
240            .field(
241                "step_tool_provider",
242                &self.step_tool_provider.as_ref().map(|_| "<set>"),
243            )
244            .field(
245                "llm_executor",
246                &self
247                    .llm_executor
248                    .as_ref()
249                    .map(|executor| executor.name())
250                    .unwrap_or("genai_client(default)"),
251            )
252            .finish()
253    }
254}
255
256impl AgentConfig {
257    tirea_contract::impl_shared_agent_builder_methods!();
258    tirea_contract::impl_loop_config_builder_methods!();
259
260    /// Set tool executor strategy.
261    #[must_use]
262    pub fn with_tool_executor(mut self, executor: Arc<dyn ToolExecutor>) -> Self {
263        self.tool_executor = executor;
264        self
265    }
266
267    /// Set static tool map (wraps in [`StaticStepToolProvider`]).
268    ///
269    /// Prefer passing tools directly to [`run_loop`] / [`run_loop_stream`];
270    /// use this only when you need to set tools via `step_tool_provider`.
271    #[must_use]
272    pub fn with_tools(self, tools: HashMap<String, Arc<dyn Tool>>) -> Self {
273        self.with_step_tool_provider(Arc::new(StaticStepToolProvider::new(tools)))
274    }
275
276    /// Set per-step tool provider.
277    #[must_use]
278    pub fn with_step_tool_provider(mut self, provider: Arc<dyn StepToolProvider>) -> Self {
279        self.step_tool_provider = Some(provider);
280        self
281    }
282
283    /// Set LLM executor.
284    #[must_use]
285    pub fn with_llm_executor(mut self, executor: Arc<dyn LlmExecutor>) -> Self {
286        self.llm_executor = Some(executor);
287        self
288    }
289
290    /// Check if any plugins are configured.
291    pub fn has_plugins(&self) -> bool {
292        !self.plugins.is_empty()
293    }
294}