pub struct AgentLoop { /* private fields */ }
Agent loop executor
Implementations

impl AgentLoop
pub fn new( llm_client: Arc<dyn LlmClient>, tool_executor: Arc<ToolExecutor>, tool_context: ToolContext, config: AgentConfig, ) -> Self
pub fn with_tool_metrics(self, metrics: Arc<RwLock<ToolMetrics>>) -> Self
Set the tool metrics collector for this agent loop
pub fn with_queue(self, queue: Arc<SessionLaneQueue>) -> Self
Set the lane queue for priority-based tool execution.
When set, tools are routed through the lane queue, which supports External task handling for multi-machine parallel processing.
pub async fn execute(
    &self,
    history: &[Message],
    prompt: &str,
    event_tx: Option<Sender<AgentEvent>>,
) -> Result<AgentResult>
Execute the agent loop for a prompt
Takes the conversation history and a new user prompt. Returns the agent result and the updated message history. When event_tx is provided, the streaming LLM API is used for real-time text output.
pub async fn execute_from_messages(
    &self,
    messages: Vec<Message>,
    session_id: Option<&str>,
    event_tx: Option<Sender<AgentEvent>>,
) -> Result<AgentResult>
Execute the agent loop with pre-built messages (user message already included).
Used by send_with_attachments / stream_with_attachments, where the user message contains multi-modal content and has already been appended to the messages vector.
pub async fn execute_with_session(
    &self,
    history: &[Message],
    prompt: &str,
    session_id: Option<&str>,
    event_tx: Option<Sender<AgentEvent>>,
) -> Result<AgentResult>
Execute the agent loop for a prompt with session context
Takes the conversation history, user prompt, and optional session ID. When session_id is provided, context providers can use it for session-specific context.
pub async fn execute_streaming(
    &self,
    history: &[Message],
    prompt: &str,
) -> Result<(Receiver<AgentEvent>, JoinHandle<Result<AgentResult>>)>
Execute with streaming events
pub async fn plan(
    &self,
    prompt: &str,
    _context: Option<&str>,
) -> Result<ExecutionPlan>
Create an execution plan for a prompt
Delegates to [LlmPlanner] for structured JSON plan generation,
falling back to heuristic planning if the LLM call fails.
pub async fn execute_with_planning(
    &self,
    history: &[Message],
    prompt: &str,
    event_tx: Option<Sender<AgentEvent>>,
) -> Result<AgentResult>
Execute with planning phase
pub async fn extract_goal(&self, prompt: &str) -> Result<AgentGoal>
Extract goal from prompt
Delegates to [LlmPlanner] for structured JSON goal extraction,
falling back to heuristic logic if the LLM call fails.