pub fn agent_turns(
tools: impl Tool + 'static,
client: Client,
base_request: Request,
history: Vec<Message>,
history_budget: Option<usize>,
) -> AgentTurnsStream
Drive the LLM → tool loop using non-streaming Request::complete calls,
yielding one crate::types::CompleteResponse per LLM turn.
Like agent but at turn granularity instead of token granularity:
each item in the stream is a complete LLM response for one turn.
Intermediate turns (where the model called tools) are yielded before tool
execution; the final turn (no tool calls) is the last item.
Tool calls are executed concurrently between turns, exactly as in agent().
§ Usage patterns
use agentix::{ToolBundle, Request, Provider};
use futures::StreamExt;
let client = reqwest::Client::new();
let request = Request::new(Provider::OpenAI, "sk-...");
// Just the final text:
let text = agentix::agent_turns(ToolBundle::default(), client.clone(), request.clone(), vec![], None)
.last_content().await;
println!("{text}");
// Full response (with usage, tool_calls, etc.):
let response = agentix::agent_turns(ToolBundle::default(), client.clone(), request.clone(), vec![], None)
.last_ok().await;
// With per-turn progress:
let mut stream = agentix::agent_turns(ToolBundle::default(), client, request, vec![], None);
while let Some(Ok(resp)) = stream.next().await {
eprintln!("turn: {} tool calls", resp.tool_calls.len());
}