pub struct Client { /* private fields */ }
Stateful client for multi-turn conversations with automatic history management.
The Client is the primary interface for building conversational AI applications.
It maintains conversation history, manages streaming responses, and provides two
modes of operation: manual and automatic tool execution.
§State Management
The client maintains several pieces of state that persist across multiple turns:
- Conversation History: Complete record of all messages exchanged
- Active Stream: Currently active SSE stream being consumed
- Interrupt Flag: Thread-safe cancellation signal
- Auto-Execution Buffer: Cached blocks for auto-execution mode
§Operating Modes
§Manual Mode (default)
In manual mode, the client streams blocks directly to the caller. When the model
requests a tool, you receive a ToolUseBlock, execute the tool yourself, add the
result with add_tool_result(), and continue the conversation.
Advantages:
- Full control over tool execution
- Custom error handling per tool
- Ability to modify tool inputs/outputs
- Interactive debugging capabilities
§Automatic Mode (auto_execute_tools = true)
In automatic mode, the client executes tools transparently and only returns the final text response after all tool iterations complete.
Advantages:
- Simpler API for common use cases
- Built-in retry logic via hooks
- Automatic conversation continuation
- Configurable iteration limits
§Thread Safety
The client is NOT thread-safe for concurrent use. However, the interrupt mechanism
uses Arc<AtomicBool> which can be safely shared across threads to signal cancellation.
§Memory Management
- History grows unbounded by default (consider clearing periodically)
- Streams are consumed lazily (low memory footprint during streaming)
- Auto-execution buffers entire response (higher memory in auto mode)
§Examples
§Basic Multi-Turn Conversation
use open_agent::{Client, AgentOptions, ContentBlock};
let mut client = Client::new(AgentOptions::builder()
.model("gpt-4")
.api_key("sk-...")
.build()?)?;
// First question
client.send("What's the capital of France?").await?;
while let Some(block) = client.receive().await? {
if let ContentBlock::Text(text) = block {
println!("{}", text.text); // "Paris is the capital of France."
}
}
// Follow-up question - history is automatically maintained
client.send("What's its population?").await?;
while let Some(block) = client.receive().await? {
if let ContentBlock::Text(text) = block {
println!("{}", text.text); // "Paris has approximately 2.2 million people."
}
}
§Manual Tool Execution
use open_agent::{Client, AgentOptions, ContentBlock, Tool};
use serde_json::json;
let calculator = Tool::new(
"calculator",
"Performs arithmetic",
json!({"type": "object"}),
|input| Box::pin(async move { Ok(json!({"result": 42})) })
);
let mut client = Client::new(AgentOptions::builder()
.model("gpt-4")
.api_key("sk-...")
.tools(vec![calculator])
.build()?)?;
client.send("What's 2+2?").await?;
while let Some(block) = client.receive().await? {
match block {
ContentBlock::ToolUse(tool_use) => {
// Execute tool manually
let result = json!({"result": 4});
client.add_tool_result(tool_use.id(), result)?;
// Continue conversation to get model's response
client.send("").await?;
}
ContentBlock::Text(text) => {
println!("{}", text.text); // "The result is 4."
}
ContentBlock::ToolResult(_) | ContentBlock::Image(_) => {}
}
}
§Automatic Tool Execution
use open_agent::{Client, AgentOptions, ContentBlock, Tool};
use serde_json::json;
let calculator = Tool::new(
"calculator",
"Performs arithmetic",
json!({"type": "object"}),
|input| Box::pin(async move { Ok(json!({"result": 42})) })
);
let mut client = Client::new(AgentOptions::builder()
.model("gpt-4")
.api_key("sk-...")
.tools(vec![calculator])
.auto_execute_tools(true) // Enable auto-execution
.build()?)?;
client.send("What's 2+2?").await?;
// Tools execute automatically - you only receive final text
while let Some(block) = client.receive().await? {
if let ContentBlock::Text(text) = block {
println!("{}", text.text); // "The result is 4."
}
}
§With Interruption
use open_agent::{Client, AgentOptions};
use std::time::Duration;
let mut client = Client::new(AgentOptions::default())?;
// Start a long-running query
client.send("Write a very long story").await?;
// Spawn a task to interrupt after timeout
let interrupt_handle = client.interrupt_handle();
tokio::spawn(async move {
tokio::time::sleep(Duration::from_secs(5)).await;
interrupt_handle.store(true, std::sync::atomic::Ordering::SeqCst);
});
// This loop will stop when interrupted
while let Some(block) = client.receive().await? {
// Process blocks...
}
// Client is still usable after interruption
client.send("What's 2+2?").await?;
Implementations§
Source§impl Client
impl Client
Sourcepub fn new(options: AgentOptions) -> Result<Self>
pub fn new(options: AgentOptions) -> Result<Self>
Creates a new client with the specified configuration.
This constructor initializes all state fields and creates a reusable HTTP client
configured with the timeout from AgentOptions.
§Parameters
options: Configuration including model, API key, tools, hooks, etc.
§Errors
Returns an error if the HTTP client cannot be built. This can happen due to:
- Invalid TLS configuration
- System resource exhaustion
- Invalid timeout values
§Examples
use open_agent::{Client, AgentOptions};
let client = Client::new(AgentOptions::builder()
.model("gpt-4")
.base_url("http://localhost:1234/v1")
.build()?)?;Sourcepub async fn send(&mut self, prompt: &str) -> Result<()>
pub async fn send(&mut self, prompt: &str) -> Result<()>
Sends a user message and initiates streaming of the model’s response.
This method performs several critical steps:
- Executes UserPromptSubmit hooks (which can modify or block the prompt)
- Adds the user message to conversation history
- Builds and sends HTTP request to the OpenAI-compatible API
- Parses the SSE stream and sets up aggregation
- Stores the stream for consumption via receive()
§Parameters
prompt: The user’s message. Can be empty to continue conversation after adding tool results (common pattern in manual tool execution mode).
§Returns
Ok(()): Request sent successfully; call receive() to get blocks. Err(e): Request failed (network error, API error, hook blocked, etc.)
§Behavior Details
§Hook Execution
Before sending, UserPromptSubmit hooks are executed. Hooks can:
- Modify the prompt text
- Block the request entirely
- Access conversation history
If a hook blocks the request, this method returns an error immediately.
§History Management
The prompt is added to history BEFORE sending the request. This ensures that history is consistent even if the request fails.
§Stream Setup
The response stream is set up but not consumed. You must call receive()
repeatedly to get content blocks. The stream remains active until:
- All blocks are consumed (stream naturally ends)
- An error occurs
- Interrupt is triggered
§Interrupt Handling
The interrupt flag is reset to false at the start of this method,
allowing a fresh request after a previous interruption.
§State Changes
- Resets the interrupted flag to false
- Appends the user message to history
- Sets current_stream to the new SSE stream
- Does NOT modify auto_exec_buffer or auto_exec_index
§Errors
Returns errors for:
- Hook blocking the prompt
- HTTP client errors (network failure, DNS, etc.)
- API errors (auth failure, invalid model, rate limits)
- Invalid response format
After an error, the client remains usable for new requests.
§Examples
§Basic Usage
client.send("Hello!").await?;
while let Some(block) = client.receive().await? {
// Process blocks...
}§Continuing After Tool Result
client.send("Use the calculator").await?;
while let Some(block) = client.receive().await? {
if let ContentBlock::ToolUse(tool_use) = block {
// Execute tool and add result
client.add_tool_result(tool_use.id(), json!({"result": 42}))?;
// Continue conversation with empty prompt
client.send("").await?;
}
}Sourcepub async fn send_message(&mut self, message: Message) -> Result<()>
pub async fn send_message(&mut self, message: Message) -> Result<()>
Receives the next content block from the current stream.
This is the primary method for consuming responses from the model. It works differently depending on the operating mode:
§Manual Mode (default)
Streams blocks directly from the API response as they arrive. You receive:
- TextBlock: Incremental text from the model
- ToolUseBlock: Requests to execute tools
- Other block types as they're emitted
When you receive a ToolUseBlock, you must:
- Execute the tool yourself
- Call add_tool_result() with the result
- Call send("") to continue the conversation
§Automatic Mode (auto_execute_tools = true)
Transparently executes tools and only returns final text blocks. The first
call to receive() triggers the auto-execution loop which:
- Collects all blocks from the stream
- Executes any tool calls automatically
- Continues the conversation until reaching a text-only response
- Buffers the final text blocks
- Returns them one at a time on subsequent receive() calls
§Returns
Ok(Some(block)): Successfully received a content block. Ok(None): Stream ended normally or was interrupted. Err(e): An error occurred during streaming or tool execution.
§Behavior Details
§Interruption
Checks the interrupt flag on every call. If interrupted, immediately returns
Ok(None) and clears the stream. The client can be reused after interruption.
§Stream Lifecycle
- After send(), the stream is active
- Each receive() call yields one block
- When the stream ends, returns Ok(None)
- Subsequent calls continue returning Ok(None) until the next send()
§Auto-Execution Buffer
In auto mode, blocks are buffered in memory. The buffer persists until fully consumed (index reaches length), at which point it’s cleared.
§State Changes
- Advances stream position
- In auto mode: May trigger entire execution loop and modify history
- In manual mode: Only reads from stream, no history changes
- Increments auto_exec_index when returning buffered blocks
§Examples
§Manual Mode - Basic
client.send("Hello!").await?;
while let Some(block) = client.receive().await? {
match block {
ContentBlock::Text(text) => print!("{}", text.text),
ContentBlock::ToolUse(_) | ContentBlock::ToolResult(_) | ContentBlock::Image(_) => {}
}
}§Manual Mode - With Tools
client.send("Use the calculator").await?;
while let Some(block) = client.receive().await? {
match block {
ContentBlock::Text(text) => {
println!("{}", text.text);
}
ContentBlock::ToolUse(tool_use) => {
println!("Executing: {}", tool_use.name());
// Execute tool manually
let result = json!({"result": 42});
// Add result and continue
client.add_tool_result(tool_use.id(), result)?;
client.send("").await?;
}
ContentBlock::ToolResult(_) | ContentBlock::Image(_) => {}
}
}§Auto Mode
let mut client = Client::new(AgentOptions::builder()
.auto_execute_tools(true)
.build()?)?;
client.send("Calculate 2+2").await?;
// Tools execute automatically - you only get final text
while let Some(block) = client.receive().await? {
if let ContentBlock::Text(text) = block {
println!("{}", text.text);
}
}§With Error Handling
client.send("Hello").await?;
loop {
match client.receive().await {
Ok(Some(block)) => {
// Process block
}
Ok(None) => {
// Stream ended
break;
}
Err(e) => {
eprintln!("Error: {}", e);
break;
}
}
}Sends a pre-built message to the AI model.
This method allows sending messages with images or custom content blocks
that cannot be expressed as simple text prompts. Use the Message helper
methods like user_with_image(),
user_with_image_detail(), or
user_with_base64_image() to create
messages with multimodal content.
Unlike send(), this method:
- Accepts pre-built Message objects instead of text prompts
- Bypasses UserPromptSubmit hooks (since the message is already constructed)
- Enables multimodal interactions (text + images)
After calling this method, use receive() to get the
response content blocks.
§Arguments
message - A pre-built message (typically created with Message::user_with_image() or similar helpers)
§Errors
Returns Error if:
- Network request fails
- Server returns an error
- Response cannot be parsed
- Request is interrupted via interrupt()
§Example
use open_agent::{Client, AgentOptions, Message, ImageDetail};
let options = AgentOptions::builder()
.model("gpt-4-vision-preview")
.base_url("http://localhost:1234/v1")
.build()?;
let mut client = Client::new(options)?;
// Send a message with an image
let msg = Message::user_with_image(
"What's in this image?",
"https://example.com/photo.jpg"
)?;
client.send_message(msg).await?;
// Receive the response
while let Some(block) = client.receive().await? {
// Process response blocks
}pub async fn receive(&mut self) -> Result<Option<ContentBlock>>
Sourcepub fn interrupt(&self)
pub fn interrupt(&self)
Interrupts the current operation by setting the interrupt flag.
This method provides a thread-safe way to cancel any in-progress streaming
operation. The interrupt flag is checked by receive() before each block,
allowing responsive cancellation.
§Behavior
- Sets the atomic interrupt flag to true
- The next receive() call will return Ok(None) and clear the stream
- The flag is automatically reset to false on the next send() call
- Safe to call from any thread (uses atomic operations)
- Idempotent: calling multiple times has same effect as calling once
- No-op if no operation is in progress
§Thread Safety
This method uses Arc<AtomicBool> internally, which can be safely shared
across threads. You can clone the interrupt handle and use it from different
threads or async tasks:
let mut client = Client::new(AgentOptions::default())?;
let interrupt_handle = client.interrupt_handle();
// Use from another thread
tokio::spawn(async move {
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
interrupt_handle.store(true, std::sync::atomic::Ordering::SeqCst);
});§State Changes
- Sets the interrupted flag to true
- Does NOT modify stream, history, or other state directly
- The effect takes place on the next receive() call
§Use Cases
- User cancellation (e.g., stop button in UI)
- Timeout enforcement
- Resource cleanup
- Emergency shutdown
§Examples
§Basic Interruption
use open_agent::{Client, AgentOptions};
let mut client = Client::new(AgentOptions::default())?;
client.send("Tell me a long story").await?;
// Interrupt after receiving some blocks
let mut count = 0;
while let Some(block) = client.receive().await? {
count += 1;
if count >= 5 {
client.interrupt();
}
}
// Client is ready for new queries
client.send("What's 2+2?").await?;§With Timeout
use open_agent::{Client, AgentOptions};
use std::time::Duration;
let mut client = Client::new(AgentOptions::default())?;
client.send("Long request").await?;
// Spawn timeout task
let interrupt_handle = client.interrupt_handle();
tokio::spawn(async move {
tokio::time::sleep(Duration::from_secs(10)).await;
interrupt_handle.store(true, std::sync::atomic::Ordering::SeqCst);
});
while let Some(_block) = client.receive().await? {
// Process until timeout
}Sourcepub fn interrupt_handle(&self) -> Arc<AtomicBool>
pub fn interrupt_handle(&self) -> Arc<AtomicBool>
Returns a clone of the interrupt handle for thread-safe cancellation.
This method provides access to the shared Arc<AtomicBool> interrupt flag,
allowing it to be used from other threads or async tasks to signal cancellation.
§Returns
A cloned Arc<AtomicBool> that can be used to interrupt operations from any thread.
§Examples
let mut client = Client::new(AgentOptions::default())?;
let interrupt_handle = client.interrupt_handle();
// Use from another thread
tokio::spawn(async move {
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
interrupt_handle.store(true, std::sync::atomic::Ordering::SeqCst);
});Sourcepub fn history(&self) -> &[Message]
pub fn history(&self) -> &[Message]
Returns a reference to the conversation history.
The history contains all messages exchanged in the conversation, including:
- User messages
- Assistant messages (with text and tool use blocks)
- Tool result messages
§Returns
A slice of Message objects in chronological order.
§Use Cases
- Inspecting conversation context
- Debugging tool execution flow
- Saving conversation state
- Implementing custom history management
§Examples
let client = Client::new(AgentOptions::default())?;
// Initially empty
assert_eq!(client.history().len(), 0);Sourcepub fn history_mut(&mut self) -> &mut Vec<Message>
pub fn history_mut(&mut self) -> &mut Vec<Message>
Returns a mutable reference to the conversation history.
This allows you to modify the history directly for advanced use cases like:
- Removing old messages to manage context length
- Editing messages for retry scenarios
- Injecting synthetic messages for testing
§Warning
Modifying history directly can lead to inconsistent conversation state if not done carefully. The SDK expects history to follow the proper message flow (user → assistant → tool results → assistant, etc.).
§Examples
let mut client = Client::new(AgentOptions::default())?;
// Remove oldest messages to stay within context limit
if client.history().len() > 50 {
client.history_mut().drain(0..10);
}Sourcepub fn options(&self) -> &AgentOptions
pub fn options(&self) -> &AgentOptions
Returns a reference to the agent configuration options.
Provides read-only access to the AgentOptions used to configure this client.
§Use Cases
- Inspecting current configuration
- Debugging issues
- Conditional logic based on settings
§Examples
let client = Client::new(AgentOptions::builder()
.model("gpt-4")
.base_url("http://localhost:1234/v1")
.build()?)?;
println!("Using model: {}", client.options().model());Sourcepub fn clear_history(&mut self)
pub fn clear_history(&mut self)
Clears all conversation history.
This resets the conversation to a blank slate while preserving the client configuration (tools, hooks, model, etc.). The next message will start a fresh conversation with no prior context.
§State Changes
- Clears the history vector
- Does NOT modify the current stream, options, or other state
§Use Cases
- Starting a new conversation
- Preventing context length issues
- Clearing sensitive data
- Implementing conversation sessions
§Examples
let mut client = Client::new(AgentOptions::default())?;
// First conversation
client.send("Hello").await?;
while let Some(_) = client.receive().await? {}
// Clear and start fresh
client.clear_history();
// New conversation with no memory of previous
client.send("Hello again").await?;Sourcepub fn add_tool_result(
&mut self,
tool_use_id: &str,
content: Value,
) -> Result<()>
pub fn add_tool_result( &mut self, tool_use_id: &str, content: Value, ) -> Result<()>
Adds a tool result to the conversation history for manual tool execution.
This method is used exclusively in manual mode after receiving a ToolUseBlock.
The workflow is:
- receive() returns a ToolUseBlock
- You execute the tool yourself
- Call add_tool_result() with the tool's output
- Call send("") to continue the conversation
- The model receives the tool result and generates a response
§Parameters
tool_use_id: The unique ID from the ToolUseBlock (must match exactly). content: The tool's output as a JSON value.
§Behavior
Creates a ToolResultBlock and adds it to conversation history as a tool message.
This preserves the tool call/result pairing that the model needs to understand
the conversation flow.
§State Changes
- Appends a tool message to history
- Does NOT modify the stream or trigger any requests
§Important Notes
- Not used in auto mode: Auto-execution handles tool results automatically
- ID must match: The tool_use_id must match the ID from the ToolUseBlock
- No validation: This method doesn't validate the result format
- Must call send(): After adding result(s), call send("") to continue
§Examples
§Basic Manual Tool Execution
use open_agent::{Client, AgentOptions, ContentBlock};
use serde_json::json;
let mut client = Client::new(AgentOptions::default())?;
client.send("Use the calculator").await?;
while let Some(block) = client.receive().await? {
match block {
ContentBlock::ToolUse(tool_use) => {
// Execute tool manually
let result = json!({"result": 42});
// Add result to history
client.add_tool_result(tool_use.id(), result)?;
// Continue conversation to get model's response
client.send("").await?;
}
ContentBlock::Text(text) => {
println!("{}", text.text);
}
ContentBlock::ToolResult(_) | ContentBlock::Image(_) => {}
}
}§Handling Tool Errors
use open_agent::{Client, AgentOptions, ContentBlock};
use serde_json::json;
while let Some(block) = client.receive().await? {
if let ContentBlock::ToolUse(tool_use) = block {
// Try to execute tool
let result = match execute_tool(tool_use.name(), tool_use.input()) {
Ok(output) => output,
Err(e) => json!({
"error": e.to_string(),
"tool": tool_use.name()
})
};
client.add_tool_result(tool_use.id(), result)?;
client.send("").await?;
}
}
§Multiple Tool Calls
use open_agent::{Client, AgentOptions, ContentBlock};
use serde_json::json;
client.send("Calculate 2+2 and 3+3").await?;
let mut tool_calls = Vec::new();
// Collect all tool calls
while let Some(block) = client.receive().await? {
if let ContentBlock::ToolUse(tool_use) = block {
tool_calls.push(tool_use);
}
}
// Execute and add results for all tools
for tool_call in tool_calls {
let result = json!({"result": 42}); // Execute tool
client.add_tool_result(tool_call.id(), result)?;
}
// Continue conversation
client.send("").await?;Sourcepub fn get_tool(&self, name: &str) -> Option<&Tool>
pub fn get_tool(&self, name: &str) -> Option<&Tool>
Looks up a registered tool by name.
This method provides access to the tool registry for manual execution scenarios.
It searches the tools registered in AgentOptions and returns a reference to
the matching tool if found.
§Parameters
name: The tool name to search for (case-sensitive)
§Returns
Some(&Tool): Tool found. None: No tool with that name.
§Use Cases
- Manual tool execution in response to ToolUseBlock
- Inspecting tool metadata (name, description, schema)
§Examples
§Execute Tool Manually
use open_agent::{Client, AgentOptions, ContentBlock};
while let Some(block) = client.receive().await? {
if let ContentBlock::ToolUse(tool_use) = block {
if let Some(tool) = client.get_tool(tool_use.name()) {
// Execute the tool
let result = tool.execute(tool_use.input().clone()).await?;
client.add_tool_result(tool_use.id(), result)?;
client.send("").await?;
} else {
println!("Unknown tool: {}", tool_use.name());
}
}
}§Check Tool Availability
if client.get_tool("calculator").is_some() {
println!("Calculator is available");
}