pub struct Client { /* private fields */ }
Main client for interacting with the Anthropic API.
The Client provides a high-level interface for sending messages to Claude models,
streaming responses, and managing API interactions. It’s designed to be thread-safe
and can be cloned cheaply for use across multiple tasks (see §Concurrent Usage below).
§Examples
§Basic Usage
use anthropic_rust::{Client, Model, ContentBlock};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a client with default configuration
    let client = Client::new(Model::Claude35Sonnet20241022)?;

    // Send a simple message
    let request = client.chat_builder()
        .user_message(ContentBlock::text("Hello, Claude!"))
        .build();

    let response = client.execute_chat(request).await?;
    println!("Response: {:?}", response);
    Ok(())
}
§Advanced Configuration
use anthropic_rust::{Client, Model};
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::builder()
        .api_key("your-api-key")
        .model(Model::Claude35Sonnet20241022)
        .max_tokens(2000)
        .timeout(Duration::from_secs(30))
        .build()?;

    // Use the configured client...
    Ok(())
}
§Streaming Responses
use anthropic_rust::{Client, Model, ContentBlock, StreamEvent};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("Tell me a story"))
        .build();

    let mut stream = client.stream_chat(request).await?;
    while let Some(event) = stream.next().await {
        match event? {
            StreamEvent::ContentBlockDelta { delta, .. } => {
                // Handle streaming text as it arrives
                if let anthropic_rust::ContentDelta::TextDelta { text } = delta {
                    print!("{}", text);
                }
            }
            _ => {}
        }
    }
    Ok(())
}
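§Concurrent Usage
Because the client is thread-safe and cheap to clone, one client can serve many tasks at once. A minimal sketch, assuming (as in the examples above) that the crate's error type converts into Box<dyn std::error::Error>:
use anthropic_rust::{Client, Model, ContentBlock};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;

    let mut handles = Vec::new();
    for prompt in ["What is Rust?", "What is Tokio?"] {
        // Cloning is cheap; each task gets its own handle to the same client.
        let client = client.clone();
        handles.push(tokio::spawn(async move {
            let request = client.chat_builder()
                .user_message(ContentBlock::text(prompt))
                .build();
            client.execute_chat(request).await
        }));
    }

    for handle in handles {
        // The first ? handles task join errors, the second the API result.
        let response = handle.await??;
        println!("{:?}", response);
    }
    Ok(())
}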
Implementations§
impl Client
pub fn builder() -> ClientBuilder
Create a new client builder for advanced configuration.
Use this method when you need to customize client settings beyond the defaults. The builder provides a fluent API for setting API keys, timeouts, base URLs, and more.
§Examples
use anthropic_rust::{Client, Model};
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::builder()
        .api_key("your-api-key")
        .model(Model::Claude35Sonnet20241022)
        .max_tokens(2000)
        .timeout(Duration::from_secs(30))
        .build()?;
    Ok(())
}
pub fn new(model: Model) -> Result<Self>
Create a new client with the specified model using environment variables for configuration.
This is the simplest way to create a client. It will automatically read the API key
from the ANTHROPIC_API_KEY environment variable and use default settings for
everything else.
§Arguments
- model - The Claude model to use for requests
§Errors
Returns an error if:
- The ANTHROPIC_API_KEY environment variable is not set
- The API key is invalid or empty
- Network configuration fails
§Examples
use anthropic_rust::{Client, Model};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Requires ANTHROPIC_API_KEY environment variable
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    Ok(())
}
pub async fn execute_chat(&self, request: ChatRequest) -> Result<Message>
Execute a chat request using the client’s configured model and max_tokens.
This is the primary method for sending messages to Claude. It uses the model and max_tokens configured when the client was created.
§Arguments
- request - The chat request containing messages and optional parameters
§Returns
Returns a Message containing Claude’s response, including content blocks,
usage statistics, and metadata.
§Errors
This method can return various errors:
- Error::Authentication - Invalid API key
- Error::RateLimit - Too many requests
- Error::Network - Network connectivity issues
- Error::Api - API-specific errors (invalid parameters, etc.)

A sketch of matching on these variants follows the example below.
§Examples
use anthropic_rust::{Client, Model, ContentBlock};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("What is the capital of France?"))
        .build();

    let response = client.execute_chat(request).await?;
    for content in response.content {
        if let ContentBlock::Text { text, .. } = content {
            println!("Claude: {}", text);
        }
    }
    Ok(())
}
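Failures can be handled by matching on the variants listed under §Errors. This is a sketch only: it assumes Error is exported at the crate root and that these variants are struct-like; check the crate's actual Error definition for the real shapes.
use anthropic_rust::{Client, Error, Model, ContentBlock};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("Hello"))
        .build();

    match client.execute_chat(request).await {
        Ok(response) => println!("{:?}", response),
        // The { .. } patterns assume struct-like variants; adjust to the
        // crate's actual Error definition.
        Err(Error::RateLimit { .. }) => eprintln!("Rate limited; retry later"),
        Err(Error::Authentication { .. }) => eprintln!("Check ANTHROPIC_API_KEY"),
        Err(e) => return Err(e.into()),
    }
    Ok(())
}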
pub async fn execute_chat_with_model(
    &self,
    model: Model,
    request: ChatRequest,
) -> Result<Message>
Execute a chat request with a specific model override.
Use this method when you want to use a different model for a specific request without changing the client’s default configuration.
§Arguments
- model - The model to use for this specific request
- request - The chat request containing messages and optional parameters
§Examples
use anthropic_rust::{Client, Model, ContentBlock};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Client configured with Sonnet
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("Quick question: what's 2+2?"))
        .build();

    // Use faster Haiku model for this simple request
    let response = client.execute_chat_with_model(
        Model::Claude3Haiku20240307,
        request,
    ).await?;
    println!("Used model: {:?}", response.model);
    Ok(())
}
pub async fn execute_chat_with_options(
    &self,
    model: Model,
    request: ChatRequest,
    timeout: Option<Duration>,
) -> Result<Message>
Execute a chat request with model and timeout overrides.
This method allows you to override both the model and timeout for a specific request.
§Arguments
- model - The model to use for this specific request
- request - The chat request containing messages and optional parameters
- timeout - Optional timeout override for this request
§Examples
use anthropic_rust::{Client, Model, ContentBlock};
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("This might take a while..."))
        .build();

    // Use longer timeout for this specific request
    let response = client.execute_chat_with_options(
        Model::Claude35Sonnet20241022,
        request,
        Some(Duration::from_secs(120)),
    ).await?;
    Ok(())
}
pub async fn execute_chat_with_timeout(
    &self,
    request: ChatRequest,
    timeout: Duration,
) -> Result<Message>
Execute a chat request with timeout override using the client’s default model.
§Arguments
- request - The chat request containing messages and optional parameters
- timeout - Timeout override for this request
§Examples
use anthropic_rust::{Client, Model, ContentBlock};
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("Quick question"))
        .build();

    // Use shorter timeout for this quick request
    let response = client.execute_chat_with_timeout(
        request,
        Duration::from_secs(10),
    ).await?;
    Ok(())
}
pub async fn stream_chat(&self, request: ChatRequest) -> Result<MessageStream>
Stream a chat request using the client’s configured model and max_tokens.
This method enables real-time streaming of Claude’s response, allowing you to process and display content as it’s generated. This is ideal for interactive applications where you want to show progress to users.
§Arguments
- request - The chat request containing messages and optional parameters
§Returns
Returns a MessageStream that yields StreamEvents as Claude generates the response.
Events include message start/stop, content block deltas, and usage information.
§Examples
use anthropic_rust::{Client, Model, ContentBlock, StreamEvent};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("Write a short story"))
        .build();

    let mut stream = client.stream_chat(request).await?;
    while let Some(event) = stream.next().await {
        match event? {
            StreamEvent::ContentBlockDelta { delta, .. } => {
                if let anthropic_rust::ContentDelta::TextDelta { text } = delta {
                    print!("{}", text); // Print text as it streams
                }
            }
            StreamEvent::MessageStop => break,
            _ => {}
        }
    }
    Ok(())
}
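To accumulate the streamed text into a single String rather than printing it, the same two event variants suffice. A sketch reusing only the types shown above:
use anthropic_rust::{Client, ContentBlock, ContentDelta, Model, StreamEvent};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("Write a haiku"))
        .build();

    let mut stream = client.stream_chat(request).await?;
    let mut full_text = String::new();
    while let Some(event) = stream.next().await {
        match event? {
            // Append each text delta as it arrives
            StreamEvent::ContentBlockDelta { delta, .. } => {
                if let ContentDelta::TextDelta { text } = delta {
                    full_text.push_str(&text);
                }
            }
            StreamEvent::MessageStop => break,
            _ => {}
        }
    }
    println!("{}", full_text);
    Ok(())
}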
pub async fn stream_chat_with_model(
    &self,
    model: Model,
    request: ChatRequest,
) -> Result<MessageStream>
Stream a chat request with a specific model override.
Like stream_chat, but allows you to specify a different model for this
specific request without changing the client’s default configuration.
§Arguments
- model - The model to use for this specific request
- request - The chat request containing messages and optional parameters
§Examples
use anthropic_rust::{Client, Model, ContentBlock, StreamEvent};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("Quick response needed"))
        .build();

    // Use Haiku for faster streaming
    let mut stream = client.stream_chat_with_model(
        Model::Claude3Haiku20240307,
        request,
    ).await?;
    // Process stream events...
    Ok(())
}
pub async fn stream_chat_with_options(
    &self,
    model: Model,
    request: ChatRequest,
    timeout: Option<Duration>,
) -> Result<MessageStream>
Stream a chat request with model and timeout overrides.
This method allows you to override both the model and timeout for a specific streaming request.
§Arguments
- model - The model to use for this specific request
- request - The chat request containing messages and optional parameters
- timeout - Optional timeout override for this request
§Examples
use anthropic_rust::{Client, Model, ContentBlock, StreamEvent};
use futures::StreamExt;
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("Generate a long story"))
        .build();

    // Use longer timeout for streaming long content
    let mut stream = client.stream_chat_with_options(
        Model::Claude35Sonnet20241022,
        request,
        Some(Duration::from_secs(300)),
    ).await?;
    // Process stream events...
    Ok(())
}
pub async fn stream_chat_with_timeout(
    &self,
    request: ChatRequest,
    timeout: Duration,
) -> Result<MessageStream>
Stream a chat request with timeout override using the client’s default model.
§Arguments
- request - The chat request containing messages and optional parameters
- timeout - Timeout override for this request
§Examples
use anthropic_rust::{Client, Model, ContentBlock, StreamEvent};
use futures::StreamExt;
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .user_message(ContentBlock::text("Quick question"))
        .build();

    // Use shorter timeout for quick streaming
    let mut stream = client.stream_chat_with_timeout(
        request,
        Duration::from_secs(15),
    ).await?;
    // Process stream events...
    Ok(())
}
pub async fn count_tokens(
    &self,
    request: CountTokensRequest,
) -> Result<TokenCount>
Count tokens in a request without sending it to Claude.
This method allows you to estimate token usage before making an actual request, which is useful for cost estimation and ensuring you stay within token limits.
§Arguments
- request - The token counting request containing messages to analyze
§Returns
Returns a TokenCount with the estimated input token count.
§Examples
use anthropic_rust::{Client, Model, ContentBlock, types::CountTokensRequest};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = CountTokensRequest {
        messages: vec![
            anthropic_rust::types::MessageParam {
                role: anthropic_rust::Role::User,
                content: vec![ContentBlock::text("How many tokens is this message?")],
            }
        ],
        system: None,
        tools: None,
    };

    let token_count = client.count_tokens(request).await?;
    println!("Input tokens: {}", token_count.input_tokens);
    Ok(())
}
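As a follow-up, the estimate can gate whether the real request is sent at all. A sketch: the 8_000 budget below is an arbitrary illustration, not an API limit.
use anthropic_rust::{Client, Model, ContentBlock, Role};
use anthropic_rust::types::{CountTokensRequest, MessageParam};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let prompt = "Summarize this very long document...";

    let count_request = CountTokensRequest {
        messages: vec![MessageParam {
            role: Role::User,
            content: vec![ContentBlock::text(prompt)],
        }],
        system: None,
        tools: None,
    };
    let estimate = client.count_tokens(count_request).await?;

    // 8_000 is an arbitrary budget chosen for illustration.
    if estimate.input_tokens > 8_000 {
        eprintln!("Prompt too large ({} tokens); not sending", estimate.input_tokens);
        return Ok(());
    }

    let request = client.chat_builder()
        .user_message(ContentBlock::text(prompt))
        .build();
    let response = client.execute_chat(request).await?;
    println!("{:?}", response);
    Ok(())
}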
pub fn chat_builder(&self) -> ChatRequestBuilder
Create a new chat request builder.
The builder provides a fluent API for constructing chat requests with messages, system prompts, tools, and other parameters.
§Examples
use anthropic_rust::{Client, Model, ContentBlock, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    let request = client.chat_builder()
        .system("You are a helpful assistant.")
        .user_message(ContentBlock::text("Hello!"))
        .assistant_message(ContentBlock::text("Hi there! How can I help?"))
        .user_message(ContentBlock::text("What's the weather like?"))
        .temperature(0.7)
        .build();

    let response = client.execute_chat(request).await?;
    Ok(())
}
pub fn default_model(&self) -> Model
Get the client’s default model.
Returns the model that will be used for requests when no model override is specified.
§Examples
use anthropic_rust::{Client, Model};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    println!("Default model: {:?}", client.default_model());
    Ok(())
}
pub fn default_max_tokens(&self) -> u32
Get the client’s default max_tokens setting.
Returns the maximum number of tokens that will be used for response generation when no override is specified.
§Examples
use anthropic_rust::{Client, Model};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;
    println!("Default max tokens: {}", client.default_max_tokens());
    Ok(())
}