pub trait ModelProvider: Send + Sync {
    // Required methods
    fn name(&self) -> &str;
    fn max_context_tokens(&self) -> usize;
    fn max_output_tokens(&self) -> usize;
    fn generate<'life0, 'async_trait>(
        &'life0 self,
        messages: Vec<Message>,
        tools: Vec<ToolDefinition>,
        system_prompt: Option<String>,
    ) -> Pin<Box<dyn Future<Output = Result<ModelResponse, ProviderError>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait;

    // Provided methods
    fn estimate_token_count(&self, text: &str) -> usize { ... }
    fn estimate_message_tokens(&self, messages: &[Message]) -> usize { ... }
    fn generate_stream<'life0, 'async_trait>(
        &'life0 self,
        messages: Vec<Message>,
        tools: Vec<ToolDefinition>,
        system_prompt: Option<String>,
    ) -> Pin<Box<dyn Future<Output = Result<BoxStream<'static, Result<StreamEvent, ProviderError>>, ProviderError>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait { ... }
}
Trait for model providers

This trait abstracts over different LLM providers (Bedrock, Anthropic, etc.), allowing the Agent to work with any provider implementation.

A provider combines model metadata (name, token limits) with API interaction (generate, streaming). Use the builder to create agents:
let agent = Agent::builder()
    .bedrock(ClaudeSonnet4_5)
    .build()
    .await?;
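For orientation, here is a minimal sketch of a custom provider. It assumes the trait is defined via the async-trait pattern (the `'life0`/`'async_trait` signatures above are its expansion); `MyProvider` is hypothetical, not a type from this crate:

use async_trait::async_trait;

// Hypothetical provider type, for illustration only.
struct MyProvider;

#[async_trait]
impl ModelProvider for MyProvider {
    fn name(&self) -> &str { "my-provider" }
    fn max_context_tokens(&self) -> usize { 200_000 }
    fn max_output_tokens(&self) -> usize { 8_192 }

    async fn generate(
        &self,
        _messages: Vec<Message>,
        _tools: Vec<ToolDefinition>,
        _system_prompt: Option<String>,
    ) -> Result<ModelResponse, ProviderError> {
        // Translate the conversation into the backend's wire format,
        // call the API, and map the reply into a ModelResponse.
        todo!("call the backend API")
    }
}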
Required Methods

fn name(&self) -> &str

The model's name

fn max_context_tokens(&self) -> usize

Maximum input context tokens for this model

fn max_output_tokens(&self) -> usize

Maximum output tokens this model can generate
fn generate<'life0, 'async_trait>(
    &'life0 self,
    messages: Vec<Message>,
    tools: Vec<ToolDefinition>,
    system_prompt: Option<String>,
) -> Pin<Box<dyn Future<Output = Result<ModelResponse, ProviderError>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,

Send a request to the model and get a response

Arguments

messages - The conversation history
tools - Available tools for the model to use
system_prompt - Optional system prompt
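A hedged usage sketch: it assumes an async context, a `provider` value implementing this trait, and an already-built conversation (`Message` construction is crate-specific and not shown):

// Sketch only: `provider` and `messages` are assumed to exist already.
let response = provider
    .generate(
        messages,                                    // conversation history
        Vec::new(),                                  // no tools offered
        Some("You are a helpful assistant.".into()), // system prompt
    )
    .await?;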
Provided Methods
fn estimate_token_count(&self, text: &str) -> usize

Estimate token count for text

Providers should override this to match their model's tokenization. The default implementation uses a heuristic of roughly 4 characters per token.
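A sketch of what that default plausibly reduces to; the exact rounding, and whether it counts bytes or characters, are assumptions:

// Hypothetical reconstruction of the ~4-chars-per-token default heuristic.
fn estimate_token_count(text: &str) -> usize {
    text.len().div_ceil(4) // e.g. "hello world" (11 bytes) -> 3 tokens
}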
fn estimate_message_tokens(&self, messages: &[Message]) -> usize

Estimate token count for a conversation
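Conceptually this should amount to summing per-message estimates; in this sketch the `text()` accessor on `Message` is hypothetical, not this crate's actual API:

// Sketch only: `text()` is an assumed accessor, and a real implementation
// would likely add per-message formatting overhead on top of the sum.
fn estimate_message_tokens(messages: &[Message]) -> usize {
    messages.iter().map(|m| estimate_token_count(m.text())).sum()
}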
fn generate_stream<'life0, 'async_trait>(
    &'life0 self,
    messages: Vec<Message>,
    tools: Vec<ToolDefinition>,
    system_prompt: Option<String>,
) -> Pin<Box<dyn Future<Output = Result<BoxStream<'static, Result<StreamEvent, ProviderError>>, ProviderError>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,

Send a request and stream the response token-by-token (optional)

Arguments

messages - The conversation history
tools - Available tools for the model to use
system_prompt - Optional system prompt
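A hedged consumption sketch, assuming `futures::StreamExt` is in scope for `next()`; the variants of `StreamEvent` are crate-specific, so handling is left as a comment:

use futures::StreamExt;

// Sketch only: `provider` and `messages` are assumed to exist already.
let mut stream = provider
    .generate_stream(messages, Vec::new(), None)
    .await?;

while let Some(event) = stream.next().await {
    let ev = event?; // propagate a ProviderError surfaced mid-stream
    // Handle the StreamEvent here (e.g. append a text delta to a buffer).
    let _ = ev;
}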