Skip to main content

Provider

Trait Provider 

Source
pub trait Provider: Send + Sync {
    // 15 methods total: 1 required, 14 provided.

    // Required method
    fn chat_with_system<'life0, 'life1, 'life2, 'life3, 'async_trait>(
        &'life0 self,
        system_prompt: Option<&'life1 str>,
        message: &'life2 str,
        model: &'life3 str,
        temperature: f64,
    ) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait,
        'life1: 'async_trait,
        'life2: 'async_trait,
        'life3: 'async_trait;

    // Provided methods
    fn capabilities(&self) -> ProviderCapabilities { ... }

    fn convert_tools(&self, tools: &[ToolSpec]) -> ToolsPayload { ... }

    fn simple_chat<'life0, 'life1, 'life2, 'async_trait>(
        &'life0 self,
        message: &'life1 str,
        model: &'life2 str,
        temperature: f64,
    ) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait,
        'life1: 'async_trait,
        'life2: 'async_trait,
    { ... }

    fn chat_with_history<'life0, 'life1, 'life2, 'async_trait>(
        &'life0 self,
        messages: &'life1 [ChatMessage],
        model: &'life2 str,
        temperature: f64,
    ) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait,
        'life1: 'async_trait,
        'life2: 'async_trait,
    { ... }

    fn chat<'life0, 'life1, 'life2, 'async_trait>(
        &'life0 self,
        request: ChatRequest<'life1>,
        model: &'life2 str,
        temperature: f64,
    ) -> Pin<Box<dyn Future<Output = Result<ChatResponse>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait,
        'life1: 'async_trait,
        'life2: 'async_trait,
    { ... }

    fn supports_native_tools(&self) -> bool { ... }

    fn supports_vision(&self) -> bool { ... }

    fn warmup<'life0, 'async_trait>(
        &'life0 self,
    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait,
    { ... }

    fn chat_with_tools<'life0, 'life1, 'life2, 'life3, 'async_trait>(
        &'life0 self,
        messages: &'life1 [ChatMessage],
        _tools: &'life2 [Value],
        model: &'life3 str,
        temperature: f64,
    ) -> Pin<Box<dyn Future<Output = Result<ChatResponse>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait,
        'life1: 'async_trait,
        'life2: 'async_trait,
        'life3: 'async_trait,
    { ... }

    fn supports_streaming(&self) -> bool { ... }

    fn supports_streaming_tool_events(&self) -> bool { ... }

    fn stream_chat_with_system(
        &self,
        _system_prompt: Option<&str>,
        _message: &str,
        _model: &str,
        _temperature: f64,
        _options: StreamOptions,
    ) -> BoxStream<'static, StreamResult<StreamChunk>> { ... }

    fn stream_chat_with_history(
        &self,
        messages: &[ChatMessage],
        model: &str,
        temperature: f64,
        options: StreamOptions,
    ) -> BoxStream<'static, StreamResult<StreamChunk>> { ... }

    fn stream_chat(
        &self,
        request: ChatRequest<'_>,
        model: &str,
        temperature: f64,
        options: StreamOptions,
    ) -> BoxStream<'static, StreamResult<StreamEvent>> { ... }
}

Required Methods§

Source

fn chat_with_system<'life0, 'life1, 'life2, 'life3, 'async_trait>(
    &'life0 self,
    system_prompt: Option<&'life1 str>,
    message: &'life2 str,
    model: &'life3 str,
    temperature: f64,
) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
    'life1: 'async_trait,
    'life2: 'async_trait,
    'life3: 'async_trait,

One-shot chat with optional system prompt.

Kept for compatibility and advanced one-shot prompting.

Provided Methods§

Source

fn capabilities(&self) -> ProviderCapabilities

Query provider capabilities.

Default implementation returns minimal capabilities (no native tool calling). Providers should override this to declare their actual capabilities.

Source

fn convert_tools(&self, tools: &[ToolSpec]) -> ToolsPayload

Convert tool specifications to provider-native format.

Default implementation returns PromptGuided payload, which injects tool documentation into the system prompt as text. Providers with native tool calling support should override this to return their specific format (Gemini, Anthropic, OpenAI).

Source

fn simple_chat<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, message: &'life1 str, model: &'life2 str, temperature: f64, ) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Simple one-shot chat (single user message, no explicit system prompt).

This is the preferred API for non-agentic direct interactions.

Source

fn chat_with_history<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, messages: &'life1 [ChatMessage], model: &'life2 str, temperature: f64, ) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Multi-turn conversation. Default implementation extracts the last user message and delegates to chat_with_system.

Source

fn chat<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, request: ChatRequest<'life1>, model: &'life2 str, temperature: f64, ) -> Pin<Box<dyn Future<Output = Result<ChatResponse>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Structured chat API for agent loop callers.

Source

fn supports_native_tools(&self) -> bool

Whether provider supports native tool calls over API.

Source

fn supports_vision(&self) -> bool

Whether provider supports multimodal vision input.

Source

fn warmup<'life0, 'async_trait>( &'life0 self, ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait,

Warm up the HTTP connection pool (TLS handshake, DNS, HTTP/2 setup). Default implementation is a no-op; providers with HTTP clients should override.

Source

fn chat_with_tools<'life0, 'life1, 'life2, 'life3, 'async_trait>( &'life0 self, messages: &'life1 [ChatMessage], _tools: &'life2 [Value], model: &'life3 str, temperature: f64, ) -> Pin<Box<dyn Future<Output = Result<ChatResponse>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait, 'life3: 'async_trait,

Chat with tool definitions for native function calling support. The default implementation falls back to chat_with_history and returns an empty tool_calls vector (prompt-based tool use only).

Source

fn supports_streaming(&self) -> bool

Whether provider supports streaming responses. Default implementation returns false.

Source

fn supports_streaming_tool_events(&self) -> bool

Whether provider can emit structured tool-call stream events.

Providers should return true only when stream_chat(...) can produce StreamEvent::ToolCall for native tool-calling requests.

Source

fn stream_chat_with_system( &self, _system_prompt: Option<&str>, _message: &str, _model: &str, _temperature: f64, _options: StreamOptions, ) -> BoxStream<'static, StreamResult<StreamChunk>>

Streaming chat with optional system prompt. Returns an async stream of text chunks. Default implementation falls back to non-streaming chat.

Source

fn stream_chat_with_history( &self, messages: &[ChatMessage], model: &str, temperature: f64, options: StreamOptions, ) -> BoxStream<'static, StreamResult<StreamChunk>>

Streaming chat with history. Default implementation extracts the last user message and delegates to stream_chat_with_system, mirroring the non-streaming chat_with_history.

Source

fn stream_chat( &self, request: ChatRequest<'_>, model: &str, temperature: f64, options: StreamOptions, ) -> BoxStream<'static, StreamResult<StreamEvent>>

Structured streaming chat interface.

Default implementation adapts legacy text chunks from stream_chat_with_history into StreamEvent::TextDelta and StreamEvent::Final events.

Implementors§