pub struct OpenAIProvider { /* private fields */ }
Expand description
OpenAI LLM provider using the Chat Completions API.
Also supports OpenAI-compatible APIs (Ollama, vLLM, Azure OpenAI, etc.)
via the with_base_url constructor.
Implementations§
Source§impl OpenAIProvider
impl OpenAIProvider
Source§pub fn new(api_key: String, model: String) -> Self
pub fn new(api_key: String, model: String) -> Self
Create a new OpenAI provider with the specified API key and model.
Source§pub fn with_base_url(api_key: String, model: String, base_url: String) -> Self
pub fn with_base_url(api_key: String, model: String, base_url: String) -> Self
Create a new provider with a custom base URL for OpenAI-compatible APIs.
Source§pub fn kimi(api_key: String, model: String) -> Self
pub fn kimi(api_key: String, model: String) -> Self
Create a provider using Moonshot KIMI via OpenAI-compatible Chat Completions.
Source§pub fn kimi_k2_5(api_key: String) -> Self
pub fn kimi_k2_5(api_key: String) -> Self
Create a provider using KIMI K2.5 (default KIMI model).
Source§pub fn kimi_k2_thinking(api_key: String) -> Self
pub fn kimi_k2_thinking(api_key: String) -> Self
Create a provider using KIMI K2 Thinking.
Source§pub fn zai(api_key: String, model: String) -> Self
pub fn zai(api_key: String, model: String) -> Self
Create a provider using z.ai via OpenAI-compatible Chat Completions.
Source§pub fn zai_glm5(api_key: String) -> Self
pub fn zai_glm5(api_key: String) -> Self
Create a provider using z.ai GLM-5 (default z.ai agentic reasoning model).
Source§pub fn minimax(api_key: String, model: String) -> Self
pub fn minimax(api_key: String, model: String) -> Self
Create a provider using MiniMax via OpenAI-compatible Chat Completions.
Source§pub fn minimax_m2_5(api_key: String) -> Self
pub fn minimax_m2_5(api_key: String) -> Self
Create a provider using MiniMax M2.5 (default MiniMax model).
Source§pub fn gpt52_instant(api_key: String) -> Self
pub fn gpt52_instant(api_key: String) -> Self
Create a provider using GPT-5.2 Instant (speed-optimized for routine queries).
Source§pub fn gpt54(api_key: String) -> Self
pub fn gpt54(api_key: String) -> Self
Create a provider using GPT-5.4 (frontier reasoning with 1.05M context).
Source§pub fn gpt53_codex(api_key: String) -> Self
pub fn gpt53_codex(api_key: String) -> Self
Create a provider using GPT-5.3 Codex (latest codex model).
Source§pub fn gpt52_thinking(api_key: String) -> Self
pub fn gpt52_thinking(api_key: String) -> Self
Create a provider using GPT-5.2 Thinking (complex reasoning, coding, analysis).
Source§pub fn gpt52_pro(api_key: String) -> Self
pub fn gpt52_pro(api_key: String) -> Self
Create a provider using GPT-5.2 Pro (maximum accuracy for difficult problems).
Source§pub fn gpt5(api_key: String) -> Self
pub fn gpt5(api_key: String) -> Self
Create a provider using GPT-5 (400k context, coding and reasoning).
Source§pub fn gpt5_mini(api_key: String) -> Self
pub fn gpt5_mini(api_key: String) -> Self
Create a provider using GPT-5-mini (faster, cost-efficient GPT-5).
Source§pub fn gpt5_nano(api_key: String) -> Self
pub fn gpt5_nano(api_key: String) -> Self
Create a provider using GPT-5-nano (fastest, cheapest GPT-5 variant).
Source§pub fn o3(api_key: String) -> Self
pub fn o3(api_key: String) -> Self
Create a provider using o3 (most intelligent reasoning model).
Source§pub fn o4_mini(api_key: String) -> Self
pub fn o4_mini(api_key: String) -> Self
Create a provider using o4-mini (fast, cost-efficient reasoning).
Source§pub fn o1_mini(api_key: String) -> Self
pub fn o1_mini(api_key: String) -> Self
Create a provider using o1-mini (fast reasoning model).
Source§pub fn gpt41(api_key: String) -> Self
pub fn gpt41(api_key: String) -> Self
Create a provider using GPT-4.1 (improved instruction following, 1M context).
Source§pub fn gpt41_mini(api_key: String) -> Self
pub fn gpt41_mini(api_key: String) -> Self
Create a provider using GPT-4.1-mini (smaller, faster GPT-4.1).
Source§pub fn gpt4o_mini(api_key: String) -> Self
pub fn gpt4o_mini(api_key: String) -> Self
Create a provider using GPT-4o-mini (fast and cost-effective).
Source§pub const fn with_thinking(self, thinking: ThinkingConfig) -> Self
pub const fn with_thinking(self, thinking: ThinkingConfig) -> Self
Set the provider-owned thinking configuration for this model.
Trait Implementations§
Source§impl Clone for OpenAIProvider
impl Clone for OpenAIProvider
Source§fn clone(&self) -> OpenAIProvider
fn clone(&self) -> OpenAIProvider
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Source§impl LlmProvider for OpenAIProvider
impl LlmProvider for OpenAIProvider
Source§fn chat<'life0, 'async_trait>(
&'life0 self,
request: ChatRequest,
) -> Pin<Box<dyn Future<Output = Result<ChatOutcome>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
fn chat<'life0, 'async_trait>(
&'life0 self,
request: ChatRequest,
) -> Pin<Box<dyn Future<Output = Result<ChatOutcome>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
Source§fn chat_stream(&self, request: ChatRequest) -> StreamBox<'_>
fn chat_stream(&self, request: ChatRequest) -> StreamBox<'_>
fn model(&self) -> &str
fn provider(&self) -> &'static str
Source§fn configured_thinking(&self) -> Option<&ThinkingConfig>
fn configured_thinking(&self) -> Option<&ThinkingConfig>
Source§fn capabilities(&self) -> Option<&'static ModelCapabilities>
fn capabilities(&self) -> Option<&'static ModelCapabilities>
Source§fn validate_thinking_config(
&self,
thinking: Option<&ThinkingConfig>,
) -> Result<()>
fn validate_thinking_config( &self, thinking: Option<&ThinkingConfig>, ) -> Result<()>
Source§fn resolve_thinking_config(
&self,
request_thinking: Option<&ThinkingConfig>,
) -> Result<Option<ThinkingConfig>>
fn resolve_thinking_config( &self, request_thinking: Option<&ThinkingConfig>, ) -> Result<Option<ThinkingConfig>>
Source§fn default_max_tokens(&self) -> u32
fn default_max_tokens(&self) -> u32
Default max tokens for this provider, used when not overridden by AgentConfig.max_tokens.