LLMProvider

Trait LLMProvider 

Source
pub trait LLMProvider: Send + Sync {
    // Required methods
    fn name(&self) -> &str;
    fn generate<'life0, 'async_trait>(
        &'life0 self,
        request: LLMRequest,
    ) -> Pin<Box<dyn Future<Output = Result<LLMResponse, LLMError>> + Send + 'async_trait>>
       where Self: 'async_trait,
             'life0: 'async_trait;
    fn supported_models(&self) -> Vec<String>;
    fn validate_request(&self, request: &LLMRequest) -> Result<(), LLMError>;

    // Provided methods
    fn supports_streaming(&self) -> bool { ... }
    fn supports_reasoning(&self, _model: &str) -> bool { ... }
    fn supports_reasoning_effort(&self, _model: &str) -> bool { ... }
    fn stream<'life0, 'async_trait>(
        &'life0 self,
        request: LLMRequest,
    ) -> Pin<Box<dyn Future<Output = Result<LLMStream, LLMError>> + Send + 'async_trait>>
       where Self: 'async_trait,
             'life0: 'async_trait { ... }
}
Expand description

Universal LLM provider trait — a common interface implemented by each LLM backend (e.g. Gemini, OpenAI, Anthropic).

Required Methods§

Source

fn name(&self) -> &str

Provider name (e.g., “gemini”, “openai”, “anthropic”)

Source

fn generate<'life0, 'async_trait>( &'life0 self, request: LLMRequest, ) -> Pin<Box<dyn Future<Output = Result<LLMResponse, LLMError>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait,

Generate a completion for the given request, resolving to the provider's `LLMResponse` or an `LLMError`.

Source

fn supported_models(&self) -> Vec<String>

Get the list of model identifiers supported by this provider.

Source

fn validate_request(&self, request: &LLMRequest) -> Result<(), LLMError>

Validate that the request is acceptable for this provider, returning `Ok(())` on success or an `LLMError` describing the problem.

Provided Methods§

Source

fn supports_streaming(&self) -> bool

Whether the provider has native streaming support. Provided method with a default implementation; callers can check this before invoking `stream`.

Source

fn supports_reasoning(&self, _model: &str) -> bool

Whether the provider surfaces structured reasoning traces for the given model

Source

fn supports_reasoning_effort(&self, _model: &str) -> bool

Whether the provider accepts configurable reasoning effort for the model

Source

fn stream<'life0, 'async_trait>( &'life0 self, request: LLMRequest, ) -> Pin<Box<dyn Future<Output = Result<LLMStream, LLMError>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait,

Stream a completion (optional). This is a provided method with a default implementation; see `supports_streaming` to determine whether the provider offers native streaming.

Implementors§