Skip to main content

ChatClient

Trait ChatClient 

Source
/// Generic trait for LLM chat clients.
///
/// Implementors are backend-specific clients (the page lists the provider
/// name via [`ChatClient::provider`], e.g. "openai" or "llama.cpp").
/// The `Pin<Box<dyn Future<...>>>` return types with the `'life0`/`'async_trait`
/// lifetimes are the standard `#[async_trait]`-style desugaring of
/// `async fn` methods — callers simply `.await` the returned future.
/// `Send + Sync` supertraits allow clients to be shared across threads.
pub trait ChatClient: Send + Sync {
    // Required methods

    /// Send a chat completion request.
    ///
    /// Resolves to `LlmResult<ChatResponse>` — the full, non-streamed
    /// completion for `request`.
    fn chat<'life0, 'async_trait>(
        &'life0 self,
        request: ChatRequest,
    ) -> Pin<Box<dyn Future<Output = LlmResult<ChatResponse>> + Send + 'async_trait>>
       where Self: 'async_trait,
             'life0: 'async_trait;

    /// Stream a chat completion request (yields text chunks as they arrive).
    ///
    /// Resolves to `LlmResult<TextStream>`; the stream produces incremental
    /// text pieces rather than a single complete response.
    fn chat_stream<'life0, 'async_trait>(
        &'life0 self,
        request: ChatRequest,
    ) -> Pin<Box<dyn Future<Output = LlmResult<TextStream>> + Send + 'async_trait>>
       where Self: 'async_trait,
             'life0: 'async_trait;

    /// Get the model name this client uses.
    fn model(&self) -> &str;

    /// Get the provider name (e.g., "openai", "llama.cpp").
    fn provider(&self) -> &str;
}
Expand description

Generic trait for LLM chat clients. Implementors provide blocking (`chat`) and streaming (`chat_stream`) completion calls, plus accessors for the model and provider names.

Required Methods§

Source

fn chat<'life0, 'async_trait>( &'life0 self, request: ChatRequest, ) -> Pin<Box<dyn Future<Output = LlmResult<ChatResponse>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait,

Send a chat completion request

Source

fn chat_stream<'life0, 'async_trait>( &'life0 self, request: ChatRequest, ) -> Pin<Box<dyn Future<Output = LlmResult<TextStream>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait,

Stream a chat completion request (yields text chunks as they arrive)

Source

fn model(&self) -> &str

Get the model name this client uses

Source

fn provider(&self) -> &str

Get the provider name (e.g., “openai”, “llama.cpp”)

Implementors§