async_llm/providers/mod.rs

use std::{fmt::Debug, pin::Pin};

use async_trait::async_trait;
use futures::Stream;

use crate::{
    completions::{CompletionRequest, CompletionResponse},
    error::Error,
    http::HttpClient,
    request::Requestable,
    response::Respondable,
};

pub mod config;
pub mod openai;
pub mod raw;

pub use config::{Config, OpenAIConfig};
pub use openai::OpenAIProvider;
pub use raw::RawProvider;

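/// A pluggable LLM backend. An implementor binds its own configuration,
/// request, and response types, and executes calls over a caller-supplied
/// `HttpClient`, keeping the HTTP transport independent of the provider.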
#[async_trait]
pub trait Provider: Debug + Send + Sync {
    type Config: Config;
    type ChatRequest: Requestable;
    type ChatResponse: Respondable;
    type ChatResponseStream: Respondable;

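    /// Returns the provider-specific configuration.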
    fn config(&self) -> &Self::Config;

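    /// Sends a chat request and resolves once the complete response is
    /// available.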
    async fn chat(
        &self,
        client: &impl HttpClient,
        request: Self::ChatRequest,
    ) -> Result<Self::ChatResponse, Error>;

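    /// Sends a chat request and returns a pinned, boxed stream that yields
    /// response chunks incrementally as the provider emits them.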
    async fn chat_stream(
        &self,
        client: &impl HttpClient,
        request: Self::ChatRequest,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<Self::ChatResponseStream, Error>> + Send>>, Error>;

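    /// Sends a completions request. Unlike `chat`, the request and response
    /// types are shared across providers rather than provider-specific.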
    async fn completions(
        &self,
        client: &impl HttpClient,
        request: CompletionRequest,
    ) -> Result<CompletionResponse, Error>;
}
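
// A minimal sketch of driving `chat_stream` from the caller's side: generic
// over any `Provider` and `HttpClient`, it collects every streamed chunk
// into a `Vec`. This helper is illustrative, not part of the module's
// original API; it assumes only the items already imported here plus
// `futures::StreamExt` for `next()`.
pub async fn collect_chat_stream<P: Provider>(
    provider: &P,
    client: &impl HttpClient,
    request: P::ChatRequest,
) -> Result<Vec<P::ChatResponseStream>, Error> {
    use futures::StreamExt;

    // `Pin<Box<dyn Stream + Send>>` is `Unpin`, so `StreamExt::next` can
    // poll it directly without re-pinning.
    let mut stream = provider.chat_stream(client, request).await?;
    let mut chunks = Vec::new();
    while let Some(chunk) = stream.next().await {
        // Each item is itself a `Result`; propagate the first error and
        // stop consuming.
        chunks.push(chunk?);
    }
    Ok(chunks)
}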