async_llm/providers/raw.rs

use std::pin::Pin;

use async_trait::async_trait;
use futures::Stream;

use crate::{
    completions::{CompletionRequest, CompletionResponse},
    error::Error,
    http::HttpClient,
};

use super::{config::OpenAIConfig, Provider};

/// A pass-through provider that exchanges raw `serde_json::Value` payloads
/// with an OpenAI-compatible API instead of typed request/response structs.
#[derive(Debug, Clone, Default)]
pub struct RawProvider {
    pub(crate) config: OpenAIConfig,
}

impl RawProvider {
    pub fn new(config: OpenAIConfig) -> Self {
        Self { config }
    }
}

#[async_trait]
impl Provider for RawProvider {
    type Config = OpenAIConfig;
    // Requests, responses, and stream chunks are all untyped JSON values,
    // so arbitrary bodies pass through unchanged.
    type ChatRequest = serde_json::Value;
    type ChatResponse = serde_json::Value;
    type ChatResponseStream = serde_json::Value;

    fn config(&self) -> &Self::Config {
        &self.config
    }

    async fn chat(
        &self,
        client: &impl HttpClient,
        request: Self::ChatRequest,
    ) -> Result<Self::ChatResponse, Error> {
        client.post("/chat/completions", request).await
    }

    async fn chat_stream(
        &self,
        client: &impl HttpClient,
        request: Self::ChatRequest,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<Self::ChatResponseStream, Error>> + Send>>, Error>
    {
        client.post_stream("/chat/completions", request).await
    }

    async fn completions(
        &self,
        client: &impl HttpClient,
        request: CompletionRequest,
    ) -> Result<CompletionResponse, Error> {
        client.post("/completions", request).await
    }
}
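
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file). It assumes the
// caller supplies some concrete `HttpClient` implementation, since this
// module only sees the trait; the JSON fields below are ordinary OpenAI-style
// chat-completion parameters, not types defined by this crate.
#[allow(dead_code)]
async fn raw_chat_example(client: &impl HttpClient) -> Result<(), Error> {
    let provider = RawProvider::new(OpenAIConfig::default());
    // `ChatRequest` is plain `serde_json::Value`, so any JSON body can be
    // sent through as-is.
    let request = serde_json::json!({
        "model": "gpt-4o-mini",
        "messages": [{ "role": "user", "content": "Hello!" }]
    });
    let response = provider.chat(client, request).await?;
    println!("{response}");
    Ok(())
}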