model_gateway_rs/clients/llm.rs

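//! LLM client for the model gateway: adapts any `ModelSDK` implementation to
//! the `ModelClient` (single response) and `StreamModelClient` (byte stream) traits.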
use async_trait::async_trait;
use toolcraft_request::ByteStream;

use crate::{
    error::Result,
    model::llm::{LlmInput, LlmOutput},
    sdk::ModelSDK,
    traits::{ModelClient, StreamModelClient},
};

/// Generic LLM client that delegates every call to the wrapped `ModelSDK`.
pub struct LlmClient<T>
where
    T: ModelSDK + Sync + Send,
{
    pub inner: T,
}

impl<T> LlmClient<T>
where
    T: ModelSDK + Sync + Send,
{
    /// Wraps the given SDK instance.
    pub fn new(inner: T) -> Self {
        Self { inner }
    }
}

/// Non-streaming inference: a single request yields one complete `LlmOutput`.
#[async_trait]
impl<T> ModelClient<LlmInput, LlmOutput> for LlmClient<T>
where
    T: ModelSDK<Input = LlmInput, Output = LlmOutput> + Sync + Send,
{
    async fn infer(&self, input: LlmInput) -> Result<LlmOutput> {
        let resp = self.inner.chat_once(input).await?;
        Ok(resp)
    }
}

/// Streaming inference: hands back the SDK's raw `ByteStream` for the caller to consume.
#[async_trait]
impl<T> StreamModelClient<LlmInput> for LlmClient<T>
where
    T: ModelSDK<Input = LlmInput> + Sync + Send,
{
    async fn infer_stream(&self, input: LlmInput) -> Result<ByteStream> {
        let stream = self.inner.chat_stream(input).await?;
        Ok(stream)
    }
}
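// --- Usage sketch: wiring an SDK into the client (illustrative) ---
// `SomeSdk` is hypothetical; any type implementing
// `ModelSDK<Input = LlmInput, Output = LlmOutput>` can be wrapped the same way.
//
//     let client = LlmClient::new(SomeSdk::default());
//     let output = client.infer(input).await?;         // one complete LlmOutput
//     let stream = client.infer_stream(input).await?;  // raw ByteStream
//
// Each call takes its `LlmInput` by value, so build (or clone) a fresh input per call.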