//! `litellm_rust` client entry point (`client.rs`).
use crate::config::{Config, ProviderConfig, ProviderKind};
use crate::error::{LiteLLMError, Result};
use crate::providers::{anthropic, gemini, openai_compat};
use crate::registry::Registry;
use crate::router::{resolve_model, ResolvedModel};
use crate::stream::ChatStream;
use crate::types::{
    ChatRequest, ChatResponse, EmbeddingRequest, EmbeddingResponse, ImageEditRequest, ImageRequest,
    ImageResponse, VideoRequest, VideoResponse,
};
use reqwest::Client;

/// High-level client that routes LLM requests (chat, embeddings, image
/// and video generation) to the provider selected by the routing config.
///
/// Cloning is cheap enough to share across tasks: `reqwest::Client` is
/// internally reference-counted — TODO confirm `Config`/`Registry` clone
/// costs are acceptable for the intended usage.
#[derive(Debug, Clone)]
pub struct LiteLLM {
    // Provider table and default-provider routing rules.
    config: Config,
    // Shared HTTP client reused for every provider call.
    client: Client,
    // Embedded model registry; also backs `estimate_cost`.
    registry: Registry,
}
20impl LiteLLM {
21    pub fn new() -> Result<Self> {
22        let registry = Registry::load_embedded()?;
23        Ok(Self {
24            config: Config::default(),
25            client: Client::builder()
26                .timeout(std::time::Duration::from_secs(60))
27                .build()
28                .map_err(LiteLLMError::from)?,
29            registry,
30        })
31    }
32
33    pub fn with_default_provider(mut self, provider: impl Into<String>) -> Self {
34        self.config.default_provider = Some(provider.into());
35        self
36    }
37
38    pub fn with_provider(mut self, name: impl Into<String>, config: ProviderConfig) -> Self {
39        self.config.providers.insert(name.into(), config);
40        self
41    }
42
43    pub fn with_client(mut self, client: Client) -> Self {
44        self.client = client;
45        self
46    }
47
48    pub fn registry(&self) -> &Registry {
49        &self.registry
50    }
51
52    pub async fn completion(&self, mut req: ChatRequest) -> Result<ChatResponse> {
53        let resolved = resolve_model(&req.model, &self.config)?;
54        req.model = resolved.model.clone();
55        dispatch_chat(&self.client, resolved, req).await
56    }
57
58    pub async fn stream_completion(&self, mut req: ChatRequest) -> Result<ChatStream> {
59        let resolved = resolve_model(&req.model, &self.config)?;
60        req.model = resolved.model.clone();
61        match resolved.config.kind {
62            ProviderKind::OpenAICompatible => {
63                openai_compat::chat_stream(&self.client, &resolved.config, req).await
64            }
65            ProviderKind::Anthropic => {
66                anthropic::chat_stream(&self.client, &resolved.config, req).await
67            }
68            _ => Err(LiteLLMError::Unsupported(
69                "streaming not supported for provider".into(),
70            )),
71        }
72    }
73
74    pub async fn embedding(&self, mut req: EmbeddingRequest) -> Result<EmbeddingResponse> {
75        let resolved = resolve_model(&req.model, &self.config)?;
76        req.model = resolved.model.clone();
77        match resolved.config.kind {
78            ProviderKind::OpenAICompatible => {
79                openai_compat::embeddings(&self.client, &resolved.config, req).await
80            }
81            _ => Err(LiteLLMError::Unsupported(
82                "embeddings not supported for provider".into(),
83            )),
84        }
85    }
86
87    pub async fn image_generation(&self, mut req: ImageRequest) -> Result<ImageResponse> {
88        let resolved = resolve_model(&req.model, &self.config)?;
89        req.model = resolved.model.clone();
90        match resolved.config.kind {
91            ProviderKind::OpenAICompatible => {
92                openai_compat::image_generation(&self.client, &resolved.config, req).await
93            }
94            ProviderKind::Gemini => {
95                gemini::image_generation(&self.client, &resolved.config, req).await
96            }
97            _ => Err(LiteLLMError::Unsupported(
98                "image generation not supported for provider".into(),
99            )),
100        }
101    }
102
103    pub async fn image_editing(&self, mut req: ImageEditRequest) -> Result<ImageResponse> {
104        let resolved = resolve_model(&req.model, &self.config)?;
105        req.model = resolved.model.clone();
106        match resolved.config.kind {
107            ProviderKind::Gemini => {
108                gemini::image_editing(&self.client, &resolved.config, req).await
109            }
110            _ => Err(LiteLLMError::Unsupported(
111                "image editing not supported for provider".into(),
112            )),
113        }
114    }
115
116    pub async fn video_generation(&self, mut req: VideoRequest) -> Result<VideoResponse> {
117        let resolved = resolve_model(&req.model, &self.config)?;
118        req.model = resolved.model.clone();
119        match resolved.config.kind {
120            ProviderKind::OpenAICompatible => {
121                openai_compat::video_generation(&self.client, &resolved.config, req).await
122            }
123            ProviderKind::Gemini => {
124                gemini::video_generation(&self.client, &resolved.config, req).await
125            }
126            _ => Err(LiteLLMError::Unsupported(
127                "video generation not supported for provider".into(),
128            )),
129        }
130    }
131
132    pub fn estimate_cost(&self, model: &str, input_tokens: u32, output_tokens: u32) -> Option<f64> {
133        self.registry
134            .estimate_cost(model, input_tokens, output_tokens)
135    }
136}
137
138async fn dispatch_chat(
139    client: &Client,
140    resolved: ResolvedModel,
141    req: ChatRequest,
142) -> Result<ChatResponse> {
143    match resolved.config.kind {
144        ProviderKind::OpenAICompatible => openai_compat::chat(client, &resolved.config, req).await,
145        ProviderKind::Anthropic => anthropic::chat(client, &resolved.config, req).await,
146        ProviderKind::Gemini => gemini::chat(client, &resolved.config, req).await,
147    }
148}