1use crate::config::{AnthropicConfig, LLMConfig, LMStudioConfig, OllamaConfig, OpenAIConfig};
7use crate::error::{LlmError, LlmResult};
8use crate::logging::log_debug;
9use crate::messages::UnifiedLLMRequest;
10#[cfg(feature = "events")]
11use crate::provider::LLMBusinessEvent;
12use crate::provider::{LlmProvider, RequestConfig, Response, ToolCallingRound};
13use crate::providers::{AnthropicProvider, LMStudioProvider, OllamaProvider, OpenAIProvider};
14use async_trait::async_trait;
15
/// Internal sum type wrapping whichever concrete provider backs a
/// `UnifiedLLMClient`; all trait calls are dispatched through a `match`
/// on this enum.
// NOTE(review): the name collides (case-insensitively) with the
// `LlmProvider` trait — consider renaming to e.g. `ProviderKind`.
enum LLMProvider {
    /// Anthropic backend.
    Anthropic(AnthropicProvider),
    /// OpenAI backend.
    OpenAI(OpenAIProvider),
    /// LM Studio backend.
    LMStudio(LMStudioProvider),
    /// Ollama backend.
    Ollama(OllamaProvider),
}
23
/// Client exposing a single [`LlmProvider`] interface over whichever
/// concrete backend (Anthropic, OpenAI, LM Studio, or Ollama) was selected
/// at construction time.
pub struct UnifiedLLMClient {
    // The concrete backend every trait call is forwarded to.
    provider: LLMProvider,
}
123
124impl UnifiedLLMClient {
125 fn create_anthropic_provider(config: &LLMConfig, model: &str) -> LlmResult<LLMProvider> {
127 let anthropic_config = config
128 .provider
129 .as_any()
130 .downcast_ref::<AnthropicConfig>()
131 .ok_or_else(|| LlmError::configuration_error("Invalid Anthropic configuration"))?;
132
133 let provider =
134 AnthropicProvider::new(anthropic_config.clone(), config.default_params.clone())
135 .map_err(|e| {
136 LlmError::configuration_error(format!(
137 "Failed to create Anthropic provider for model {}: {}",
138 model, e
139 ))
140 })?;
141
142 Ok(LLMProvider::Anthropic(provider))
143 }
144
145 fn create_openai_provider(config: &LLMConfig, model: &str) -> LlmResult<LLMProvider> {
147 let openai_config = config
148 .provider
149 .as_any()
150 .downcast_ref::<OpenAIConfig>()
151 .ok_or_else(|| LlmError::configuration_error("Invalid OpenAI configuration"))?;
152
153 let provider = OpenAIProvider::new(openai_config.clone(), config.default_params.clone())
154 .map_err(|e| {
155 LlmError::configuration_error(format!(
156 "Failed to create OpenAI provider for model {}: {}",
157 model, e
158 ))
159 })?;
160
161 Ok(LLMProvider::OpenAI(provider))
162 }
163
164 fn create_lmstudio_provider(config: &LLMConfig, model: &str) -> LlmResult<LLMProvider> {
166 let lmstudio_config = config
167 .provider
168 .as_any()
169 .downcast_ref::<LMStudioConfig>()
170 .ok_or_else(|| LlmError::configuration_error("Invalid LM Studio configuration"))?;
171
172 let provider =
173 LMStudioProvider::new(lmstudio_config.clone(), config.default_params.clone()).map_err(
174 |e| {
175 LlmError::configuration_error(format!(
176 "Failed to create LM Studio provider for model {}: {}",
177 model, e
178 ))
179 },
180 )?;
181
182 Ok(LLMProvider::LMStudio(provider))
183 }
184
185 fn create_ollama_provider(config: &LLMConfig, model: &str) -> LlmResult<LLMProvider> {
187 let ollama_config = config
188 .provider
189 .as_any()
190 .downcast_ref::<OllamaConfig>()
191 .ok_or_else(|| LlmError::configuration_error("Invalid Ollama configuration"))?;
192
193 let provider = OllamaProvider::new(ollama_config.clone(), config.default_params.clone())
194 .map_err(|e| {
195 LlmError::configuration_error(format!(
196 "Failed to create Ollama provider for model {}: {}",
197 model, e
198 ))
199 })?;
200
201 Ok(LLMProvider::Ollama(provider))
202 }
203
204 pub fn create(provider_name: &str, model: String, config: LLMConfig) -> LlmResult<Self> {
217 let provider = match provider_name {
218 "anthropic" => Self::create_anthropic_provider(&config, &model)?,
219 "openai" => Self::create_openai_provider(&config, &model)?,
220 "lmstudio" => Self::create_lmstudio_provider(&config, &model)?,
221 "ollama" => Self::create_ollama_provider(&config, &model)?,
222 _ => return Err(LlmError::unsupported_provider(provider_name)),
223 };
224
225 log_debug!(
226 provider = provider_name,
227 model = %model,
228 "UnifiedLLMClient created"
229 );
230
231 Ok(Self { provider })
232 }
233
234 pub fn from_env() -> LlmResult<Self> {
243 let config = LLMConfig::from_env()?;
244 Self::from_config(config)
245 }
246
247 pub fn from_config(config: LLMConfig) -> LlmResult<Self> {
257 let provider_name = config.provider.provider_name();
258 let model = config.provider.default_model().to_string();
259
260 log_debug!(
261 target_provider = provider_name,
262 model = %model,
263 "Creating UnifiedLLMClient from config"
264 );
265
266 Self::create(provider_name, model, config)
267 }
268}
269
270#[async_trait]
273impl LlmProvider for UnifiedLLMClient {
274 #[cfg(feature = "events")]
275 async fn execute_llm(
276 &self,
277 request: UnifiedLLMRequest,
278 current_tool_round: Option<ToolCallingRound>,
279 config: Option<RequestConfig>,
280 ) -> crate::provider::Result<(Response, Vec<LLMBusinessEvent>)> {
281 match &self.provider {
283 LLMProvider::Anthropic(p) => p.restore_default_retry_policy().await,
284 LLMProvider::OpenAI(p) => p.restore_default_retry_policy().await,
285 LLMProvider::LMStudio(p) => p.restore_default_retry_policy().await,
286 LLMProvider::Ollama(p) => p.restore_default_retry_policy().await,
287 }
288
289 match &self.provider {
291 LLMProvider::Anthropic(p) => p.execute_llm(request, current_tool_round, config).await,
292 LLMProvider::OpenAI(p) => p.execute_llm(request, current_tool_round, config).await,
293 LLMProvider::LMStudio(p) => p.execute_llm(request, current_tool_round, config).await,
294 LLMProvider::Ollama(p) => p.execute_llm(request, current_tool_round, config).await,
295 }
296 }
297
298 #[cfg(not(feature = "events"))]
299 async fn execute_llm(
300 &self,
301 request: UnifiedLLMRequest,
302 current_tool_round: Option<ToolCallingRound>,
303 config: Option<RequestConfig>,
304 ) -> crate::provider::Result<Response> {
305 match &self.provider {
307 LLMProvider::Anthropic(p) => p.restore_default_retry_policy().await,
308 LLMProvider::OpenAI(p) => p.restore_default_retry_policy().await,
309 LLMProvider::LMStudio(p) => p.restore_default_retry_policy().await,
310 LLMProvider::Ollama(p) => p.restore_default_retry_policy().await,
311 }
312
313 match &self.provider {
315 LLMProvider::Anthropic(p) => p.execute_llm(request, current_tool_round, config).await,
316 LLMProvider::OpenAI(p) => p.execute_llm(request, current_tool_round, config).await,
317 LLMProvider::LMStudio(p) => p.execute_llm(request, current_tool_round, config).await,
318 LLMProvider::Ollama(p) => p.execute_llm(request, current_tool_round, config).await,
319 }
320 }
321
322 #[cfg(feature = "events")]
323 async fn execute_structured_llm(
324 &self,
325 request: UnifiedLLMRequest,
326 current_tool_round: Option<ToolCallingRound>,
327 schema: serde_json::Value,
328 config: Option<RequestConfig>,
329 ) -> crate::provider::Result<(Response, Vec<LLMBusinessEvent>)> {
330 match &self.provider {
332 LLMProvider::Anthropic(p) => p.restore_default_retry_policy().await,
333 LLMProvider::OpenAI(p) => p.restore_default_retry_policy().await,
334 LLMProvider::LMStudio(p) => p.restore_default_retry_policy().await,
335 LLMProvider::Ollama(p) => p.restore_default_retry_policy().await,
336 }
337
338 match &self.provider {
340 LLMProvider::Anthropic(p) => {
341 p.execute_structured_llm(request, current_tool_round, schema, config)
342 .await
343 }
344 LLMProvider::OpenAI(p) => {
345 p.execute_structured_llm(request, current_tool_round, schema, config)
346 .await
347 }
348 LLMProvider::LMStudio(p) => {
349 p.execute_structured_llm(request, current_tool_round, schema, config)
350 .await
351 }
352 LLMProvider::Ollama(p) => {
353 p.execute_structured_llm(request, current_tool_round, schema, config)
354 .await
355 }
356 }
357 }
358
359 #[cfg(not(feature = "events"))]
360 async fn execute_structured_llm(
361 &self,
362 request: UnifiedLLMRequest,
363 current_tool_round: Option<ToolCallingRound>,
364 schema: serde_json::Value,
365 config: Option<RequestConfig>,
366 ) -> crate::provider::Result<Response> {
367 match &self.provider {
369 LLMProvider::Anthropic(p) => p.restore_default_retry_policy().await,
370 LLMProvider::OpenAI(p) => p.restore_default_retry_policy().await,
371 LLMProvider::LMStudio(p) => p.restore_default_retry_policy().await,
372 LLMProvider::Ollama(p) => p.restore_default_retry_policy().await,
373 }
374
375 match &self.provider {
377 LLMProvider::Anthropic(p) => {
378 p.execute_structured_llm(request, current_tool_round, schema, config)
379 .await
380 }
381 LLMProvider::OpenAI(p) => {
382 p.execute_structured_llm(request, current_tool_round, schema, config)
383 .await
384 }
385 LLMProvider::LMStudio(p) => {
386 p.execute_structured_llm(request, current_tool_round, schema, config)
387 .await
388 }
389 LLMProvider::Ollama(p) => {
390 p.execute_structured_llm(request, current_tool_round, schema, config)
391 .await
392 }
393 }
394 }
395
396 fn provider_name(&self) -> &'static str {
397 match &self.provider {
398 LLMProvider::Anthropic(_) => "anthropic",
399 LLMProvider::OpenAI(_) => "openai",
400 LLMProvider::LMStudio(_) => "lmstudio",
401 LLMProvider::Ollama(_) => "ollama",
402 }
403 }
404}