multi_llm/
client.rs

1//! Unified LLM client for multi-provider operations.
2//!
3//! This module provides [`UnifiedLLMClient`], the main entry point for
4//! interacting with LLM providers through multi-llm.
5
6use crate::config::{AnthropicConfig, LLMConfig, LMStudioConfig, OllamaConfig, OpenAIConfig};
7use crate::error::{LlmError, LlmResult};
8use crate::logging::log_debug;
9use crate::messages::UnifiedLLMRequest;
10#[cfg(feature = "events")]
11use crate::provider::LLMBusinessEvent;
12use crate::provider::{LlmProvider, RequestConfig, Response, ToolCallingRound};
13use crate::providers::{AnthropicProvider, LMStudioProvider, OllamaProvider, OpenAIProvider};
14use async_trait::async_trait;
15
16/// Internal provider enum (not exposed publicly).
/// Internal provider enum (not exposed publicly).
///
/// Wraps each concrete provider implementation so [`UnifiedLLMClient`] can
/// hold any one of them in a single field and dispatch with a `match`.
enum LLMProvider {
    // Hosted providers (API key required — see table on `UnifiedLLMClient`).
    Anthropic(AnthropicProvider),
    OpenAI(OpenAIProvider),
    // Local providers (no API key).
    LMStudio(LMStudioProvider),
    Ollama(OllamaProvider),
}
23
/// Unified client for multi-provider LLM operations.
///
/// `UnifiedLLMClient` is the primary interface for using multi-llm. It wraps
/// all supported providers behind a single [`LlmProvider`] interface, allowing
/// you to switch providers without changing your application code.
///
/// # Quick Start
///
/// ```rust,no_run
/// use multi_llm::{unwrap_response, UnifiedLLMClient, LLMConfig, UnifiedMessage, UnifiedLLMRequest, LlmProvider};
///
/// # async fn example() -> anyhow::Result<()> {
/// // Create client from environment variables
/// let client = UnifiedLLMClient::from_env()?;
///
/// // Build a request
/// let request = UnifiedLLMRequest::new(vec![
///     UnifiedMessage::system("You are a helpful assistant."),
///     UnifiedMessage::user("What's the capital of France?"),
/// ]);
///
/// // Execute the request
/// let response = unwrap_response!(client.execute_llm(request, None, None).await?);
/// println!("Response: {}", response.content);
/// # Ok(())
/// # }
/// ```
///
/// # From Configuration
///
/// ```rust,no_run
/// use multi_llm::{UnifiedLLMClient, LLMConfig, OpenAIConfig, DefaultLLMParams};
///
/// let config = LLMConfig {
///     provider: Box::new(OpenAIConfig {
///         api_key: Some("sk-...".to_string()),
///         default_model: "gpt-4-turbo-preview".to_string(),
///         ..Default::default()
///     }),
///     default_params: DefaultLLMParams::default(),
/// };
///
/// let client = UnifiedLLMClient::from_config(config)?;
/// # Ok::<(), multi_llm::LlmError>(())
/// ```
///
/// # Tool Calling
///
/// ```rust,no_run
/// use multi_llm::{unwrap_response, UnifiedLLMClient, UnifiedMessage, UnifiedLLMRequest, RequestConfig, Tool, ToolChoice, LlmProvider};
///
/// # async fn example(client: UnifiedLLMClient) -> anyhow::Result<()> {
/// // Define a tool
/// let weather_tool = Tool {
///     name: "get_weather".to_string(),
///     description: "Get current weather".to_string(),
///     parameters: serde_json::json!({
///         "type": "object",
///         "properties": {
///             "city": {"type": "string"}
///         },
///         "required": ["city"]
///     }),
/// };
///
/// let request = UnifiedLLMRequest::new(vec![
///     UnifiedMessage::user("What's the weather in Paris?"),
/// ]);
///
/// let config = RequestConfig {
///     tools: vec![weather_tool],
///     tool_choice: Some(ToolChoice::Auto),
///     ..Default::default()
/// };
///
/// let response = unwrap_response!(client.execute_llm(request, None, Some(config)).await?);
///
/// // Check for tool calls
/// if !response.tool_calls.is_empty() {
///     for call in &response.tool_calls {
///         println!("Tool call: {} with {}", call.name, call.arguments);
///         // Execute tool and continue conversation...
///     }
/// }
/// # Ok(())
/// # }
/// ```
///
/// # Supported Providers
///
/// | Provider | Config Type | API Key Required |
/// |----------|------------|------------------|
/// | Anthropic | [`AnthropicConfig`] | Yes |
/// | OpenAI | [`OpenAIConfig`] | Yes |
/// | Ollama | [`OllamaConfig`] | No (local) |
/// | LM Studio | [`LMStudioConfig`] | No (local) |
pub struct UnifiedLLMClient {
    // The concrete provider selected at construction time; every trait-method
    // call on the client is dispatched to this value via `match`.
    provider: LLMProvider,
}
123
124impl UnifiedLLMClient {
125    /// Create Anthropic provider from config
126    fn create_anthropic_provider(config: &LLMConfig, model: &str) -> LlmResult<LLMProvider> {
127        let anthropic_config = config
128            .provider
129            .as_any()
130            .downcast_ref::<AnthropicConfig>()
131            .ok_or_else(|| LlmError::configuration_error("Invalid Anthropic configuration"))?;
132
133        let provider =
134            AnthropicProvider::new(anthropic_config.clone(), config.default_params.clone())
135                .map_err(|e| {
136                    LlmError::configuration_error(format!(
137                        "Failed to create Anthropic provider for model {}: {}",
138                        model, e
139                    ))
140                })?;
141
142        Ok(LLMProvider::Anthropic(provider))
143    }
144
145    /// Create OpenAI provider from config
146    fn create_openai_provider(config: &LLMConfig, model: &str) -> LlmResult<LLMProvider> {
147        let openai_config = config
148            .provider
149            .as_any()
150            .downcast_ref::<OpenAIConfig>()
151            .ok_or_else(|| LlmError::configuration_error("Invalid OpenAI configuration"))?;
152
153        let provider = OpenAIProvider::new(openai_config.clone(), config.default_params.clone())
154            .map_err(|e| {
155                LlmError::configuration_error(format!(
156                    "Failed to create OpenAI provider for model {}: {}",
157                    model, e
158                ))
159            })?;
160
161        Ok(LLMProvider::OpenAI(provider))
162    }
163
164    /// Create LMStudio provider from config
165    fn create_lmstudio_provider(config: &LLMConfig, model: &str) -> LlmResult<LLMProvider> {
166        let lmstudio_config = config
167            .provider
168            .as_any()
169            .downcast_ref::<LMStudioConfig>()
170            .ok_or_else(|| LlmError::configuration_error("Invalid LM Studio configuration"))?;
171
172        let provider =
173            LMStudioProvider::new(lmstudio_config.clone(), config.default_params.clone()).map_err(
174                |e| {
175                    LlmError::configuration_error(format!(
176                        "Failed to create LM Studio provider for model {}: {}",
177                        model, e
178                    ))
179                },
180            )?;
181
182        Ok(LLMProvider::LMStudio(provider))
183    }
184
185    /// Create Ollama provider from config
186    fn create_ollama_provider(config: &LLMConfig, model: &str) -> LlmResult<LLMProvider> {
187        let ollama_config = config
188            .provider
189            .as_any()
190            .downcast_ref::<OllamaConfig>()
191            .ok_or_else(|| LlmError::configuration_error("Invalid Ollama configuration"))?;
192
193        let provider = OllamaProvider::new(ollama_config.clone(), config.default_params.clone())
194            .map_err(|e| {
195                LlmError::configuration_error(format!(
196                    "Failed to create Ollama provider for model {}: {}",
197                    model, e
198                ))
199            })?;
200
201        Ok(LLMProvider::Ollama(provider))
202    }
203
204    /// Factory method to create UnifiedLLMClient with all parameters
205    /// This is the primary constructor for production use
206    ///
207    /// # Errors
208    ///
209    /// Returns [`LlmError::UnsupportedProvider`] if the provider name is not recognized.
210    /// Supported providers are: "anthropic", "openai", "lmstudio", "ollama".
211    ///
212    /// Returns [`LlmError::ConfigurationError`] if:
213    /// - The provider configuration type doesn't match the provider name
214    /// - Required configuration fields are missing (e.g., API key for OpenAI/Anthropic)
215    /// - Configuration validation fails (e.g., invalid base URL format)
216    pub fn create(provider_name: &str, model: String, config: LLMConfig) -> LlmResult<Self> {
217        let provider = match provider_name {
218            "anthropic" => Self::create_anthropic_provider(&config, &model)?,
219            "openai" => Self::create_openai_provider(&config, &model)?,
220            "lmstudio" => Self::create_lmstudio_provider(&config, &model)?,
221            "ollama" => Self::create_ollama_provider(&config, &model)?,
222            _ => return Err(LlmError::unsupported_provider(provider_name)),
223        };
224
225        log_debug!(
226            provider = provider_name,
227            model = %model,
228            "UnifiedLLMClient created"
229        );
230
231        Ok(Self { provider })
232    }
233
234    /// Create a client using environment variables for configuration
235    ///
236    /// # Errors
237    ///
238    /// Returns [`LlmError::ConfigurationError`] if:
239    /// - Required environment variables are missing
240    /// - Environment variable values are invalid or malformed
241    /// - Provider configuration validation fails
242    pub fn from_env() -> LlmResult<Self> {
243        let config = LLMConfig::from_env()?;
244        Self::from_config(config)
245    }
246
247    /// Create a client from an LLMConfig (backward compatibility)
248    ///
249    /// # Errors
250    ///
251    /// Returns [`LlmError::UnsupportedProvider`] if the provider name in the config is not recognized.
252    ///
253    /// Returns [`LlmError::ConfigurationError`] if:
254    /// - Provider configuration validation fails
255    /// - Required provider-specific settings are missing
256    pub fn from_config(config: LLMConfig) -> LlmResult<Self> {
257        let provider_name = config.provider.provider_name();
258        let model = config.provider.default_model().to_string();
259
260        log_debug!(
261            target_provider = provider_name,
262            model = %model,
263            "Creating UnifiedLLMClient from config"
264        );
265
266        Self::create(provider_name, model, config)
267    }
268}
269
270/// Implement LlmProvider for UnifiedLLMClient
271/// Just delegates to the underlying provider - providers already handle events feature correctly
272#[async_trait]
273impl LlmProvider for UnifiedLLMClient {
274    #[cfg(feature = "events")]
275    async fn execute_llm(
276        &self,
277        request: UnifiedLLMRequest,
278        current_tool_round: Option<ToolCallingRound>,
279        config: Option<RequestConfig>,
280    ) -> crate::provider::Result<(Response, Vec<LLMBusinessEvent>)> {
281        // Restore default retry policy
282        match &self.provider {
283            LLMProvider::Anthropic(p) => p.restore_default_retry_policy().await,
284            LLMProvider::OpenAI(p) => p.restore_default_retry_policy().await,
285            LLMProvider::LMStudio(p) => p.restore_default_retry_policy().await,
286            LLMProvider::Ollama(p) => p.restore_default_retry_policy().await,
287        }
288
289        // Delegate to provider
290        match &self.provider {
291            LLMProvider::Anthropic(p) => p.execute_llm(request, current_tool_round, config).await,
292            LLMProvider::OpenAI(p) => p.execute_llm(request, current_tool_round, config).await,
293            LLMProvider::LMStudio(p) => p.execute_llm(request, current_tool_round, config).await,
294            LLMProvider::Ollama(p) => p.execute_llm(request, current_tool_round, config).await,
295        }
296    }
297
298    #[cfg(not(feature = "events"))]
299    async fn execute_llm(
300        &self,
301        request: UnifiedLLMRequest,
302        current_tool_round: Option<ToolCallingRound>,
303        config: Option<RequestConfig>,
304    ) -> crate::provider::Result<Response> {
305        // Restore default retry policy
306        match &self.provider {
307            LLMProvider::Anthropic(p) => p.restore_default_retry_policy().await,
308            LLMProvider::OpenAI(p) => p.restore_default_retry_policy().await,
309            LLMProvider::LMStudio(p) => p.restore_default_retry_policy().await,
310            LLMProvider::Ollama(p) => p.restore_default_retry_policy().await,
311        }
312
313        // Delegate to provider
314        match &self.provider {
315            LLMProvider::Anthropic(p) => p.execute_llm(request, current_tool_round, config).await,
316            LLMProvider::OpenAI(p) => p.execute_llm(request, current_tool_round, config).await,
317            LLMProvider::LMStudio(p) => p.execute_llm(request, current_tool_round, config).await,
318            LLMProvider::Ollama(p) => p.execute_llm(request, current_tool_round, config).await,
319        }
320    }
321
322    #[cfg(feature = "events")]
323    async fn execute_structured_llm(
324        &self,
325        request: UnifiedLLMRequest,
326        current_tool_round: Option<ToolCallingRound>,
327        schema: serde_json::Value,
328        config: Option<RequestConfig>,
329    ) -> crate::provider::Result<(Response, Vec<LLMBusinessEvent>)> {
330        // Restore default retry policy
331        match &self.provider {
332            LLMProvider::Anthropic(p) => p.restore_default_retry_policy().await,
333            LLMProvider::OpenAI(p) => p.restore_default_retry_policy().await,
334            LLMProvider::LMStudio(p) => p.restore_default_retry_policy().await,
335            LLMProvider::Ollama(p) => p.restore_default_retry_policy().await,
336        }
337
338        // Delegate to provider
339        match &self.provider {
340            LLMProvider::Anthropic(p) => {
341                p.execute_structured_llm(request, current_tool_round, schema, config)
342                    .await
343            }
344            LLMProvider::OpenAI(p) => {
345                p.execute_structured_llm(request, current_tool_round, schema, config)
346                    .await
347            }
348            LLMProvider::LMStudio(p) => {
349                p.execute_structured_llm(request, current_tool_round, schema, config)
350                    .await
351            }
352            LLMProvider::Ollama(p) => {
353                p.execute_structured_llm(request, current_tool_round, schema, config)
354                    .await
355            }
356        }
357    }
358
359    #[cfg(not(feature = "events"))]
360    async fn execute_structured_llm(
361        &self,
362        request: UnifiedLLMRequest,
363        current_tool_round: Option<ToolCallingRound>,
364        schema: serde_json::Value,
365        config: Option<RequestConfig>,
366    ) -> crate::provider::Result<Response> {
367        // Restore default retry policy
368        match &self.provider {
369            LLMProvider::Anthropic(p) => p.restore_default_retry_policy().await,
370            LLMProvider::OpenAI(p) => p.restore_default_retry_policy().await,
371            LLMProvider::LMStudio(p) => p.restore_default_retry_policy().await,
372            LLMProvider::Ollama(p) => p.restore_default_retry_policy().await,
373        }
374
375        // Delegate to provider
376        match &self.provider {
377            LLMProvider::Anthropic(p) => {
378                p.execute_structured_llm(request, current_tool_round, schema, config)
379                    .await
380            }
381            LLMProvider::OpenAI(p) => {
382                p.execute_structured_llm(request, current_tool_round, schema, config)
383                    .await
384            }
385            LLMProvider::LMStudio(p) => {
386                p.execute_structured_llm(request, current_tool_round, schema, config)
387                    .await
388            }
389            LLMProvider::Ollama(p) => {
390                p.execute_structured_llm(request, current_tool_round, schema, config)
391                    .await
392            }
393        }
394    }
395
396    fn provider_name(&self) -> &'static str {
397        match &self.provider {
398            LLMProvider::Anthropic(_) => "anthropic",
399            LLMProvider::OpenAI(_) => "openai",
400            LLMProvider::LMStudio(_) => "lmstudio",
401            LLMProvider::Ollama(_) => "ollama",
402        }
403    }
404}