// multi_llm/config.rs
1//! Configuration types for LLM providers.
2//!
3//! This module provides configuration structures for all supported LLM providers.
4//! Each provider has its own config type implementing [`ProviderConfig`], plus
5//! shared types for default parameters and dual-path setups.
6//!
7//! # Quick Start
8//!
9//! ```rust,no_run
10//! use multi_llm::{LLMConfig, OpenAIConfig, DefaultLLMParams, UnifiedLLMClient};
11//!
12//! // Create config programmatically
13//! let config = LLMConfig {
14//! provider: Box::new(OpenAIConfig {
15//! api_key: Some("sk-...".to_string()),
16//! ..Default::default()
17//! }),
18//! default_params: DefaultLLMParams::default(),
19//! };
20//!
21//! let client = UnifiedLLMClient::from_config(config)?;
22//! # Ok::<(), multi_llm::LlmError>(())
23//! ```
24//!
25//! # From Environment Variables
26//!
27//! ```rust,no_run
28//! use multi_llm::{LLMConfig, UnifiedLLMClient};
29//!
30//! // Uses AI_PROVIDER and provider-specific env vars
31//! let config = LLMConfig::from_env()?;
32//! let client = UnifiedLLMClient::from_config(config)?;
33//! # Ok::<(), multi_llm::LlmError>(())
34//! ```
35//!
36//! # Provider-Specific Configs
37//!
38//! | Provider | Config Type | Required Env Vars |
39//! |----------|------------|-------------------|
40//! | OpenAI | [`OpenAIConfig`] | `OPENAI_API_KEY` |
41//! | Anthropic | [`AnthropicConfig`] | `ANTHROPIC_API_KEY` |
42//! | Ollama | [`OllamaConfig`] | (none, local) |
43//! | LM Studio | [`LMStudioConfig`] | (none, local) |
44
45use crate::error::{LlmError, LlmResult};
46use crate::internals::retry::RetryPolicy;
47use crate::logging::log_debug;
48use serde::{Deserialize, Serialize};
49use std::any::Any;
50use std::fmt::Debug;
51
52/// Trait for provider-specific configuration.
53///
54/// All provider configs (OpenAI, Anthropic, etc.) implement this trait.
55/// You typically don't need to implement this yourself unless adding
56/// a custom provider.
57///
58/// # Provided Implementations
59///
60/// - [`OpenAIConfig`]
61/// - [`AnthropicConfig`]
62/// - [`OllamaConfig`]
63/// - [`LMStudioConfig`]
/// Trait for provider-specific configuration.
///
/// All provider configs (OpenAI, Anthropic, etc.) implement this trait.
/// You typically don't need to implement this yourself unless adding
/// a custom provider.
///
/// # Provided Implementations
///
/// - [`OpenAIConfig`]
/// - [`AnthropicConfig`]
/// - [`OllamaConfig`]
/// - [`LMStudioConfig`]
pub trait ProviderConfig: Send + Sync + Debug + Any {
    /// Get the provider identifier (e.g., "openai", "anthropic").
    fn provider_name(&self) -> &'static str;

    /// Get the maximum context window size in tokens.
    fn max_context_tokens(&self) -> usize;

    /// Validate that the configuration is complete and valid.
    ///
    /// # Errors
    ///
    /// Returns [`LlmError::ConfigurationError`] if:
    /// - Required fields are missing (e.g., API key for cloud providers)
    /// - Field values are invalid (e.g., malformed URLs)
    /// - Provider-specific validation fails
    fn validate(&self) -> LlmResult<()>;

    /// Get the base URL for API requests.
    fn base_url(&self) -> &str;

    /// Get the API key, if one is configured.
    ///
    /// Local providers (Ollama, LM Studio) return `None` since they do not
    /// require authentication.
    fn api_key(&self) -> Option<&str>;

    /// Get the default model name for this provider.
    fn default_model(&self) -> &str;

    /// Downcast helper for accessing concrete config types.
    ///
    /// Used by [`LLMConfig`]'s `Clone` implementation to recover the
    /// concrete type; implementations should simply return `self`.
    fn as_any(&self) -> &dyn Any;

    /// Get the retry policy for transient failures.
    fn retry_policy(&self) -> &RetryPolicy;
}
96
97/// System-wide LLM configuration.
98///
99/// Combines a provider-specific configuration with default model parameters.
100/// This is the primary config type used to create a [`UnifiedLLMClient`](crate::UnifiedLLMClient).
101///
102/// # Example
103///
104/// ```rust,no_run
105/// use multi_llm::{LLMConfig, AnthropicConfig, DefaultLLMParams};
106///
107/// let config = LLMConfig {
108/// provider: Box::new(AnthropicConfig {
109/// api_key: Some("sk-ant-...".to_string()),
110/// default_model: "claude-3-5-sonnet-20241022".to_string(),
111/// ..Default::default()
112/// }),
113/// default_params: DefaultLLMParams {
114/// temperature: 0.7,
115/// max_tokens: 4096,
116/// ..Default::default()
117/// },
118/// };
119/// ```
120///
121/// # From Environment
122///
123/// Use [`from_env()`](Self::from_env) to load from environment variables:
124/// - `AI_PROVIDER`: Provider name ("anthropic", "openai", "ollama", "lmstudio")
125/// - Provider-specific vars (e.g., `ANTHROPIC_API_KEY`, `OPENAI_API_KEY`)
#[derive(Debug)]
pub struct LLMConfig {
    /// The provider-specific configuration.
    ///
    /// Contains API keys, endpoints, model selection, and provider features.
    ///
    /// Note: `LLMConfig`'s `Clone` implementation only supports the built-in
    /// config types (Anthropic/OpenAI/LM Studio/Ollama); cloning a config
    /// holding a custom `ProviderConfig` implementation will panic.
    pub provider: Box<dyn ProviderConfig>,

    /// Default parameters for LLM requests.
    ///
    /// Applied to all requests unless overridden by [`RequestConfig`](crate::RequestConfig).
    pub default_params: DefaultLLMParams,
}
138
139impl LLMConfig {
140 /// Clone provider config by downcasting to concrete type
141 fn clone_provider(&self) -> Box<dyn ProviderConfig> {
142 let any_ref = self.provider.as_any();
143
144 if let Some(config) = any_ref.downcast_ref::<AnthropicConfig>() {
145 return Box::new(config.clone());
146 }
147 if let Some(config) = any_ref.downcast_ref::<OpenAIConfig>() {
148 return Box::new(config.clone());
149 }
150 if let Some(config) = any_ref.downcast_ref::<LMStudioConfig>() {
151 return Box::new(config.clone());
152 }
153 if let Some(config) = any_ref.downcast_ref::<OllamaConfig>() {
154 return Box::new(config.clone());
155 }
156
157 // This should never happen as all provider types are covered above
158 unreachable!("Unknown provider type - all provider types should be handled")
159 }
160}
161
162impl Clone for LLMConfig {
163 fn clone(&self) -> Self {
164 Self {
165 provider: self.clone_provider(),
166 default_params: self.default_params.clone(),
167 }
168 }
169}
170
171/// Default parameters for LLM generation.
172///
173/// These values are used when a request doesn't specify its own values.
174/// All parameters have sensible defaults that work well for most use cases.
175///
176/// # Defaults
177///
178/// | Parameter | Default | Description |
179/// |-----------|---------|-------------|
180/// | `temperature` | 0.7 | Balanced creativity/consistency |
181/// | `max_tokens` | 1000 | Reasonable response length |
182/// | `top_p` | 0.9 | Standard nucleus sampling |
183/// | `top_k` | 40 | Vocabulary restriction |
184/// | `min_p` | 0.05 | Minimum probability filter |
185/// | `presence_penalty` | 0.0 | No repetition penalty |
186///
187/// # Example
188///
189/// ```rust
190/// use multi_llm::DefaultLLMParams;
191///
192/// // Use defaults
193/// let params = DefaultLLMParams::default();
194///
195/// // Or customize
196/// let params = DefaultLLMParams {
197/// temperature: 0.2, // More deterministic
198/// max_tokens: 4096, // Longer responses
199/// ..Default::default()
200/// };
201/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DefaultLLMParams {
    /// Temperature for response randomness (0.0 = deterministic, 2.0 = very random).
    pub temperature: f64,

    /// Maximum tokens to generate per response.
    pub max_tokens: u32,

    /// Top-p (nucleus) sampling threshold: sampling is restricted to the
    /// smallest token set whose cumulative probability exceeds this value.
    pub top_p: f64,

    /// Top-k sampling limit: only the `top_k` most likely tokens are
    /// considered at each step.
    pub top_k: u32,

    /// Minimum probability filter: tokens below this probability are
    /// excluded from sampling.
    /// NOTE(review): not every provider supports `min_p`/`top_k`; confirm
    /// how each backend handles unsupported parameters.
    pub min_p: f64,

    /// Presence penalty to reduce repetition (0.0 = no penalty).
    pub presence_penalty: f64,
}
222
223impl Default for DefaultLLMParams {
224 fn default() -> Self {
225 Self {
226 temperature: 0.7,
227 max_tokens: 1000,
228 top_p: 0.9,
229 top_k: 40,
230 min_p: 0.05,
231 presence_penalty: 0.0,
232 }
233 }
234}
235
236/// Configuration for Anthropic Claude models.
237///
238/// Claude models support prompt caching for significant cost savings (90% on cache reads).
239/// Enable caching for static system prompts and context that doesn't change often.
240///
241/// # Example
242///
243/// ```rust,no_run
244/// use multi_llm::AnthropicConfig;
245///
246/// let config = AnthropicConfig {
247/// api_key: Some("sk-ant-api03-...".to_string()),
248/// default_model: "claude-3-5-sonnet-20241022".to_string(),
249/// enable_prompt_caching: true,
250/// cache_ttl: "1h".to_string(), // 1-hour cache
251/// ..Default::default()
252/// };
253/// ```
254///
255/// # Environment Variables
256///
257/// - `ANTHROPIC_API_KEY`: API key (required)
258///
259/// # Models
260///
261/// - `claude-3-5-sonnet-20241022`: Latest Sonnet (recommended)
262/// - `claude-3-opus-20240229`: Most capable
263/// - `claude-3-haiku-20240307`: Fastest, cheapest
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnthropicConfig {
    /// Anthropic API key (starts with "sk-ant-").
    ///
    /// Required: [`validate`](ProviderConfig::validate) fails when `None`.
    pub api_key: Option<String>,

    /// Base URL for API requests (default: `https://api.anthropic.com`).
    pub base_url: String,

    /// Default model to use for requests.
    pub default_model: String,

    /// Maximum context window size in tokens (200K for Claude 3).
    pub max_context_tokens: usize,

    /// Retry policy for transient failures.
    pub retry_policy: RetryPolicy,

    /// Enable prompt caching for cost savings.
    ///
    /// When enabled, static system prompts and context are cached,
    /// reducing costs by 90% on cache reads.
    pub enable_prompt_caching: bool,

    /// Cache TTL setting: "5m" for 5-minute cache, "1h" for 1-hour cache.
    ///
    /// - "5m": Ephemeral cache, 1.25x write cost, good for development
    /// - "1h": Extended cache, 2x write cost, good for production
    ///
    /// NOTE(review): presumably ignored when `enable_prompt_caching` is
    /// false — confirm against the client implementation.
    pub cache_ttl: String,
}
293
impl Default for AnthropicConfig {
    /// Defaults favor cost efficiency: prompt caching is enabled with a
    /// 1-hour TTL (2x write cost, 90% savings on cache reads).
    fn default() -> Self {
        Self {
            api_key: None,
            base_url: "https://api.anthropic.com".to_string(),
            default_model: "claude-3-5-sonnet-20241022".to_string(),
            max_context_tokens: 200_000,
            retry_policy: RetryPolicy::default(),
            enable_prompt_caching: true, // Enable by default for cost savings
            cache_ttl: "1h".to_string(), // Extended cache suits long sessions where cached context rarely changes
        }
    }
}
307
308impl ProviderConfig for AnthropicConfig {
309 fn provider_name(&self) -> &'static str {
310 "anthropic"
311 }
312
313 fn max_context_tokens(&self) -> usize {
314 self.max_context_tokens
315 }
316
317 fn validate(&self) -> LlmResult<()> {
318 if self.api_key.is_none() {
319 return Err(LlmError::configuration_error(
320 "Anthropic API key is required",
321 ));
322 }
323 Ok(())
324 }
325
326 fn base_url(&self) -> &str {
327 &self.base_url
328 }
329
330 fn api_key(&self) -> Option<&str> {
331 self.api_key.as_deref()
332 }
333
334 fn default_model(&self) -> &str {
335 &self.default_model
336 }
337
338 fn as_any(&self) -> &dyn Any {
339 self
340 }
341
342 fn retry_policy(&self) -> &RetryPolicy {
343 &self.retry_policy
344 }
345}
346
347/// Configuration for OpenAI GPT models.
348///
349/// Supports GPT-4, GPT-3.5, and other OpenAI models. Also works with
350/// OpenAI-compatible APIs by changing the base URL.
351///
352/// # Example
353///
354/// ```rust,no_run
355/// use multi_llm::OpenAIConfig;
356///
357/// let config = OpenAIConfig {
358/// api_key: Some("sk-...".to_string()),
359/// default_model: "gpt-4-turbo-preview".to_string(),
360/// ..Default::default()
361/// };
362/// ```
363///
364/// # Environment Variables
365///
366/// - `OPENAI_API_KEY`: API key (required)
367/// - `OPENAI_BASE_URL`: Custom base URL (optional)
368///
369/// # Models
370///
371/// - `gpt-4-turbo-preview`: Latest GPT-4 Turbo (128K context)
372/// - `gpt-4`: Standard GPT-4 (8K context)
373/// - `gpt-3.5-turbo`: Fast and affordable (16K context)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OpenAIConfig {
    /// OpenAI API key (starts with "sk-").
    ///
    /// Required: [`validate`](ProviderConfig::validate) fails when `None`.
    pub api_key: Option<String>,

    /// Base URL for API requests (default: `https://api.openai.com`).
    ///
    /// Change this to target OpenAI-compatible third-party endpoints.
    pub base_url: String,

    /// Default model to use for requests.
    pub default_model: String,

    /// Maximum context window size in tokens.
    ///
    /// Should match the context window of the selected model (see the model
    /// list in the struct docs); it is not validated against the model name.
    pub max_context_tokens: usize,

    /// Retry policy for transient failures.
    pub retry_policy: RetryPolicy,
}
391
392impl Default for OpenAIConfig {
393 fn default() -> Self {
394 Self {
395 api_key: None,
396 base_url: "https://api.openai.com".to_string(),
397 default_model: "gpt-4".to_string(),
398 max_context_tokens: 128_000,
399 retry_policy: RetryPolicy::default(),
400 }
401 }
402}
403
404impl ProviderConfig for OpenAIConfig {
405 fn provider_name(&self) -> &'static str {
406 "openai"
407 }
408
409 fn max_context_tokens(&self) -> usize {
410 self.max_context_tokens
411 }
412
413 fn validate(&self) -> LlmResult<()> {
414 if self.api_key.is_none() {
415 return Err(LlmError::configuration_error("OpenAI API key is required"));
416 }
417 Ok(())
418 }
419
420 fn base_url(&self) -> &str {
421 &self.base_url
422 }
423
424 fn api_key(&self) -> Option<&str> {
425 self.api_key.as_deref()
426 }
427
428 fn default_model(&self) -> &str {
429 &self.default_model
430 }
431
432 fn as_any(&self) -> &dyn Any {
433 self
434 }
435
436 fn retry_policy(&self) -> &RetryPolicy {
437 &self.retry_policy
438 }
439}
440
441/// Configuration for LM Studio local models.
442///
443/// LM Studio provides an OpenAI-compatible API for running local models.
444/// No API key is required since it runs locally.
445///
446/// # Example
447///
448/// ```rust
449/// use multi_llm::LMStudioConfig;
450///
451/// let config = LMStudioConfig {
452/// base_url: "http://localhost:1234".to_string(),
453/// default_model: "local-model".to_string(),
454/// max_context_tokens: 4096,
455/// ..Default::default()
456/// };
457/// ```
458///
459/// # Environment Variables
460///
461/// - `LM_STUDIO_BASE_URL` or `OPENAI_BASE_URL`: Server URL (default: `http://localhost:1234`)
462///
463/// # Notes
464///
465/// - Start LM Studio server before making requests
466/// - Context window depends on the loaded model
467/// - Model name in config is ignored; uses whatever model is loaded in LM Studio
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LMStudioConfig {
    /// Base URL for the LM Studio server (default: `http://localhost:1234`).
    ///
    /// Must be non-empty: [`validate`](ProviderConfig::validate) fails otherwise.
    pub base_url: String,

    /// Default model name.
    ///
    /// LM Studio serves whatever model is currently loaded, so this value is
    /// informational rather than a selector.
    pub default_model: String,

    /// Maximum context window size in tokens.
    ///
    /// NOTE(review): the effective window depends on the model loaded in LM
    /// Studio; this value is not validated against it.
    pub max_context_tokens: usize,

    /// Retry policy for transient failures.
    pub retry_policy: RetryPolicy,
}
482
483impl Default for LMStudioConfig {
484 fn default() -> Self {
485 Self {
486 base_url: "http://localhost:1234".to_string(),
487 default_model: "local-model".to_string(),
488 max_context_tokens: 4_096,
489 retry_policy: RetryPolicy::default(),
490 }
491 }
492}
493
494impl ProviderConfig for LMStudioConfig {
495 fn provider_name(&self) -> &'static str {
496 "lmstudio"
497 }
498
499 fn max_context_tokens(&self) -> usize {
500 self.max_context_tokens
501 }
502
503 fn validate(&self) -> LlmResult<()> {
504 if self.base_url.is_empty() {
505 return Err(LlmError::configuration_error(
506 "LM Studio base URL is required",
507 ));
508 }
509 Ok(())
510 }
511
512 fn base_url(&self) -> &str {
513 &self.base_url
514 }
515
516 fn api_key(&self) -> Option<&str> {
517 None // LM Studio doesn't require API key
518 }
519
520 fn default_model(&self) -> &str {
521 &self.default_model
522 }
523
524 fn as_any(&self) -> &dyn Any {
525 self
526 }
527
528 fn retry_policy(&self) -> &RetryPolicy {
529 &self.retry_policy
530 }
531}
532
533/// Configuration for Ollama local models.
534///
535/// Ollama is a tool for running open-source LLMs locally. It provides
536/// an OpenAI-compatible API and doesn't require an API key.
537///
538/// # Example
539///
540/// ```rust
541/// use multi_llm::OllamaConfig;
542///
543/// let config = OllamaConfig {
544/// base_url: "http://localhost:11434".to_string(),
545/// default_model: "llama2".to_string(),
546/// max_context_tokens: 4096,
547/// ..Default::default()
548/// };
549/// ```
550///
551/// # Environment Variables
552///
553/// None required (local service).
554///
555/// # Popular Models
556///
557/// - `llama2`: Meta's Llama 2
558/// - `mistral`: Mistral AI's model
559/// - `codellama`: Code-specialized Llama
560/// - `phi`: Microsoft's Phi model
561///
562/// Install models with: `ollama pull <model-name>`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OllamaConfig {
    /// Base URL for the Ollama server (default: `http://localhost:11434`).
    ///
    /// Must be non-empty: [`validate`](ProviderConfig::validate) fails otherwise.
    pub base_url: String,

    /// Default model to use (must be pulled with `ollama pull`).
    pub default_model: String,

    /// Maximum context window size in tokens.
    ///
    /// NOTE(review): the effective window depends on the selected model;
    /// this value is not validated against it.
    pub max_context_tokens: usize,

    /// Retry policy for transient failures.
    pub retry_policy: RetryPolicy,
}
577
578impl Default for OllamaConfig {
579 fn default() -> Self {
580 Self {
581 base_url: "http://localhost:11434".to_string(),
582 default_model: "llama2".to_string(),
583 max_context_tokens: 4_096,
584 retry_policy: RetryPolicy::default(),
585 }
586 }
587}
588
589impl ProviderConfig for OllamaConfig {
590 fn provider_name(&self) -> &'static str {
591 "ollama"
592 }
593
594 fn max_context_tokens(&self) -> usize {
595 self.max_context_tokens
596 }
597
598 fn validate(&self) -> LlmResult<()> {
599 if self.base_url.is_empty() {
600 return Err(LlmError::configuration_error("Ollama base URL is required"));
601 }
602 Ok(())
603 }
604
605 fn base_url(&self) -> &str {
606 &self.base_url
607 }
608
609 fn api_key(&self) -> Option<&str> {
610 None // Ollama doesn't require API key
611 }
612
613 fn default_model(&self) -> &str {
614 &self.default_model
615 }
616
617 fn as_any(&self) -> &dyn Any {
618 self
619 }
620
621 fn retry_policy(&self) -> &RetryPolicy {
622 &self.retry_policy
623 }
624}
625
626impl LLMConfig {
627 /// Create configuration for a specific provider with generic parameters
628 /// This is the main factory method for creating provider configurations
629 ///
630 /// # Errors
631 ///
632 /// Returns [`LlmError::UnsupportedProvider`] if the provider name is not recognized.
633 /// Supported providers are: "anthropic", "openai", "lmstudio".
634 ///
635 /// Returns [`LlmError::ConfigurationError`] if:
636 /// - API key format validation fails
637 /// - Provider-specific configuration validation fails
638 /// - Required fields for the provider are missing
639 pub fn create_provider(
640 provider_name: &str,
641 api_key: Option<String>,
642 base_url: Option<String>,
643 model: Option<String>,
644 ) -> LlmResult<Self> {
645 log_debug!(
646 provider = %provider_name,
647 has_api_key = api_key.is_some(),
648 has_base_url = base_url.is_some(),
649 has_model = model.is_some(),
650 "Creating provider configuration"
651 );
652
653 let provider: Box<dyn ProviderConfig> = match provider_name.to_lowercase().as_str() {
654 "anthropic" => Self::create_anthropic_provider(api_key, base_url, model),
655 "openai" => Self::create_openai_provider(api_key, base_url, model),
656 "lmstudio" => Self::create_lmstudio_provider(base_url, model),
657 "ollama" => Self::create_ollama_provider(base_url, model),
658 _ => {
659 return Err(LlmError::configuration_error(format!(
660 "Unsupported provider: {}. Supported providers: anthropic, openai, lmstudio, ollama",
661 provider_name
662 )));
663 }
664 };
665
666 provider.validate()?;
667
668 Ok(Self {
669 provider,
670 default_params: DefaultLLMParams::default(),
671 })
672 }
673
674 fn create_anthropic_provider(
675 api_key: Option<String>,
676 base_url: Option<String>,
677 model: Option<String>,
678 ) -> Box<dyn ProviderConfig> {
679 let mut config = AnthropicConfig::default();
680 if let Some(key) = api_key {
681 config.api_key = Some(key);
682 } else if let Ok(env_key) = std::env::var("ANTHROPIC_API_KEY") {
683 config.api_key = Some(env_key);
684 }
685 if let Some(url) = base_url {
686 config.base_url = url;
687 }
688 if let Some(m) = model {
689 config.default_model = m;
690 }
691 Box::new(config)
692 }
693
694 fn create_openai_provider(
695 api_key: Option<String>,
696 base_url: Option<String>,
697 model: Option<String>,
698 ) -> Box<dyn ProviderConfig> {
699 let mut config = OpenAIConfig::default();
700 if let Some(key) = api_key {
701 config.api_key = Some(key);
702 }
703 if let Some(url) = base_url {
704 config.base_url = url;
705 }
706 if let Some(m) = model {
707 config.default_model = m;
708 }
709 Box::new(config)
710 }
711
712 fn create_lmstudio_provider(
713 base_url: Option<String>,
714 model: Option<String>,
715 ) -> Box<dyn ProviderConfig> {
716 let mut config = LMStudioConfig::default();
717 if let Some(url) = base_url {
718 config.base_url = url;
719 }
720 if let Some(m) = model {
721 config.default_model = m;
722 }
723 Box::new(config)
724 }
725
726 fn create_ollama_provider(
727 base_url: Option<String>,
728 model: Option<String>,
729 ) -> Box<dyn ProviderConfig> {
730 let mut config = OllamaConfig::default();
731 if let Some(url) = base_url {
732 config.base_url = url;
733 }
734 if let Some(m) = model {
735 config.default_model = m;
736 }
737 Box::new(config)
738 }
739
740 /// Load configuration from environment variables for the specified provider
741 /// This is the ONLY method that should access environment variables
742 ///
743 /// # Errors
744 ///
745 /// Returns [`LlmError::ConfigurationError`] if:
746 /// - Required environment variables are missing
747 /// - Environment variable values are invalid or malformed
748 /// - Provider configuration validation fails
749 ///
750 /// Returns [`LlmError::UnsupportedProvider`] if the AI_PROVIDER environment variable
751 /// contains an unrecognized provider name.
752 pub fn from_env() -> LlmResult<Self> {
753 let provider_name =
754 std::env::var("AI_PROVIDER").unwrap_or_else(|_| "anthropic".to_string());
755
756 log_debug!(
757 target_provider = %provider_name,
758 "Loading LLM configuration from environment"
759 );
760
761 let provider: Box<dyn ProviderConfig> = match provider_name.as_str() {
762 "anthropic" => Self::anthropic_from_env(),
763 "openai" => Self::openai_from_env(),
764 "lmstudio" => Self::lmstudio_from_env(),
765 _ => {
766 return Err(LlmError::unsupported_provider(provider_name));
767 }
768 };
769
770 provider.validate()?;
771
772 log_debug!(
773 provider = provider.provider_name(),
774 max_context_tokens = provider.max_context_tokens(),
775 base_url = provider.base_url(),
776 has_api_key = provider.api_key().is_some(),
777 "LLM configuration loaded and validated"
778 );
779
780 Ok(Self {
781 provider,
782 default_params: DefaultLLMParams::default(),
783 })
784 }
785
786 fn anthropic_from_env() -> Box<dyn ProviderConfig> {
787 let mut config = AnthropicConfig::default();
788 if let Ok(api_key) = std::env::var("ANTHROPIC_API_KEY") {
789 config.api_key = Some(api_key);
790 }
791 Box::new(config)
792 }
793
794 fn openai_from_env() -> Box<dyn ProviderConfig> {
795 let mut config = OpenAIConfig::default();
796 if let Ok(api_key) = std::env::var("OPENAI_API_KEY") {
797 config.api_key = Some(api_key);
798 }
799 if let Ok(base_url) = std::env::var("OPENAI_BASE_URL") {
800 config.base_url = base_url;
801 }
802 Box::new(config)
803 }
804
805 fn lmstudio_from_env() -> Box<dyn ProviderConfig> {
806 let mut config = LMStudioConfig::default();
807 if let Ok(base_url) = std::env::var("LM_STUDIO_BASE_URL") {
808 config.base_url = base_url;
809 } else if let Ok(base_url) = std::env::var("OPENAI_BASE_URL") {
810 config.base_url = base_url;
811 }
812 Box::new(config)
813 }
814}