code_mesh_core/llm/provider.rs

//! LLM provider abstractions and implementations
//!
//! This module defines the core traits and types for integrating with various
//! Language Model providers like Anthropic, OpenAI, Google, and others.

use crate::{Error, Result};
use async_trait::async_trait;
use futures::Stream;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::env;
use std::pin::Pin;
use std::sync::Arc;

use super::{
    Message, GenerateOptions, GenerateResult, StreamChunk, FinishReason,
    Usage, ToolDefinition, MessageRole, MessageContent
};

/// Provider trait for LLM providers
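///
/// # Examples
///
/// A minimal sketch of how a caller might interact with a registered
/// provider; `my_provider` is a hypothetical value implementing this trait:
///
/// ```ignore
/// let health = my_provider.health_check().await?;
/// if health.available {
///     let model = my_provider.get_model("some-model-id").await?;
///     println!("using {} from {}", model.name(), my_provider.name());
/// }
/// ```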
#[async_trait]
pub trait Provider: Send + Sync {
    /// Unique identifier for this provider
    fn id(&self) -> &str;

    /// Human-readable name of the provider
    fn name(&self) -> &str;

    /// Base URL for the provider's API
    fn base_url(&self) -> &str;

    /// API version being used
    fn api_version(&self) -> &str;

    /// List of available models
    async fn list_models(&self) -> Result<Vec<ModelInfo>>;

    /// Get a specific model by ID
    async fn get_model(&self, model_id: &str) -> Result<Arc<dyn Model>>;

    /// Check if the provider is available (API reachable, credentials valid)
    async fn health_check(&self) -> Result<ProviderHealth>;

    /// Get provider-specific configuration
    fn get_config(&self) -> &ProviderConfig;

    /// Update provider configuration
    async fn update_config(&mut self, config: ProviderConfig) -> Result<()>;

    /// Get rate limiting information
    async fn get_rate_limits(&self) -> Result<RateLimitInfo>;

    /// Get current usage statistics
    async fn get_usage(&self) -> Result<UsageStats>;
}

/// Model trait for individual language models
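///
/// # Examples
///
/// A rough sketch of one-shot and streaming generation; `model` is a
/// hypothetical `Arc<dyn Model>`, and a `Default` impl for
/// `GenerateOptions` is assumed here:
///
/// ```ignore
/// use futures::StreamExt;
///
/// let result = model.generate(messages.clone(), GenerateOptions::default()).await?;
///
/// let mut chunks = model.stream(messages, GenerateOptions::default()).await?;
/// while let Some(chunk) = chunks.next().await {
///     let chunk = chunk?;
///     // handle each StreamChunk as it arrives
/// }
/// ```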
#[async_trait]
pub trait Model: Send + Sync {
    /// Unique identifier for this model
    fn id(&self) -> &str;

    /// Human-readable name of the model
    fn name(&self) -> &str;

    /// Provider that owns this model
    fn provider_id(&self) -> &str;

    /// Model capabilities
    fn capabilities(&self) -> &ModelCapabilities;

    /// Model configuration
    fn config(&self) -> &ModelConfig;

    /// Generate a response from messages
    async fn generate(
        &self,
        messages: Vec<Message>,
        options: GenerateOptions,
    ) -> Result<GenerateResult>;

    /// Stream response generation
    async fn stream(
        &self,
        messages: Vec<Message>,
        options: GenerateOptions,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<StreamChunk>> + Send>>>;

    /// Count tokens in messages
    async fn count_tokens(&self, messages: &[Message]) -> Result<u32>;

    /// Estimate cost for a request
    async fn estimate_cost(&self, input_tokens: u32, output_tokens: u32) -> Result<f64>;

    /// Get model-specific metadata
    fn metadata(&self) -> &ModelMetadata;
}

/// Information about an available model
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelInfo {
    pub id: String,
    pub name: String,
    pub description: Option<String>,
    pub capabilities: ModelCapabilities,
    pub limits: ModelLimits,
    pub pricing: ModelPricing,
    pub release_date: Option<chrono::DateTime<chrono::Utc>>,
    pub status: ModelStatus,
}

/// Model capabilities
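///
/// # Examples
///
/// The `Default` impl below enables only text generation and streaming; a
/// sketch of describing a tool-capable vision model using struct update
/// syntax:
///
/// ```ignore
/// let caps = ModelCapabilities {
///     tool_calling: true,
///     vision: true,
///     ..Default::default()
/// };
/// ```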
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelCapabilities {
    /// Supports text generation
    pub text_generation: bool,

    /// Supports tool/function calling
    pub tool_calling: bool,

    /// Supports vision/image inputs
    pub vision: bool,

    /// Supports streaming responses
    pub streaming: bool,

    /// Supports response caching
    pub caching: bool,

    /// Supports JSON mode
    pub json_mode: bool,

    /// Supports reasoning/chain-of-thought
    pub reasoning: bool,

    /// Supports code generation
    pub code_generation: bool,

    /// Supports multiple languages
    pub multilingual: bool,

    /// Custom capabilities
    pub custom: HashMap<String, serde_json::Value>,
}

impl Default for ModelCapabilities {
    fn default() -> Self {
        Self {
            text_generation: true,
            tool_calling: false,
            vision: false,
            streaming: true,
            caching: false,
            json_mode: false,
            reasoning: false,
            code_generation: false,
            multilingual: false,
            custom: HashMap::new(),
        }
    }
}

/// Model limits and constraints
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelLimits {
    /// Maximum context length in tokens
    pub max_context_tokens: u32,

    /// Maximum output tokens per request
    pub max_output_tokens: u32,

    /// Maximum image size in bytes (if vision is supported)
    pub max_image_size_bytes: Option<u64>,

    /// Maximum number of images per request
    pub max_images_per_request: Option<u32>,

    /// Maximum number of tool calls per request
    pub max_tool_calls: Option<u32>,

    /// Rate limits
    pub rate_limits: RateLimitInfo,
}

/// Model pricing information
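///
/// # Examples
///
/// Costs are expressed per 1K tokens, so a rough request-cost estimate might
/// look like this (illustrative numbers only):
///
/// ```ignore
/// let pricing = ModelPricing {
///     input_cost_per_1k: 0.003,
///     output_cost_per_1k: 0.015,
///     ..Default::default()
/// };
/// let (input_tokens, output_tokens) = (1_200u32, 400u32);
/// let cost = (input_tokens as f64 / 1000.0) * pricing.input_cost_per_1k
///     + (output_tokens as f64 / 1000.0) * pricing.output_cost_per_1k;
/// // 1.2 * 0.003 + 0.4 * 0.015 = 0.0096 (in `pricing.currency`)
/// ```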
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelPricing {
    /// Cost per 1K input tokens
    pub input_cost_per_1k: f64,

    /// Cost per 1K output tokens
    pub output_cost_per_1k: f64,

    /// Cost per 1K cached input tokens
    pub cache_read_cost_per_1k: Option<f64>,

    /// Cost per 1K cache write tokens
    pub cache_write_cost_per_1k: Option<f64>,

    /// Currency code (e.g., "USD")
    pub currency: String,
}

impl Default for ModelPricing {
    fn default() -> Self {
        Self {
            input_cost_per_1k: 0.0,
            output_cost_per_1k: 0.0,
            cache_read_cost_per_1k: None,
            cache_write_cost_per_1k: None,
            currency: "USD".to_string(),
        }
    }
}

/// Model status
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ModelStatus {
    /// Model is available and fully functional
    Active,

    /// Model is available but deprecated
    Deprecated,

    /// Model is in beta/preview
    Beta,

    /// Model is temporarily unavailable
    Unavailable,

    /// Model is permanently discontinued
    Discontinued,
}

/// Rate limiting information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RateLimitInfo {
    /// Requests per minute
    pub requests_per_minute: Option<u32>,

    /// Tokens per minute
    pub tokens_per_minute: Option<u32>,

    /// Tokens per day
    pub tokens_per_day: Option<u32>,

    /// Concurrent requests
    pub concurrent_requests: Option<u32>,

    /// Current usage counts
    pub current_usage: Option<CurrentUsage>,
}

/// Current usage against rate limits
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CurrentUsage {
    /// Requests used in current minute
    pub requests_this_minute: u32,

    /// Tokens used in current minute
    pub tokens_this_minute: u32,

    /// Tokens used today
    pub tokens_today: u32,

    /// Currently active requests
    pub active_requests: u32,
}

/// Provider health status
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderHealth {
    /// Whether the provider is available
    pub available: bool,

    /// Latency in milliseconds
    pub latency_ms: Option<u64>,

    /// Any error messages
    pub error: Option<String>,

    /// Timestamp of last check
    pub last_check: chrono::DateTime<chrono::Utc>,

    /// Additional status information
    pub details: HashMap<String, serde_json::Value>,
}

/// Provider configuration
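///
/// # Examples
///
/// A sketch of building a config, overriding a few fields and keeping the
/// rest of the defaults:
///
/// ```ignore
/// let config = ProviderConfig {
///     provider_id: "anthropic".to_string(),
///     api_key: Some("sk-...".to_string()),
///     timeout_seconds: 30,
///     ..Default::default()
/// };
/// ```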
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderConfig {
    /// Provider ID
    pub provider_id: String,

    /// API key or token
    pub api_key: Option<String>,

    /// Base URL override
    pub base_url_override: Option<String>,

    /// API version override
    pub api_version_override: Option<String>,

    /// Request timeout in seconds
    pub timeout_seconds: u64,

    /// Maximum retries
    pub max_retries: u32,

    /// Retry delay in milliseconds
    pub retry_delay_ms: u64,

    /// Custom headers
    pub custom_headers: HashMap<String, String>,

    /// Organization ID (for providers that support it)
    pub organization_id: Option<String>,

    /// Project ID (for providers that support it)
    pub project_id: Option<String>,

    /// Additional configuration
    pub extra: HashMap<String, serde_json::Value>,
}

impl Default for ProviderConfig {
    fn default() -> Self {
        Self {
            provider_id: String::new(),
            api_key: None,
            base_url_override: None,
            api_version_override: None,
            timeout_seconds: 60,
            max_retries: 3,
            retry_delay_ms: 1000,
            custom_headers: HashMap::new(),
            organization_id: None,
            project_id: None,
            extra: HashMap::new(),
        }
    }
}

/// Model configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelConfig {
    /// Model ID
    pub model_id: String,

    /// Default temperature
    pub default_temperature: Option<f32>,

    /// Default max tokens
    pub default_max_tokens: Option<u32>,

    /// Default top-p
    pub default_top_p: Option<f32>,

    /// Default stop sequences
    pub default_stop_sequences: Vec<String>,

    /// Whether to use caching by default
    pub use_caching: bool,

    /// Model-specific options
    pub options: HashMap<String, serde_json::Value>,
}

impl Default for ModelConfig {
    fn default() -> Self {
        Self {
            model_id: String::new(),
            default_temperature: None,
            default_max_tokens: None,
            default_top_p: None,
            default_stop_sequences: Vec::new(),
            use_caching: false,
            options: HashMap::new(),
        }
    }
}

/// Model metadata
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelMetadata {
    /// Model family (e.g., "gpt-4", "claude-3")
    pub family: String,

    /// Model size/parameters (if known)
    pub parameters: Option<String>,

    /// Training data cutoff
    pub training_cutoff: Option<chrono::DateTime<chrono::Utc>>,

    /// Model version
    pub version: Option<String>,

    /// Additional metadata
    pub extra: HashMap<String, serde_json::Value>,
}

impl Default for ModelMetadata {
    fn default() -> Self {
        Self {
            family: String::new(),
            parameters: None,
            training_cutoff: None,
            version: None,
            extra: HashMap::new(),
        }
    }
}

/// Usage statistics for a provider
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UsageStats {
    /// Total requests made
    pub total_requests: u64,

    /// Total tokens consumed
    pub total_tokens: u64,

    /// Total cost incurred
    pub total_cost: f64,

    /// Currency for cost
    pub currency: String,

    /// Usage by model
    pub by_model: HashMap<String, ModelUsage>,

    /// Usage by time period
    pub by_period: HashMap<String, PeriodUsage>,
}

/// Usage statistics for a specific model
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelUsage {
    /// Number of requests
    pub requests: u64,

    /// Input tokens used
    pub input_tokens: u64,

    /// Output tokens generated
    pub output_tokens: u64,

    /// Cache hits
    pub cache_hits: u64,

    /// Total cost
    pub cost: f64,

    /// Average latency in milliseconds
    pub avg_latency_ms: f64,
}

/// Usage statistics for a time period
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeriodUsage {
    /// Start of period
    pub start: chrono::DateTime<chrono::Utc>,

    /// End of period
    pub end: chrono::DateTime<chrono::Utc>,

    /// Total requests in period
    pub requests: u64,

    /// Total tokens in period
    pub tokens: u64,

    /// Total cost in period
    pub cost: f64,
}

/// Cost structure for model pricing
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Cost {
    /// Cost per 1K input tokens
    pub input_per_1k: f64,

    /// Cost per 1K output tokens
    pub output_per_1k: f64,

    /// Currency code
    pub currency: String,
}

/// Model limits structure
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Limits {
    /// Maximum context tokens
    pub max_context_tokens: u32,

    /// Maximum output tokens
    pub max_output_tokens: u32,
}

/// Provider source enum
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ProviderSource {
    /// Official provider
    Official,

    /// Community provider
    Community,

    /// Custom provider
    Custom,
}

/// Provider status enum
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ProviderStatus {
    /// Provider is active
    Active,

    /// Provider is in beta
    Beta,

    /// Provider is deprecated
    Deprecated,

    /// Provider is unavailable
    Unavailable,
}

/// Retry configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RetryConfig {
    /// Maximum number of retries
    pub max_retries: u32,

    /// Initial retry delay in milliseconds
    pub initial_delay_ms: u64,

    /// Maximum retry delay in milliseconds
    pub max_delay_ms: u64,

    /// Exponential backoff multiplier
    pub multiplier: f32,
}

impl Default for RetryConfig {
    fn default() -> Self {
        Self {
            max_retries: 3,
            initial_delay_ms: 1000,
            max_delay_ms: 10000,
            multiplier: 2.0,
        }
    }
}

/// Retry helper function with exponential backoff
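///
/// # Examples
///
/// A sketch of retrying a fallible async operation; `flaky_call` is a
/// hypothetical async function returning `Result<String>`:
///
/// ```ignore
/// use futures::FutureExt;
///
/// let config = RetryConfig::default();
/// let value: String = retry_with_backoff(&config, || {
///     async { flaky_call().await }.boxed()
/// })
/// .await?;
/// ```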
pub async fn retry_with_backoff<F, T>(
    config: &RetryConfig,
    operation: F,
) -> Result<T>
where
    F: Fn() -> futures::future::BoxFuture<'static, Result<T>>,
{
    use tokio::time::{sleep, Duration};

    let mut attempts = 0;
    let mut delay = config.initial_delay_ms;

    loop {
        match operation().await {
            Ok(result) => return Ok(result),
            Err(_) if attempts < config.max_retries => {
                attempts += 1;
                sleep(Duration::from_millis(delay)).await;
                // Exponential backoff, capped at the configured maximum delay
                delay = ((delay as f32 * config.multiplier) as u64).min(config.max_delay_ms);
            }
            Err(e) => return Err(e),
        }
    }
}

/// Registry for managing LLM providers
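///
/// # Examples
///
/// A minimal sketch of registry usage; `storage` is assumed to be some value
/// implementing `crate::auth::AuthStorage`, and `my_provider` an
/// `Arc<dyn Provider>` whose `id()` is assumed to be `"anthropic"`:
///
/// ```ignore
/// let mut registry = ProviderRegistry::new(storage);
/// registry.register_provider(my_provider)?;
/// registry.set_default_provider("anthropic")?;
/// let model = registry.get_model("anthropic", "claude-3-opus").await?;
/// ```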
pub struct ProviderRegistry {
    providers: HashMap<String, Arc<dyn Provider>>,
    models: HashMap<String, Arc<dyn Model>>,
    default_provider: Option<String>,
    storage: Arc<dyn crate::auth::AuthStorage>,
}

impl ProviderRegistry {
    /// Create a new provider registry with authentication storage
    pub fn new(storage: Arc<dyn crate::auth::AuthStorage>) -> Self {
        Self {
            providers: HashMap::new(),
            models: HashMap::new(),
            default_provider: None,
            storage,
        }
    }

    /// Register a provider
    pub fn register_provider(&mut self, provider: Arc<dyn Provider>) -> Result<()> {
        let provider_id = provider.id().to_string();

        if self.providers.contains_key(&provider_id) {
            return Err(Error::Other(anyhow::anyhow!(
                "Provider {} is already registered",
                provider_id
            )));
        }

        self.providers.insert(provider_id, provider);
        Ok(())
    }

    /// Get a provider by ID
    pub fn get_provider(&self, provider_id: &str) -> Result<Arc<dyn Provider>> {
        self.providers
            .get(provider_id)
            .cloned()
            .ok_or_else(|| Error::Other(anyhow::anyhow!("Provider {} not found", provider_id)))
    }

    /// List all registered providers
    pub fn list_providers(&self) -> Vec<String> {
        self.providers.keys().cloned().collect()
    }

    /// Get a model by provider and model ID
    pub async fn get_model(&mut self, provider_id: &str, model_id: &str) -> Result<Arc<dyn Model>> {
        let key = format!("{}/{}", provider_id, model_id);

        // Check cache first
        if let Some(model) = self.models.get(&key) {
            return Ok(model.clone());
        }

        // Get provider and fetch model
        let provider = self.get_provider(provider_id)?;
        let model = provider.get_model(model_id).await?;

        // Cache the model
        self.models.insert(key, model.clone());

        Ok(model)
    }

    /// Parse a model string (format: "provider/model" or "provider:model")
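    ///
    /// # Examples
    ///
    /// A short sketch (assuming an "anthropic" provider has been registered
    /// and set as the default):
    ///
    /// ```ignore
    /// let (provider, model) = registry.parse_model_string("anthropic/claude-3-opus")?;
    /// assert_eq!(provider, "anthropic");
    /// assert_eq!(model, "claude-3-opus");
    ///
    /// // With no separator, the default provider is used.
    /// let (provider, model) = registry.parse_model_string("claude-3-opus")?;
    /// assert_eq!(provider, "anthropic");
    /// ```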
    pub fn parse_model_string(&self, model_string: &str) -> Result<(String, String)> {
        if let Some((provider, model)) = model_string.split_once('/') {
            Ok((provider.to_string(), model.to_string()))
        } else if let Some((provider, model)) = model_string.split_once(':') {
            Ok((provider.to_string(), model.to_string()))
        } else {
            // If no separator, use default provider
            if let Some(default_provider) = &self.default_provider {
                Ok((default_provider.clone(), model_string.to_string()))
            } else {
                Err(Error::Other(anyhow::anyhow!(
                    "Invalid model string format: {}. Expected 'provider/model' or 'provider:model'",
                    model_string
                )))
            }
        }
    }

    /// Set default provider
    pub fn set_default_provider(&mut self, provider_id: &str) -> Result<()> {
        if !self.providers.contains_key(provider_id) {
            return Err(Error::Other(anyhow::anyhow!(
                "Provider {} is not registered",
                provider_id
            )));
        }

        self.default_provider = Some(provider_id.to_string());
        Ok(())
    }

    /// Get default provider
    pub fn get_default_provider(&self) -> Option<&str> {
        self.default_provider.as_deref()
    }

    /// List all available models
    pub async fn list_all_models(&self) -> Result<Vec<ModelInfo>> {
        let mut all_models = Vec::new();

        for provider in self.providers.values() {
            match provider.list_models().await {
                Ok(models) => all_models.extend(models),
                Err(e) => {
                    tracing::warn!("Failed to list models for provider {}: {}", provider.id(), e);
                }
            }
        }

        Ok(all_models)
    }

    /// Get provider health for all providers
    pub async fn get_all_provider_health(&self) -> HashMap<String, ProviderHealth> {
        let mut health_status = HashMap::new();

        for (id, provider) in &self.providers {
            match provider.health_check().await {
                Ok(health) => {
                    health_status.insert(id.clone(), health);
                }
                Err(e) => {
                    health_status.insert(
                        id.clone(),
                        ProviderHealth {
                            available: false,
                            latency_ms: None,
                            error: Some(e.to_string()),
                            last_check: chrono::Utc::now(),
                            details: HashMap::new(),
                        },
                    );
                }
            }
        }

        health_status
    }

    /// Clear cached models
    pub fn clear_model_cache(&mut self) {
        self.models.clear();
    }

    /// Remove a provider
    pub fn remove_provider(&mut self, provider_id: &str) -> Result<()> {
        if !self.providers.contains_key(provider_id) {
            return Err(Error::Other(anyhow::anyhow!(
                "Provider {} is not registered",
                provider_id
            )));
        }

        // Remove provider
        self.providers.remove(provider_id);

        // Remove cached models for this provider
        self.models.retain(|key, _| !key.starts_with(&format!("{}/", provider_id)));

        // Clear default provider if it was this one
        if self.default_provider.as_deref() == Some(provider_id) {
            self.default_provider = None;
        }

        Ok(())
    }

    /// Discover providers from environment variables
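    ///
    /// # Examples
    ///
    /// A sketch of environment-based discovery; whether a provider is
    /// actually registered depends on the provider constructors being
    /// implemented:
    ///
    /// ```ignore
    /// std::env::set_var("ANTHROPIC_API_KEY", "sk-...");
    /// registry.discover_from_env().await?;
    /// ```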
    pub async fn discover_from_env(&mut self) -> Result<()> {
        // Check for Anthropic API key
        if env::var("ANTHROPIC_API_KEY").is_ok() {
            if let Ok(provider) = self.create_anthropic_provider().await {
                self.register_provider(provider)?;
            }
        }

        // Check for OpenAI API key
        if env::var("OPENAI_API_KEY").is_ok() {
            if let Ok(provider) = self.create_openai_provider().await {
                self.register_provider(provider)?;
            }
        }

        // Check for GitHub Copilot authentication
        if env::var("GITHUB_TOKEN").is_ok() || env::var("GITHUB_COPILOT_TOKEN").is_ok() {
            if let Ok(provider) = self.create_github_copilot_provider().await {
                self.register_provider(provider)?;
            }
        }

        Ok(())
    }

    /// Discover providers from storage
    pub async fn discover_from_storage(&mut self) -> Result<()> {
        // Check for stored Anthropic credentials
        if let Ok(Some(_)) = self.storage.get("anthropic").await {
            if let Ok(provider) = self.create_anthropic_provider().await {
                self.register_provider(provider)?;
            }
        }

        // Check for stored OpenAI credentials
        if let Ok(Some(_)) = self.storage.get("openai").await {
            if let Ok(provider) = self.create_openai_provider().await {
                self.register_provider(provider)?;
            }
        }

        // Check for stored GitHub Copilot credentials
        if let Ok(Some(_)) = self.storage.get("github-copilot").await {
            if let Ok(provider) = self.create_github_copilot_provider().await {
                self.register_provider(provider)?;
            }
        }

        Ok(())
    }

    /// Initialize all registered providers
    pub async fn initialize_all(&mut self) -> Result<()> {
        let provider_ids: Vec<String> = self.providers.keys().cloned().collect();

        for provider_id in provider_ids {
            match self.providers.get(&provider_id) {
                Some(provider) => {
                    // Perform health check to ensure provider is initialized
                    if let Err(e) = provider.health_check().await {
                        tracing::warn!("Failed to initialize provider {}: {}", provider_id, e);
                    }
                }
                None => continue,
            }
        }

        Ok(())
    }

    /// Load models from models.dev API
    pub async fn load_models_dev(&mut self) -> Result<()> {
        // This would fetch model configurations from models.dev
        // For now, we'll use built-in configurations
        tracing::info!("Loading models from models.dev (using built-in configs for now)");
        Ok(())
    }

    /// Load configurations from a file
    pub async fn load_configs(&mut self, path: &str) -> Result<()> {
        use std::path::Path;
        use tokio::fs;

        let path = Path::new(path);
        if !path.exists() {
            return Err(Error::Other(anyhow::anyhow!(
                "Configuration file not found: {}",
                path.display()
            )));
        }

        let contents = fs::read_to_string(path).await?;
        let configs: HashMap<String, ProviderConfig> = serde_json::from_str(&contents)?;

        for (provider_id, _config) in configs {
            // We can't mutate through Arc, so we'd need to recreate the provider.
            // For now, just log a warning.
            if self.providers.contains_key(&provider_id) {
                tracing::warn!(
                    "Cannot update config for provider {} - providers are immutable through Arc",
                    provider_id
                );
            }
        }

        Ok(())
    }

    /// Get a provider by ID (async version)
    pub async fn get(&self, provider_id: &str) -> Option<Arc<dyn Provider>> {
        self.providers.get(provider_id).cloned()
    }

    /// Parse a model string (format: "provider/model" or just "model")
    pub fn parse_model(model_str: &str) -> (String, String) {
        if let Some((provider, model)) = model_str.split_once('/') {
            (provider.to_string(), model.to_string())
        } else if let Some((provider, model)) = model_str.split_once(':') {
            (provider.to_string(), model.to_string())
        } else {
            // Default to anthropic for backward compatibility
            ("anthropic".to_string(), model_str.to_string())
        }
    }

    /// Get the default model for a provider
    pub async fn get_default_model(&self, provider_id: &str) -> Result<Arc<dyn Model>> {
        let provider = self.get_provider(provider_id)?;

        // Try to get the provider's preferred default model
        let models = provider.list_models().await?;
        if let Some(default_model) = models.iter().find(|m| m.status == ModelStatus::Active) {
            provider.get_model(&default_model.id).await
        } else if let Some(first_model) = models.first() {
            provider.get_model(&first_model.id).await
        } else {
            Err(Error::Other(anyhow::anyhow!(
                "Provider {} has no available models",
                provider_id
            )))
        }
    }

    /// Get list of available providers (those that can authenticate)
    pub async fn available(&self) -> Vec<String> {
        let mut available = Vec::new();

        for (id, provider) in &self.providers {
            if let Ok(health) = provider.health_check().await {
                if health.available {
                    available.push(id.clone());
                }
            }
        }

        available
    }

    /// List all registered provider IDs
    pub async fn list(&self) -> Vec<String> {
        self.providers.keys().cloned().collect()
    }

    /// Register a provider (async version)
    pub async fn register(&mut self, provider: Arc<dyn Provider>) {
        let provider_id = provider.id().to_string();
        self.providers.insert(provider_id, provider);
    }

    // Helper methods to create providers
    async fn create_anthropic_provider(&self) -> Result<Arc<dyn Provider>> {
        // This would create an Anthropic provider using the auth storage
        // For now, return an error as the actual implementation depends on the anthropic module
        Err(Error::Other(anyhow::anyhow!("Anthropic provider creation not implemented in this context")))
    }

    async fn create_openai_provider(&self) -> Result<Arc<dyn Provider>> {
        // This would create an OpenAI provider using the auth storage
        // For now, return an error as the actual implementation depends on the openai module
        Err(Error::Other(anyhow::anyhow!("OpenAI provider creation not implemented in this context")))
    }

    async fn create_github_copilot_provider(&self) -> Result<Arc<dyn Provider>> {
        // This would create a GitHub Copilot provider using the auth storage
        // For now, return an error as the actual implementation depends on the github_copilot module
        Err(Error::Other(anyhow::anyhow!("GitHub Copilot provider creation not implemented in this context")))
    }
}

// Note: Default implementation removed as ProviderRegistry now requires AuthStorage

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_model() {
        // Test the static parse_model helper
        // With slash separator
        let (provider, model) = ProviderRegistry::parse_model("anthropic/claude-3-opus");
        assert_eq!(provider, "anthropic");
        assert_eq!(model, "claude-3-opus");

        // With colon separator
        let (provider, model) = ProviderRegistry::parse_model("openai:gpt-4");
        assert_eq!(provider, "openai");
        assert_eq!(model, "gpt-4");

        // Without a separator (defaults to anthropic)
        let (provider, model) = ProviderRegistry::parse_model("claude-3-opus");
        assert_eq!(provider, "anthropic");
        assert_eq!(model, "claude-3-opus");
    }

    #[test]
    fn test_model_capabilities_default() {
        let caps = ModelCapabilities::default();
        assert!(caps.text_generation);
        assert!(!caps.tool_calling);
        assert!(!caps.vision);
        assert!(caps.streaming);
    }

    #[test]
    fn test_provider_config_default() {
        let config = ProviderConfig::default();
        assert_eq!(config.timeout_seconds, 60);
        assert_eq!(config.max_retries, 3);
        assert_eq!(config.retry_delay_ms, 1000);
    }
}