// vtcode_config/models.rs

//! Model configuration and identification module
//!
//! This module provides a centralized enum for model identifiers and their configurations,
//! replacing hardcoded model strings throughout the codebase for better maintainability.
//! Read the model list in `docs/models.json`.

7use serde::{Deserialize, Serialize};
8use std::fmt;
9use std::str::FromStr;
10
/// Static metadata describing a single OpenRouter model listing.
///
/// Instances are produced by the `openrouter_generated` module; the fields are
/// read-only descriptors consumed elsewhere in this module (e.g. `id` supplies
/// the string returned by `ModelId::as_str` for OpenRouter variants).
///
/// NOTE(review): the precise semantics of the boolean flags are defined by the
/// metadata generator — field docs below restate the names, not verified
/// behavior.
#[derive(Clone, Copy, Debug)]
pub struct OpenRouterMetadata {
    /// Model identifier string used in API calls and configuration.
    id: &'static str,
    /// Vendor slug the listing belongs to.
    vendor: &'static str,
    /// Human-readable display name.
    display: &'static str,
    /// Short description of the listing.
    description: &'static str,
    /// Flag marking the listing as an efficient option.
    efficient: bool,
    /// Flag marking the listing as a top-tier option.
    top_tier: bool,
    /// Generation/family tag for the listing.
    generation: &'static str,
    /// Whether the listing is a reasoning model.
    reasoning: bool,
    /// Whether the listing supports tool calling.
    tool_call: bool,
}
23
/// Supported AI model providers
///
/// Serde (de)serializes variants under their declared names (no rename
/// attributes present); the lowercase wire identifiers are handled separately
/// by the `Display` / `FromStr` implementations below.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Default)]
pub enum Provider {
    /// Google Gemini models
    #[default]
    Gemini,
    /// OpenAI GPT models
    OpenAI,
    /// Anthropic Claude models
    Anthropic,
    /// DeepSeek native models
    DeepSeek,
    /// OpenRouter marketplace models
    OpenRouter,
    /// Local Ollama models
    Ollama,
    /// LM Studio local server (OpenAI-compatible)
    LmStudio,
    /// Moonshot.ai models
    Moonshot,
    /// xAI Grok models
    XAI,
    /// Z.AI GLM models
    ZAI,
    /// MiniMax models
    Minimax,
    /// Hugging Face Inference Providers
    HuggingFace,
}
54
55impl Provider {
56    /// Get the default API key environment variable for this provider
57    pub fn default_api_key_env(&self) -> &'static str {
58        match self {
59            Provider::Gemini => "GEMINI_API_KEY",
60            Provider::OpenAI => "OPENAI_API_KEY",
61            Provider::Anthropic => "ANTHROPIC_API_KEY",
62            Provider::DeepSeek => "DEEPSEEK_API_KEY",
63            Provider::OpenRouter => "OPENROUTER_API_KEY",
64            Provider::Ollama => "OLLAMA_API_KEY",
65            Provider::LmStudio => "LMSTUDIO_API_KEY",
66            Provider::Moonshot => "MOONSHOT_API_KEY",
67            Provider::XAI => "XAI_API_KEY",
68            Provider::ZAI => "ZAI_API_KEY",
69            Provider::Minimax => "MINIMAX_API_KEY",
70            Provider::HuggingFace => "HF_TOKEN",
71        }
72    }
73
74    /// Get all supported providers
75    pub fn all_providers() -> Vec<Provider> {
76        vec![
77            Provider::OpenAI,
78            Provider::Anthropic,
79            Provider::Gemini,
80            Provider::DeepSeek,
81            Provider::OpenRouter,
82            Provider::Ollama,
83            Provider::LmStudio,
84            Provider::Moonshot,
85            Provider::XAI,
86            Provider::ZAI,
87            Provider::Minimax,
88            Provider::HuggingFace,
89        ]
90    }
91
92    /// Human-friendly label for display purposes
93    pub fn label(&self) -> &'static str {
94        match self {
95            Provider::Gemini => "Gemini",
96            Provider::OpenAI => "OpenAI",
97            Provider::Anthropic => "Anthropic",
98            Provider::DeepSeek => "DeepSeek",
99            Provider::OpenRouter => "OpenRouter",
100            Provider::Ollama => "Ollama",
101            Provider::LmStudio => "LM Studio",
102            Provider::Moonshot => "Moonshot",
103            Provider::XAI => "xAI",
104            Provider::ZAI => "Z.AI",
105            Provider::Minimax => "MiniMax",
106            Provider::HuggingFace => "Hugging Face",
107        }
108    }
109
110    /// Determine if the provider supports configurable reasoning effort for the model
111    pub fn supports_reasoning_effort(&self, model: &str) -> bool {
112        use crate::constants::models;
113
114        match self {
115            Provider::Gemini => models::google::REASONING_MODELS.contains(&model),
116            Provider::OpenAI => models::openai::REASONING_MODELS.contains(&model),
117            Provider::Anthropic => models::anthropic::REASONING_MODELS.contains(&model),
118            Provider::DeepSeek => model == models::deepseek::DEEPSEEK_REASONER,
119            Provider::OpenRouter => {
120                if let Ok(model_id) = ModelId::from_str(model) {
121                    return model_id.is_reasoning_variant();
122                }
123                models::openrouter::REASONING_MODELS.contains(&model)
124            }
125            Provider::Ollama => models::ollama::REASONING_LEVEL_MODELS.contains(&model),
126            Provider::LmStudio => false,
127            Provider::Moonshot => false,
128            Provider::XAI => model == models::xai::GROK_4 || model == models::xai::GROK_4_CODE,
129            Provider::ZAI => model == models::zai::GLM_4_6,
130            Provider::Minimax => {
131                model == models::minimax::MINIMAX_M2_1
132                    || model == models::minimax::MINIMAX_M2_1_LIGHTNING
133                    || model == models::minimax::MINIMAX_M2
134            }
135            Provider::HuggingFace => models::huggingface::REASONING_MODELS.contains(&model),
136        }
137    }
138}
139
140impl fmt::Display for Provider {
141    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
142        match self {
143            Provider::Gemini => write!(f, "gemini"),
144            Provider::OpenAI => write!(f, "openai"),
145            Provider::Anthropic => write!(f, "anthropic"),
146            Provider::DeepSeek => write!(f, "deepseek"),
147            Provider::OpenRouter => write!(f, "openrouter"),
148            Provider::Ollama => write!(f, "ollama"),
149            Provider::LmStudio => write!(f, "lmstudio"),
150            Provider::Moonshot => write!(f, "moonshot"),
151            Provider::XAI => write!(f, "xai"),
152            Provider::ZAI => write!(f, "zai"),
153            Provider::Minimax => write!(f, "minimax"),
154            Provider::HuggingFace => write!(f, "huggingface"),
155        }
156    }
157}
158
159impl FromStr for Provider {
160    type Err = ModelParseError;
161
162    fn from_str(s: &str) -> Result<Self, Self::Err> {
163        match s.to_lowercase().as_str() {
164            "gemini" => Ok(Provider::Gemini),
165            "openai" => Ok(Provider::OpenAI),
166            "anthropic" => Ok(Provider::Anthropic),
167            "deepseek" => Ok(Provider::DeepSeek),
168            "openrouter" => Ok(Provider::OpenRouter),
169            "ollama" => Ok(Provider::Ollama),
170            "lmstudio" => Ok(Provider::LmStudio),
171            "moonshot" => Ok(Provider::Moonshot),
172            "xai" => Ok(Provider::XAI),
173            "zai" => Ok(Provider::ZAI),
174            "minimax" => Ok(Provider::Minimax),
175            "huggingface" => Ok(Provider::HuggingFace),
176            _ => Err(ModelParseError::InvalidProvider(s.to_string())),
177        }
178    }
179}
180
/// Centralized enum for all supported model identifiers
///
/// Variants are grouped by provider (see the section comments below). The
/// wire string for each variant comes from [`ModelId::as_str`]; OpenRouter
/// listings resolve their strings via the generated `openrouter_generated`
/// metadata instead of hand-written constants.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum ModelId {
    // Gemini models
    /// Gemini 2.5 Flash Preview - Latest fast model with advanced capabilities
    #[default]
    Gemini25FlashPreview,
    /// Gemini 2.5 Flash - Legacy alias for flash preview
    Gemini25Flash,
    /// Gemini 2.5 Flash Lite - Legacy alias for flash preview (lite)
    Gemini25FlashLite,
    /// Gemini 2.5 Pro - Latest most capable Gemini model
    Gemini25Pro,
    /// Gemini 3 Pro Preview - Preview of next-generation Gemini model
    Gemini3ProPreview,

    // OpenAI models
    /// GPT-5 - Latest most capable OpenAI model (2025-08-07)
    GPT5,
    /// GPT-5 Codex - Code-focused GPT-5 variant using the Responses API
    GPT5Codex,
    /// GPT-5 Mini - Latest efficient OpenAI model (2025-08-07)
    GPT5Mini,
    /// GPT-5 Nano - Latest most cost-effective OpenAI model (2025-08-07)
    GPT5Nano,
    /// Codex Mini Latest - Latest Codex model for code generation (2025-05-16)
    CodexMiniLatest,
    /// GPT-OSS 20B - OpenAI's open-source 20B parameter model using harmony
    OpenAIGptOss20b,
    /// GPT-OSS 120B - OpenAI's open-source 120B parameter model using harmony
    OpenAIGptOss120b,

    // Anthropic models
    /// Claude Opus 4.5 - Latest flagship Anthropic model with exceptional reasoning (2025-11-01)
    ClaudeOpus45,
    /// Claude Opus 4.1 - Previous most capable Anthropic model (2025-08-05)
    ClaudeOpus41,
    /// Claude Sonnet 4.5 - Latest balanced Anthropic model (2025-10-15)
    ClaudeSonnet45,
    /// Claude Haiku 4.5 - Latest efficient Anthropic model (2025-10-15)
    ClaudeHaiku45,
    /// Claude Sonnet 4 - Previous balanced Anthropic model (2025-05-14)
    ClaudeSonnet4,

    // DeepSeek models
    /// DeepSeek V3.2 Chat - Fast non-thinking mode
    DeepSeekChat,
    /// DeepSeek V3.2 Reasoner - Thinking mode with structured reasoning output
    DeepSeekReasoner,
    // Hugging Face models
    /// DeepSeek V3.2 via Hugging Face router
    HuggingFaceDeepseekV32,
    /// OpenAI GPT-OSS 20B via Hugging Face router
    HuggingFaceOpenAIGptOss20b,
    /// OpenAI GPT-OSS 120B via Hugging Face router
    HuggingFaceOpenAIGptOss120b,
    /// Z.AI GLM-4.7 via Hugging Face router
    HuggingFaceGlm47,
    /// Z.AI GLM-4.7-Flash via Novita on Hugging Face router
    HuggingFaceGlm47FlashNovita,
    /// MoonshotAI Kimi K2 Thinking via Hugging Face router
    HuggingFaceKimiK2Thinking,
    /// MiniMax M2.1 via Novita on Hugging Face router - Enhanced reasoning
    HuggingFaceMinimaxM21Novita,
    /// DeepSeek V3.2 via Novita on Hugging Face router
    HuggingFaceDeepseekV32Novita,
    /// Xiaomi MiMo-V2-Flash via Novita on Hugging Face router
    HuggingFaceXiaomiMimoV2FlashNovita,

    // xAI models
    /// Grok-4 - Flagship xAI model with advanced reasoning
    XaiGrok4,
    /// Grok-4 Mini - Efficient xAI model variant
    XaiGrok4Mini,
    /// Grok-4 Code - Code-focused Grok deployment
    XaiGrok4Code,
    /// Grok-4 Code Latest - Latest Grok code model with enhanced reasoning tools
    XaiGrok4CodeLatest,
    /// Grok-4 Vision - Multimodal Grok model
    XaiGrok4Vision,

    // Z.AI models
    /// GLM-4-Plus - Flagship GLM model with top-tier reasoning
    ZaiGlm4Plus,
    /// GLM-4-Plus Deep Thinking - Flagship GLM with forced reasoning
    ZaiGlm4PlusDeepThinking,
    /// GLM-4.7 - Latest flagship GLM reasoning model
    ZaiGlm47,
    /// GLM-4.7 Deep Thinking - GLM-4.7 with forced reasoning
    ZaiGlm47DeepThinking,
    /// GLM-4.7 Flash - Lightweight GLM-4.7 model optimized for agentic coding
    ZaiGlm47Flash,
    /// GLM-4.6 - Previous flagship GLM reasoning model
    ZaiGlm46,
    /// GLM-4.6 Deep Thinking - GLM-4.6 with forced reasoning
    ZaiGlm46DeepThinking,
    /// GLM-4.6V - Vision-capable GLM release
    ZaiGlm46V,
    /// GLM-4.6V-Flash - Latency-optimised vision GLM
    ZaiGlm46VFlash,
    /// GLM-4.6V-FlashX - Hybrid vision GLM variant
    ZaiGlm46VFlashX,
    /// GLM-4.5 - Balanced GLM release for general tasks
    ZaiGlm45,
    /// GLM-4.5 Deep Thinking - GLM-4.5 with forced reasoning
    ZaiGlm45DeepThinking,
    /// GLM-4.5-Air - Efficient GLM variant
    ZaiGlm45Air,
    /// GLM-4.5-X - Enhanced capability GLM variant
    ZaiGlm45X,
    /// GLM-4.5-AirX - Hybrid efficient GLM variant
    ZaiGlm45Airx,
    /// GLM-4.5-Flash - Low-latency GLM variant
    ZaiGlm45Flash,
    /// GLM-4.5V - Vision-capable GLM release
    ZaiGlm45V,
    /// GLM-4-32B-0414-128K - Legacy long-context GLM deployment
    ZaiGlm432b0414128k,

    // Ollama models
    /// GPT-OSS 20B - Open-weight GPT-OSS 20B model served via Ollama locally
    OllamaGptOss20b,
    /// GPT-OSS 20B Cloud - Cloud-hosted GPT-OSS 20B served via Ollama Cloud
    OllamaGptOss20bCloud,
    /// GPT-OSS 120B Cloud - Cloud-hosted GPT-OSS 120B served via Ollama Cloud
    OllamaGptOss120bCloud,
    /// Qwen3 1.7B - Qwen3 1.7B model served via Ollama
    OllamaQwen317b,
    /// DeepSeek V3.2 Cloud - DeepSeek V3.2 reasoning deployment via Ollama Cloud
    OllamaDeepseekV32Cloud,
    /// Qwen3 Next 80B Cloud - Next-generation Qwen3 80B via Ollama Cloud
    OllamaQwen3Next80bCloud,
    /// Mistral Large 3 675B Cloud - Mistral Large 3 reasoning model via Ollama Cloud
    OllamaMistralLarge3675bCloud,
    /// Kimi K2 Thinking Cloud - MoonshotAI Kimi K2 thinking model via Ollama Cloud
    OllamaKimiK2ThinkingCloud,
    /// Qwen3 Coder 480B Cloud - Cloud-hosted Qwen3 Coder model served via Ollama Cloud
    OllamaQwen3Coder480bCloud,
    /// GLM-4.6 Cloud - Cloud-hosted GLM-4.6 model served via Ollama Cloud
    OllamaGlm46Cloud,
    /// Gemini 3 Pro Preview Latest Cloud - Google Gemini 3 Pro Preview via Ollama Cloud
    OllamaGemini3ProPreviewLatestCloud,
    /// Devstral 2 123B Cloud - Mistral Devstral 2 123B model via Ollama Cloud
    OllamaDevstral2123bCloud,
    /// MiniMax-M2 Cloud - Cloud-hosted MiniMax-M2 model served via Ollama Cloud
    OllamaMinimaxM2Cloud,
    /// GLM-4.7 Cloud - Cloud-hosted GLM-4.7 model served via Ollama Cloud
    OllamaGlm47Cloud,
    /// MiniMax-M2.1 Cloud - Cloud-hosted MiniMax-M2.1 model served via Ollama Cloud
    OllamaMinimaxM21Cloud,
    /// Gemini 3 Flash Preview Cloud - Google Gemini 3 Flash Preview via Ollama Cloud
    OllamaGemini3FlashPreviewCloud,
    /// Nemotron-3-Nano 30B Cloud - NVIDIA Nemotron-3-Nano 30B via Ollama Cloud
    OllamaNemotron3Nano30bCloud,

    // MiniMax models
    /// MiniMax-M2.1 - Latest MiniMax model with enhanced code understanding and reasoning
    MinimaxM21,
    /// MiniMax-M2.1-lightning - Fast version of MiniMax-M2.1
    MinimaxM21Lightning,
    /// MiniMax-M2 - MiniMax reasoning-focused model
    MinimaxM2,

    // LM Studio models
    /// Meta Llama 3 8B Instruct served locally via LM Studio
    LmStudioMetaLlama38BInstruct,
    /// Meta Llama 3.1 8B Instruct served locally via LM Studio
    LmStudioMetaLlama318BInstruct,
    /// Qwen2.5 7B Instruct served locally via LM Studio
    LmStudioQwen257BInstruct,
    /// Gemma 2 2B IT served locally via LM Studio
    LmStudioGemma22BIt,
    /// Gemma 2 9B IT served locally via LM Studio
    LmStudioGemma29BIt,
    /// Phi-3.1 Mini 4K Instruct served locally via LM Studio
    LmStudioPhi31Mini4kInstruct,

    // OpenRouter models
    /// Grok Code Fast 1 - Fast OpenRouter coding model powered by xAI Grok
    OpenRouterGrokCodeFast1,
    /// Grok 4 Fast - Reasoning-focused Grok endpoint with transparent traces
    OpenRouterGrok4Fast,
    /// Grok 4.1 Fast - Enhanced Grok 4.1 fast inference with improved reasoning
    OpenRouterGrok41Fast,
    /// Grok 4 - Flagship Grok 4 endpoint exposed through OpenRouter
    OpenRouterGrok4,
    /// GLM 4.6 - Z.AI GLM 4.6 long-context reasoning model
    OpenRouterZaiGlm46,
    /// Kimi K2 0905 - MoonshotAI Kimi K2 0905 MoE release optimised for coding agents
    OpenRouterMoonshotaiKimiK20905,
    /// Kimi K2 Thinking - MoonshotAI reasoning-tier Kimi K2 release optimized for long-horizon agents
    OpenRouterMoonshotaiKimiK2Thinking,
    /// Qwen3 Max - Flagship Qwen3 mixture for general reasoning
    OpenRouterQwen3Max,
    /// Qwen3 235B A22B - Mixture-of-experts Qwen3 235B general model
    OpenRouterQwen3235bA22b,
    /// Qwen3 235B A22B Instruct 2507 - Instruction-tuned Qwen3 235B A22B
    OpenRouterQwen3235bA22b2507,
    /// Qwen3 235B A22B Thinking 2507 - Deliberative Qwen3 235B A22B reasoning release
    OpenRouterQwen3235bA22bThinking2507,
    /// Qwen3 32B - Dense 32B Qwen3 deployment
    OpenRouterQwen332b,
    /// Qwen3 30B A3B - Active-parameter 30B Qwen3 model
    OpenRouterQwen330bA3b,
    /// Qwen3 30B A3B Instruct 2507 - Instruction-tuned Qwen3 30B A3B
    OpenRouterQwen330bA3bInstruct2507,
    /// Qwen3 30B A3B Thinking 2507 - Deliberative Qwen3 30B A3B release
    OpenRouterQwen330bA3bThinking2507,
    /// Qwen3 14B - Lightweight Qwen3 14B model
    OpenRouterQwen314b,
    /// Qwen3 8B - Compact Qwen3 8B deployment
    OpenRouterQwen38b,
    /// Qwen3 Next 80B A3B Instruct - Next-generation Qwen3 instruction model
    OpenRouterQwen3Next80bA3bInstruct,
    /// Qwen3 Next 80B A3B Thinking - Next-generation Qwen3 reasoning release
    OpenRouterQwen3Next80bA3bThinking,
    /// Qwen3 Coder - Qwen3-based coding model tuned for IDE workflows
    OpenRouterQwen3Coder,
    /// Qwen3 Coder Plus - Premium Qwen3 coding model with long context
    OpenRouterQwen3CoderPlus,
    /// Qwen3 Coder Flash - Latency optimised Qwen3 coding model
    OpenRouterQwen3CoderFlash,
    /// Qwen3 Coder 30B A3B Instruct - Large Mixture-of-Experts coding deployment
    OpenRouterQwen3Coder30bA3bInstruct,
    /// DeepSeek V3.2 Chat - Official chat model via OpenRouter
    OpenRouterDeepseekChat,
    /// DeepSeek V3.2 - Standard model with thinking support via OpenRouter
    OpenRouterDeepSeekV32,
    /// DeepSeek V3.2 Reasoner - Thinking mode via OpenRouter
    OpenRouterDeepseekReasoner,
    /// DeepSeek V3.2 Speciale - Enhanced reasoning model (no tool-use)
    OpenRouterDeepSeekV32Speciale,
    /// DeepSeek V3.2 Exp - Experimental DeepSeek V3.2 listing
    OpenRouterDeepSeekV32Exp,
    /// DeepSeek Chat v3.1 - Advanced DeepSeek model via OpenRouter
    OpenRouterDeepSeekChatV31,
    /// DeepSeek R1 - DeepSeek R1 reasoning model with chain-of-thought
    OpenRouterDeepSeekR1,
    /// OpenAI gpt-oss-120b - Open-weight 120B reasoning model via OpenRouter
    OpenRouterOpenAIGptOss120b,
    /// OpenAI gpt-oss-20b - Open-weight 20B deployment via OpenRouter
    OpenRouterOpenAIGptOss20b,
    /// OpenAI GPT-5 - OpenAI GPT-5 model accessed through OpenRouter
    OpenRouterOpenAIGpt5,
    /// OpenAI GPT-5 Codex - OpenRouter listing for GPT-5 Codex
    OpenRouterOpenAIGpt5Codex,
    /// OpenAI GPT-5 Chat - Chat optimised GPT-5 endpoint without tool use
    OpenRouterOpenAIGpt5Chat,
    /// OpenAI GPT-4o Search Preview - GPT-4o search preview endpoint via OpenRouter
    OpenRouterOpenAIGpt4oSearchPreview,
    /// OpenAI GPT-4o Mini Search Preview - GPT-4o mini search preview endpoint
    OpenRouterOpenAIGpt4oMiniSearchPreview,
    /// OpenAI ChatGPT-4o Latest - ChatGPT 4o latest listing via OpenRouter
    OpenRouterOpenAIChatgpt4oLatest,
    /// Claude Sonnet 4.5 - Anthropic Claude Sonnet 4.5 listing
    OpenRouterAnthropicClaudeSonnet45,
    /// Claude Haiku 4.5 - Anthropic Claude Haiku 4.5 listing
    OpenRouterAnthropicClaudeHaiku45,
    /// Claude Opus 4.1 - Anthropic Claude Opus 4.1 listing
    OpenRouterAnthropicClaudeOpus41,
    /// Amazon Nova 2 Lite - Amazon Nova 2 Lite model via OpenRouter
    OpenRouterAmazonNova2LiteV1,
    /// Mistral Large 3 2512 - Mistral Large 3 2512 model via OpenRouter
    OpenRouterMistralaiMistralLarge2512,
    /// DeepSeek V3.1 Nex N1 - Nex AGI DeepSeek V3.1 Nex N1 model via OpenRouter
    OpenRouterNexAgiDeepseekV31NexN1,
    /// OpenAI GPT-5.1 - OpenAI GPT-5.1 model accessed through OpenRouter
    OpenRouterOpenAIGpt51,
    /// OpenAI GPT-5.1-Codex - OpenRouter listing for GPT-5.1 Codex
    OpenRouterOpenAIGpt51Codex,
    /// OpenAI GPT-5.1-Codex-Max - OpenRouter listing for GPT-5.1 Codex Max
    OpenRouterOpenAIGpt51CodexMax,
    /// OpenAI GPT-5.1-Codex-Mini - OpenRouter listing for GPT-5.1 Codex Mini
    OpenRouterOpenAIGpt51CodexMini,
    /// OpenAI GPT-5.1 Chat - Chat optimised GPT-5.1 endpoint without tool use
    OpenRouterOpenAIGpt51Chat,
    /// OpenAI GPT-5.2 - OpenAI GPT-5.2 model accessed through OpenRouter
    OpenRouterOpenAIGpt52,
    /// OpenAI GPT-5.2 Chat - Chat optimised GPT-5.2 endpoint without tool use
    OpenRouterOpenAIGpt52Chat,
    /// OpenAI GPT-5.2-Codex - OpenRouter listing for GPT-5.2 Codex
    OpenRouterOpenAIGpt52Codex,
    /// OpenAI GPT-5.2 Pro - Professional tier GPT-5.2 model accessed through OpenRouter
    OpenRouterOpenAIGpt52Pro,
    /// OpenAI o1-pro - OpenAI o1-pro advanced reasoning model via OpenRouter
    OpenRouterOpenAIO1Pro,
    /// GLM 4.6V - Z.AI GLM 4.6V enhanced vision model
    OpenRouterZaiGlm46V,
    /// GLM 4.7 - Z.AI GLM 4.7 next-generation reasoning model
    OpenRouterZaiGlm47,
    /// GLM 4.7 Flash - Z.AI GLM-4.7-Flash lightweight model via OpenRouter
    OpenRouterZaiGlm47Flash,
}
475
/// OpenRouter listing metadata generated at build time into `OUT_DIR`
/// (presumably by the crate's build script — the include below splices the
/// generated `openrouter_metadata.rs` in verbatim).
#[cfg(not(docsrs))]
pub mod openrouter_generated {
    include!(concat!(env!("OUT_DIR"), "/openrouter_metadata.rs"));
}
480
/// Empty stand-in for the generated module, used in docs.rs builds where the
/// `OUT_DIR` generated file is unavailable. It exposes the same API surface
/// with no data, so callers work unchanged.
#[cfg(docsrs)]
pub mod openrouter_generated {
    /// Per-model metadata entry — assumed to mirror the generated `Entry`
    /// shape (TODO confirm field parity with the build script output).
    #[derive(Clone, Copy)]
    pub struct Entry {
        pub variant: super::ModelId,
        pub id: &'static str,
        pub vendor: &'static str,
        pub display: &'static str,
        pub description: &'static str,
        pub efficient: bool,
        pub top_tier: bool,
        pub generation: &'static str,
        pub reasoning: bool,
        pub tool_call: bool,
    }

    /// No entries are available in doc builds.
    pub const ENTRIES: &[Entry] = &[];

    /// A vendor name paired with the models it serves.
    #[derive(Clone, Copy)]
    pub struct VendorModels {
        pub vendor: &'static str,
        pub models: &'static [super::ModelId],
    }

    /// No vendor groups are available in doc builds.
    pub const VENDOR_MODELS: &[VendorModels] = &[];

    /// Always `None` in doc builds.
    pub fn metadata_for(_model: super::ModelId) -> Option<super::OpenRouterMetadata> {
        None
    }

    /// Always `None` in doc builds.
    pub fn parse_model(_value: &str) -> Option<super::ModelId> {
        None
    }

    /// Always the empty slice in doc builds.
    pub fn vendor_groups() -> &'static [VendorModels] {
        VENDOR_MODELS
    }
}
519
520impl ModelId {
521    fn openrouter_metadata(&self) -> Option<OpenRouterMetadata> {
522        #[cfg(not(docsrs))]
523        {
524            openrouter_generated::metadata_for(*self)
525        }
526        #[cfg(docsrs)]
527        {
528            None
529        }
530    }
531
532    fn parse_openrouter_model(value: &str) -> Option<Self> {
533        #[cfg(not(docsrs))]
534        {
535            openrouter_generated::parse_model(value)
536        }
537        #[cfg(docsrs)]
538        {
539            None
540        }
541    }
542
543    fn openrouter_vendor_groups() -> Vec<(&'static str, &'static [Self])> {
544        #[cfg(not(docsrs))]
545        {
546            openrouter_generated::vendor_groups()
547                .iter()
548                .map(|group| (group.vendor, group.models))
549                .collect()
550        }
551        #[cfg(docsrs)]
552        {
553            Vec::new()
554        }
555    }
556
557    fn openrouter_models() -> Vec<Self> {
558        Self::openrouter_vendor_groups()
559            .into_iter()
560            .flat_map(|(_, models)| models.iter().copied())
561            .collect()
562    }
563
    /// Convert the model identifier to its string representation
    /// used in API calls and configurations
    ///
    /// OpenRouter variants resolve through the generated metadata table; every
    /// other variant maps to a constant in `crate::constants::models`.
    pub fn as_str(&self) -> &'static str {
        use crate::constants::models;
        // OpenRouter listings carry their wire id in the generated metadata.
        if let Some(meta) = self.openrouter_metadata() {
            return meta.id;
        }
        match self {
            // Gemini models
            ModelId::Gemini25FlashPreview => models::GEMINI_2_5_FLASH_PREVIEW,
            ModelId::Gemini25Flash => models::GEMINI_2_5_FLASH,
            ModelId::Gemini25FlashLite => models::GEMINI_2_5_FLASH_LITE,
            ModelId::Gemini25Pro => models::GEMINI_2_5_PRO,
            ModelId::Gemini3ProPreview => models::GEMINI_3_PRO_PREVIEW,
            // OpenAI models
            ModelId::GPT5 => models::GPT_5,
            ModelId::GPT5Codex => models::GPT_5_CODEX,
            ModelId::GPT5Mini => models::GPT_5_MINI,
            ModelId::GPT5Nano => models::GPT_5_NANO,
            ModelId::CodexMiniLatest => models::CODEX_MINI_LATEST,
            ModelId::OpenAIGptOss20b => models::openai::GPT_OSS_20B,
            ModelId::OpenAIGptOss120b => models::openai::GPT_OSS_120B,
            // Anthropic models
            ModelId::ClaudeOpus45 => models::CLAUDE_OPUS_4_5,
            ModelId::ClaudeOpus41 => models::CLAUDE_OPUS_4_1,
            ModelId::ClaudeSonnet45 => models::CLAUDE_SONNET_4_5,
            ModelId::ClaudeHaiku45 => models::CLAUDE_HAIKU_4_5,
            // NOTE(review): `ClaudeSonnet4` maps to a Sonnet 4.5 dated
            // constant — confirm this aliasing is intentional.
            ModelId::ClaudeSonnet4 => models::CLAUDE_SONNET_4_5_20250929,
            // DeepSeek models
            ModelId::DeepSeekChat => models::DEEPSEEK_CHAT,
            ModelId::DeepSeekReasoner => models::DEEPSEEK_REASONER,
            // xAI models
            ModelId::XaiGrok4 => models::xai::GROK_4,
            ModelId::XaiGrok4Mini => models::xai::GROK_4_MINI,
            ModelId::XaiGrok4Code => models::xai::GROK_4_CODE,
            ModelId::XaiGrok4CodeLatest => models::xai::GROK_4_CODE_LATEST,
            ModelId::XaiGrok4Vision => models::xai::GROK_4_VISION,
            // Z.AI models
            ModelId::ZaiGlm4Plus => models::zai::GLM_4_PLUS,
            ModelId::ZaiGlm4PlusDeepThinking => models::zai::GLM_4_PLUS_DEEP_THINKING,
            ModelId::ZaiGlm47 => models::zai::GLM_4_7,
            ModelId::ZaiGlm47DeepThinking => models::zai::GLM_4_7_DEEP_THINKING,
            ModelId::ZaiGlm47Flash => models::zai::GLM_4_7_FLASH,
            ModelId::ZaiGlm46 => models::zai::GLM_4_6,
            ModelId::ZaiGlm46DeepThinking => models::zai::GLM_4_6_DEEP_THINKING,
            ModelId::ZaiGlm46V => models::zai::GLM_4_6V,
            ModelId::ZaiGlm46VFlash => models::zai::GLM_4_6V_FLASH,
            ModelId::ZaiGlm46VFlashX => models::zai::GLM_4_6V_FLASHX,
            ModelId::ZaiGlm45 => models::zai::GLM_4_5,
            ModelId::ZaiGlm45DeepThinking => models::zai::GLM_4_5_DEEP_THINKING,
            ModelId::ZaiGlm45Air => models::zai::GLM_4_5_AIR,
            ModelId::ZaiGlm45X => models::zai::GLM_4_5_X,
            ModelId::ZaiGlm45Airx => models::zai::GLM_4_5_AIRX,
            ModelId::ZaiGlm45Flash => models::zai::GLM_4_5_FLASH,
            ModelId::ZaiGlm45V => models::zai::GLM_4_5V,
            ModelId::ZaiGlm432b0414128k => models::zai::GLM_4_32B_0414_128K,
            // Ollama models
            ModelId::OllamaGptOss20b => models::ollama::GPT_OSS_20B,
            ModelId::OllamaGptOss20bCloud => models::ollama::GPT_OSS_20B_CLOUD,
            ModelId::OllamaGptOss120bCloud => models::ollama::GPT_OSS_120B_CLOUD,
            ModelId::OllamaQwen317b => models::ollama::QWEN3_1_7B,
            ModelId::OllamaDeepseekV32Cloud => models::ollama::DEEPSEEK_V32_CLOUD,
            ModelId::OllamaQwen3Next80bCloud => models::ollama::QWEN3_NEXT_80B_CLOUD,
            ModelId::OllamaMistralLarge3675bCloud => models::ollama::MISTRAL_LARGE_3_675B_CLOUD,
            ModelId::OllamaKimiK2ThinkingCloud => models::ollama::KIMI_K2_THINKING_CLOUD,
            ModelId::OllamaGlm47Cloud => models::ollama::GLM_47_CLOUD,
            ModelId::OllamaMinimaxM21Cloud => models::ollama::MINIMAX_M21_CLOUD,
            ModelId::OllamaGemini3FlashPreviewCloud => models::ollama::GEMINI_3_FLASH_PREVIEW_CLOUD,

            ModelId::OllamaQwen3Coder480bCloud => models::ollama::QWEN3_CODER_480B_CLOUD,
            ModelId::OllamaGlm46Cloud => models::ollama::GLM_46_CLOUD,
            ModelId::OllamaGemini3ProPreviewLatestCloud => {
                models::ollama::GEMINI_3_PRO_PREVIEW_LATEST_CLOUD
            }
            ModelId::OllamaDevstral2123bCloud => models::ollama::DEVSTRAL_2_123B_CLOUD,
            ModelId::OllamaMinimaxM2Cloud => models::ollama::MINIMAX_M2_CLOUD,
            ModelId::OllamaNemotron3Nano30bCloud => models::ollama::NEMOTRON_3_NANO_30B_CLOUD,
            // LM Studio models
            ModelId::LmStudioMetaLlama38BInstruct => models::lmstudio::META_LLAMA_3_8B_INSTRUCT,
            ModelId::LmStudioMetaLlama318BInstruct => models::lmstudio::META_LLAMA_31_8B_INSTRUCT,
            ModelId::LmStudioQwen257BInstruct => models::lmstudio::QWEN25_7B_INSTRUCT,
            ModelId::LmStudioGemma22BIt => models::lmstudio::GEMMA_2_2B_IT,
            ModelId::LmStudioGemma29BIt => models::lmstudio::GEMMA_2_9B_IT,
            ModelId::LmStudioPhi31Mini4kInstruct => models::lmstudio::PHI_31_MINI_4K_INSTRUCT,
            // Hugging Face models
            ModelId::HuggingFaceDeepseekV32 => models::huggingface::DEEPSEEK_V32,
            ModelId::HuggingFaceOpenAIGptOss20b => models::huggingface::OPENAI_GPT_OSS_20B,
            ModelId::HuggingFaceOpenAIGptOss120b => models::huggingface::OPENAI_GPT_OSS_120B,
            ModelId::HuggingFaceGlm47 => models::huggingface::ZAI_GLM_47,
            ModelId::HuggingFaceGlm47FlashNovita => models::huggingface::ZAI_GLM_47_FLASH_NOVITA,
            ModelId::HuggingFaceKimiK2Thinking => models::huggingface::MOONSHOT_KIMI_K2_THINKING,
            ModelId::HuggingFaceMinimaxM21Novita => models::huggingface::MINIMAX_M2_1_NOVITA,
            ModelId::HuggingFaceDeepseekV32Novita => models::huggingface::DEEPSEEK_V32_NOVITA,
            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
                models::huggingface::XIAOMI_MIMO_V2_FLASH_NOVITA
            }
            // MiniMax models
            ModelId::MinimaxM21 => models::minimax::MINIMAX_M2_1,
            ModelId::MinimaxM21Lightning => models::minimax::MINIMAX_M2_1_LIGHTNING,
            ModelId::MinimaxM2 => models::minimax::MINIMAX_M2,
            // OpenRouter models
            // All remaining variants are OpenRouter listings handled by the
            // metadata early-return above. NOTE(review): in docsrs builds the
            // stub metadata returns `None`, so this arm would panic there —
            // confirm `as_str` is never exercised during doc builds.
            _ => unreachable!(),
        }
    }
668
    /// Get the provider for this model.
    ///
    /// OpenRouter marketplace listings are detected first via
    /// `openrouter_metadata()`; every other variant is mapped to its native
    /// provider by the exhaustive grouping below. The trailing catch-all only
    /// stands in for the OpenRouter variants, which never reach the `match`
    /// because of the early return.
    pub fn provider(&self) -> Provider {
        // Any model carrying OpenRouter metadata is served through the
        // marketplace, regardless of its original vendor.
        if self.openrouter_metadata().is_some() {
            return Provider::OpenRouter;
        }
        match self {
            ModelId::Gemini25FlashPreview
            | ModelId::Gemini25Flash
            | ModelId::Gemini25FlashLite
            | ModelId::Gemini25Pro
            | ModelId::Gemini3ProPreview => Provider::Gemini,
            ModelId::GPT5
            | ModelId::GPT5Codex
            | ModelId::GPT5Mini
            | ModelId::GPT5Nano
            | ModelId::CodexMiniLatest
            | ModelId::OpenAIGptOss20b
            | ModelId::OpenAIGptOss120b => Provider::OpenAI,
            ModelId::ClaudeOpus45
            | ModelId::ClaudeOpus41
            | ModelId::ClaudeSonnet45
            | ModelId::ClaudeHaiku45
            | ModelId::ClaudeSonnet4 => Provider::Anthropic,
            ModelId::DeepSeekChat | ModelId::DeepSeekReasoner => Provider::DeepSeek,
            ModelId::HuggingFaceDeepseekV32
            | ModelId::HuggingFaceOpenAIGptOss20b
            | ModelId::HuggingFaceOpenAIGptOss120b
            | ModelId::HuggingFaceGlm47
            | ModelId::HuggingFaceGlm47FlashNovita
            | ModelId::HuggingFaceKimiK2Thinking
            | ModelId::HuggingFaceMinimaxM21Novita
            | ModelId::HuggingFaceDeepseekV32Novita
            | ModelId::HuggingFaceXiaomiMimoV2FlashNovita => Provider::HuggingFace,
            ModelId::XaiGrok4
            | ModelId::XaiGrok4Mini
            | ModelId::XaiGrok4Code
            | ModelId::XaiGrok4CodeLatest
            | ModelId::XaiGrok4Vision => Provider::XAI,
            ModelId::ZaiGlm4Plus
            | ModelId::ZaiGlm4PlusDeepThinking
            | ModelId::ZaiGlm47
            | ModelId::ZaiGlm47DeepThinking
            | ModelId::ZaiGlm47Flash
            | ModelId::ZaiGlm46
            | ModelId::ZaiGlm46DeepThinking
            | ModelId::ZaiGlm46V
            | ModelId::ZaiGlm46VFlash
            | ModelId::ZaiGlm46VFlashX
            | ModelId::ZaiGlm45
            | ModelId::ZaiGlm45DeepThinking
            | ModelId::ZaiGlm45Air
            | ModelId::ZaiGlm45X
            | ModelId::ZaiGlm45Airx
            | ModelId::ZaiGlm45Flash
            | ModelId::ZaiGlm45V
            | ModelId::ZaiGlm432b0414128k => Provider::ZAI,
            ModelId::OllamaGptOss20b
            | ModelId::OllamaGptOss20bCloud
            | ModelId::OllamaGptOss120bCloud
            | ModelId::OllamaQwen317b
            | ModelId::OllamaDeepseekV32Cloud
            | ModelId::OllamaQwen3Next80bCloud
            | ModelId::OllamaMistralLarge3675bCloud
            | ModelId::OllamaKimiK2ThinkingCloud
            | ModelId::OllamaQwen3Coder480bCloud
            | ModelId::OllamaGlm46Cloud
            | ModelId::OllamaGemini3ProPreviewLatestCloud
            | ModelId::OllamaGemini3FlashPreviewCloud
            | ModelId::OllamaDevstral2123bCloud
            | ModelId::OllamaMinimaxM2Cloud
            | ModelId::OllamaMinimaxM21Cloud
            | ModelId::OllamaNemotron3Nano30bCloud
            | ModelId::OllamaGlm47Cloud => Provider::Ollama,
            ModelId::LmStudioMetaLlama38BInstruct
            | ModelId::LmStudioMetaLlama318BInstruct
            | ModelId::LmStudioQwen257BInstruct
            | ModelId::LmStudioGemma22BIt
            | ModelId::LmStudioGemma29BIt
            | ModelId::LmStudioPhi31Mini4kInstruct => Provider::LmStudio,
            ModelId::MinimaxM21 | ModelId::MinimaxM21Lightning | ModelId::MinimaxM2 => {
                Provider::Minimax
            }
            // NOTE(review): the catch-all hides newly added variants from the
            // compiler's exhaustiveness check; keep the arms above in sync
            // with the `ModelId` enum.
            _ => unreachable!(),
        }
    }
754
755    /// Whether this model supports configurable reasoning effort levels
756    pub fn supports_reasoning_effort(&self) -> bool {
757        self.provider().supports_reasoning_effort(self.as_str())
758    }
759
760    /// Get the display name for the model (human-readable)
761    pub fn display_name(&self) -> &'static str {
762        if let Some(meta) = self.openrouter_metadata() {
763            return meta.display;
764        }
765        match self {
766            // Gemini models
767            ModelId::Gemini25FlashPreview => "Gemini 2.5 Flash Preview",
768            ModelId::Gemini25Flash => "Gemini 2.5 Flash",
769            ModelId::Gemini25FlashLite => "Gemini 2.5 Flash Lite",
770            ModelId::Gemini25Pro => "Gemini 2.5 Pro",
771            ModelId::Gemini3ProPreview => "Gemini 3 Pro Preview",
772            // OpenAI models
773            ModelId::GPT5 => "GPT-5",
774            ModelId::GPT5Codex => "GPT-5 Codex",
775            ModelId::GPT5Mini => "GPT-5 Mini",
776            ModelId::GPT5Nano => "GPT-5 Nano",
777            ModelId::CodexMiniLatest => "Codex Mini Latest",
778            // Anthropic models
779            ModelId::ClaudeOpus45 => "Claude Opus 4.5",
780            ModelId::ClaudeOpus41 => "Claude Opus 4.1",
781            ModelId::ClaudeSonnet45 => "Claude Sonnet 4.5",
782            ModelId::ClaudeHaiku45 => "Claude Haiku 4.5",
783            ModelId::ClaudeSonnet4 => "Claude Sonnet 4",
784            // DeepSeek models
785            ModelId::DeepSeekChat => "DeepSeek V3.2 Chat",
786            ModelId::DeepSeekReasoner => "DeepSeek V3.2 Reasoner",
787            // xAI models
788            ModelId::XaiGrok4 => "Grok-4",
789            ModelId::XaiGrok4Mini => "Grok-4 Mini",
790            ModelId::XaiGrok4Code => "Grok-4 Code",
791            ModelId::XaiGrok4CodeLatest => "Grok-4 Code Latest",
792            ModelId::XaiGrok4Vision => "Grok-4 Vision",
793            // Z.AI models
794            ModelId::ZaiGlm4Plus => "GLM 4 Plus",
795            ModelId::ZaiGlm4PlusDeepThinking => "GLM 4 Plus Deep Thinking",
796            ModelId::ZaiGlm47 => "GLM 4.7",
797            ModelId::ZaiGlm47DeepThinking => "GLM 4.7 Deep Thinking",
798            ModelId::ZaiGlm47Flash => "GLM 4.7 Flash",
799            ModelId::ZaiGlm46 => "GLM 4.6",
800            ModelId::ZaiGlm46DeepThinking => "GLM 4.6 Deep Thinking",
801            ModelId::ZaiGlm46V => "GLM 4.6V",
802            ModelId::ZaiGlm46VFlash => "GLM 4.6V Flash",
803            ModelId::ZaiGlm46VFlashX => "GLM 4.6V FlashX",
804            ModelId::ZaiGlm45 => "GLM 4.5",
805            ModelId::ZaiGlm45DeepThinking => "GLM 4.5 Deep Thinking",
806            ModelId::ZaiGlm45Air => "GLM 4.5 Air",
807            ModelId::ZaiGlm45X => "GLM 4.5 X",
808            ModelId::ZaiGlm45Airx => "GLM 4.5 AirX",
809            ModelId::ZaiGlm45Flash => "GLM 4.5 Flash",
810            ModelId::ZaiGlm45V => "GLM 4.5V",
811            ModelId::ZaiGlm432b0414128k => "GLM 4 32B 0414 128K",
812            // Ollama models
813            ModelId::OllamaGptOss20b => "GPT-OSS 20B (local)",
814            ModelId::OllamaGptOss20bCloud => "GPT-OSS 20B (cloud)",
815            ModelId::OllamaGptOss120bCloud => "GPT-OSS 120B (cloud)",
816            ModelId::OllamaQwen317b => "Qwen3 1.7B (local)",
817            ModelId::OllamaDeepseekV32Cloud => "DeepSeek V3.2 (cloud)",
818            ModelId::OllamaQwen3Next80bCloud => "Qwen3 Next 80B (cloud)",
819            ModelId::OllamaMistralLarge3675bCloud => "Mistral Large 3 675B (cloud)",
820            ModelId::OllamaKimiK2ThinkingCloud => "Kimi K2 Thinking (cloud)",
821
822            ModelId::OllamaQwen3Coder480bCloud => "Qwen3 Coder 480B (cloud)",
823            ModelId::OllamaGlm46Cloud => "GLM-4.6 (cloud)",
824            ModelId::OllamaGemini3ProPreviewLatestCloud => "Gemini 3 Pro Preview (cloud)",
825            ModelId::OllamaGemini3FlashPreviewCloud => "Gemini 3 Flash Preview (cloud)",
826            ModelId::OllamaDevstral2123bCloud => "Devstral 2 123B (cloud)",
827            ModelId::OllamaMinimaxM2Cloud => "MiniMax-M2 (cloud)",
828            ModelId::OllamaGlm47Cloud => "GLM-4.7 (cloud)",
829            ModelId::OllamaMinimaxM21Cloud => "MiniMax-M2.1 (cloud)",
830            ModelId::OllamaNemotron3Nano30bCloud => "Nemotron-3-Nano 30B (cloud)",
831            ModelId::LmStudioMetaLlama38BInstruct => "Meta Llama 3 8B (LM Studio)",
832            ModelId::LmStudioMetaLlama318BInstruct => "Meta Llama 3.1 8B (LM Studio)",
833            ModelId::LmStudioQwen257BInstruct => "Qwen2.5 7B (LM Studio)",
834            ModelId::LmStudioGemma22BIt => "Gemma 2 2B (LM Studio)",
835            ModelId::LmStudioGemma29BIt => "Gemma 2 9B (LM Studio)",
836            ModelId::LmStudioPhi31Mini4kInstruct => "Phi-3.1 Mini 4K (LM Studio)",
837            // Hugging Face models
838            ModelId::HuggingFaceDeepseekV32 => "DeepSeek V3.2 (HF)",
839            ModelId::HuggingFaceOpenAIGptOss20b => "GPT-OSS 20B (HF)",
840            ModelId::HuggingFaceOpenAIGptOss120b => "GPT-OSS 120B (HF)",
841            ModelId::HuggingFaceGlm47 => "GLM-4.7 (HF)",
842            ModelId::HuggingFaceGlm47FlashNovita => "GLM-4.7-Flash (Novita)",
843            ModelId::HuggingFaceKimiK2Thinking => "Kimi K2 Thinking (HF)",
844            ModelId::HuggingFaceMinimaxM21Novita => "MiniMax-M2.1 (Novita)",
845            ModelId::HuggingFaceDeepseekV32Novita => "DeepSeek V3.2 (Novita)",
846            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => "MiMo-V2-Flash (Novita)",
847            // MiniMax models
848            ModelId::MinimaxM21 => "MiniMax-M2.1",
849            ModelId::MinimaxM21Lightning => "MiniMax-M2.1-lightning",
850            ModelId::MinimaxM2 => "MiniMax-M2",
851            // OpenRouter models
852            _ => unreachable!(),
853        }
854    }
855
856    /// Get a description of the model's characteristics
857    pub fn description(&self) -> &'static str {
858        if let Some(meta) = self.openrouter_metadata() {
859            return meta.description;
860        }
861        match self {
862            // Gemini models
863            ModelId::Gemini25FlashPreview => {
864                "Latest fast Gemini model with advanced multimodal capabilities"
865            }
866            ModelId::Gemini25Flash => {
867                "Legacy alias for Gemini 2.5 Flash Preview (same capabilities)"
868            }
869            ModelId::Gemini25FlashLite => {
870                "Legacy alias for Gemini 2.5 Flash Preview optimized for efficiency"
871            }
872            ModelId::Gemini25Pro => "Latest most capable Gemini model with reasoning",
873            ModelId::Gemini3ProPreview => {
874                "Preview of next-generation Gemini 3 Pro model with advanced reasoning and capabilities"
875            }
876            // OpenAI models
877            ModelId::GPT5 => "Latest most capable OpenAI model with advanced reasoning",
878            ModelId::GPT5Codex => {
879                "Code-focused GPT-5 variant optimized for tool calling and structured outputs"
880            }
881            ModelId::GPT5Mini => "Latest efficient OpenAI model, great for most tasks",
882            ModelId::GPT5Nano => "Latest most cost-effective OpenAI model",
883            ModelId::CodexMiniLatest => "Latest Codex model optimized for code generation",
884            ModelId::OpenAIGptOss20b => {
885                "OpenAI's open-source 20B parameter GPT-OSS model using harmony tokenization"
886            }
887            ModelId::OpenAIGptOss120b => {
888                "OpenAI's open-source 120B parameter GPT-OSS model using harmony tokenization"
889            }
890            // Anthropic models
891            ModelId::ClaudeOpus45 => {
892                "Latest flagship Anthropic model with exceptional reasoning capabilities"
893            }
894            ModelId::ClaudeOpus41 => {
895                "Latest flagship Anthropic model with exceptional reasoning capabilities"
896            }
897            ModelId::ClaudeSonnet45 => "Latest balanced Anthropic model for general tasks",
898            ModelId::ClaudeHaiku45 => {
899                "Latest efficient Anthropic model optimized for low-latency agent workflows"
900            }
901            ModelId::ClaudeSonnet4 => {
902                "Previous balanced Anthropic model maintained for compatibility"
903            }
904            // DeepSeek models
905            ModelId::DeepSeekChat => {
906                "DeepSeek V3.2 - Fast, efficient chat model for immediate responses"
907            }
908            ModelId::DeepSeekReasoner => {
909                "DeepSeek V3.2 - Thinking mode with integrated tool-use and reasoning capability"
910            }
911            // xAI models
912            ModelId::XaiGrok4 => "Flagship Grok 4 model with long context and tool use",
913            ModelId::XaiGrok4Mini => "Efficient Grok 4 Mini tuned for low latency",
914            ModelId::XaiGrok4Code => "Code-specialized Grok 4 deployment with tool support",
915            ModelId::XaiGrok4CodeLatest => {
916                "Latest Grok 4 code model offering enhanced reasoning traces"
917            }
918            ModelId::XaiGrok4Vision => "Multimodal Grok 4 model with image understanding",
919            // Z.AI models
920            ModelId::ZaiGlm4Plus | ModelId::ZaiGlm4PlusDeepThinking => {
921                "Z.AI flagship model with top-tier capability in reasoning, writing, and tool-use"
922            }
923            ModelId::ZaiGlm47 | ModelId::ZaiGlm47DeepThinking => {
924                "Latest Z.AI GLM flagship with enhanced reasoning, 200k context and coding strengths"
925            }
926            ModelId::ZaiGlm47Flash => {
927                "Z.AI GLM-4.7-Flash 30B-class SOTA lightweight model - Completely free, high-speed, optimized for agentic coding with enhanced reasoning capabilities"
928            }
929            ModelId::ZaiGlm46 | ModelId::ZaiGlm46DeepThinking => {
930                "Previous Z.AI GLM flagship with long-context reasoning and coding strengths"
931            }
932            ModelId::ZaiGlm46V | ModelId::ZaiGlm46VFlash | ModelId::ZaiGlm46VFlashX => {
933                "Vision-capable GLM 4.6 release optimized for multimodal understanding"
934            }
935            ModelId::ZaiGlm45 | ModelId::ZaiGlm45DeepThinking => {
936                "Balanced GLM 4.5 release for general assistant tasks"
937            }
938            ModelId::ZaiGlm45Air => "Efficient GLM 4.5 Air variant tuned for lower latency",
939            ModelId::ZaiGlm45X => "Enhanced GLM 4.5 X variant with improved reasoning",
940            ModelId::ZaiGlm45Airx => "Hybrid GLM 4.5 AirX variant blending efficiency with quality",
941            ModelId::ZaiGlm45Flash => "Low-latency GLM 4.5 Flash optimized for responsiveness",
942            ModelId::ZaiGlm45V => "Vision-capable GLM 4.5 release for multimodal tasks",
943            ModelId::ZaiGlm432b0414128k => {
944                "Legacy GLM 4 32B deployment offering extended 128K context window"
945            }
946            ModelId::OllamaGptOss20b => {
947                "Local GPT-OSS 20B deployment served via Ollama with no external API dependency"
948            }
949            ModelId::OllamaGptOss20bCloud => {
950                "Cloud-hosted GPT-OSS 20B accessed through Ollama Cloud for efficient reasoning tasks"
951            }
952            ModelId::OllamaGptOss120bCloud => {
953                "Cloud-hosted GPT-OSS 120B accessed through Ollama Cloud for larger reasoning tasks"
954            }
955            ModelId::OllamaQwen317b => {
956                "Qwen3 1.7B served locally through Ollama without external API requirements"
957            }
958            ModelId::OllamaDeepseekV32Cloud => {
959                "DeepSeek V3.2 cloud deployment via Ollama with enhanced reasoning and instruction following"
960            }
961            ModelId::OllamaQwen3Next80bCloud => {
962                "Qwen3 Next generation 80B model via Ollama Cloud with improved reasoning and long context"
963            }
964            ModelId::OllamaMistralLarge3675bCloud => {
965                "Mistral Large 3 675B reasoning model via Ollama Cloud for complex problem-solving"
966            }
967            ModelId::OllamaKimiK2ThinkingCloud => {
968                "MoonshotAI Kimi K2 thinking model via Ollama Cloud with explicit reasoning traces"
969            }
970            ModelId::OllamaGlm47Cloud => "Advancing the Coding Capability",
971            ModelId::OllamaMinimaxM21Cloud => {
972                "Exceptional multilingual capabilities to elevate code engineering"
973            }
974            ModelId::OllamaGemini3FlashPreviewCloud => {
975                "Gemini 3 Flash offers frontier intelligence built for speed at a fraction of the cost."
976            }
977            ModelId::OllamaGemini3ProPreviewLatestCloud => {
978                "Gemini 3 Pro Preview Latest offers advanced reasoning and long context capabilities."
979            }
980            ModelId::OllamaNemotron3Nano30bCloud => {
981                "NVIDIA Nemotron-3-Nano 30B brings efficient excellence to code"
982            }
983
984            ModelId::OllamaQwen3Coder480bCloud => {
985                "Cloud-hosted Qwen3 Coder 480B model accessed through Ollama Cloud for coding tasks"
986            }
987            ModelId::OllamaGlm46Cloud => {
988                "Cloud-hosted GLM-4.6 model accessed through Ollama Cloud for reasoning and coding"
989            }
990            ModelId::OllamaMinimaxM2Cloud => {
991                "Cloud-hosted MiniMax-M2 model accessed through Ollama Cloud for reasoning tasks"
992            }
993            ModelId::LmStudioMetaLlama38BInstruct => {
994                "Meta Llama 3 8B running through LM Studio's local OpenAI-compatible server"
995            }
996            ModelId::LmStudioMetaLlama318BInstruct => {
997                "Meta Llama 3.1 8B running through LM Studio's local OpenAI-compatible server"
998            }
999            ModelId::LmStudioQwen257BInstruct => {
1000                "Qwen2.5 7B hosted in LM Studio for local experimentation and coding tasks"
1001            }
1002            ModelId::LmStudioGemma22BIt => {
1003                "Gemma 2 2B IT deployed via LM Studio for lightweight on-device assistance"
1004            }
1005            ModelId::LmStudioGemma29BIt => {
1006                "Gemma 2 9B IT served locally via LM Studio when you need additional capacity"
1007            }
1008            ModelId::LmStudioPhi31Mini4kInstruct => {
1009                "Phi-3.1 Mini 4K hosted in LM Studio for compact reasoning and experimentation"
1010            }
1011            ModelId::MinimaxM21 => {
1012                "Latest MiniMax-M2.1 model with enhanced code understanding and reasoning"
1013            }
1014            ModelId::MinimaxM21Lightning => {
1015                "Fast version of MiniMax-M2.1 for rapid conversational tasks"
1016            }
1017            ModelId::MinimaxM2 => {
1018                "MiniMax-M2 via Anthropic-compatible API with reasoning and tool use"
1019            }
1020            ModelId::HuggingFaceDeepseekV32 => {
1021                "DeepSeek-V3.2 via Hugging Face router for advanced reasoning"
1022            }
1023            ModelId::HuggingFaceOpenAIGptOss20b => "OpenAI GPT-OSS 20B via Hugging Face router",
1024            ModelId::HuggingFaceOpenAIGptOss120b => "OpenAI GPT-OSS 120B via Hugging Face router",
1025            ModelId::HuggingFaceGlm47 => "Z.AI GLM-4.7 via Hugging Face router",
1026            ModelId::HuggingFaceGlm47FlashNovita => {
1027                "Z.AI GLM-4.7-Flash via Novita inference provider on HuggingFace router. Lightweight model optimized for agentic coding."
1028            }
1029            ModelId::HuggingFaceKimiK2Thinking => {
1030                "MoonshotAI Kimi K2 Thinking via Hugging Face router"
1031            }
1032            ModelId::HuggingFaceMinimaxM21Novita => {
1033                "MiniMax-M2.1 model via Novita inference provider on HuggingFace router. Enhanced reasoning capabilities."
1034            }
1035            ModelId::HuggingFaceDeepseekV32Novita => {
1036                "DeepSeek-V3.2 via Novita inference provider on HuggingFace router."
1037            }
1038            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
1039                "Xiaomi MiMo-V2-Flash via Novita on HuggingFace router."
1040            }
1041            _ => unreachable!(),
1042        }
1043    }
1044
1045    /// Return the OpenRouter vendor slug when this identifier maps to a marketplace listing
1046    pub fn openrouter_vendor(&self) -> Option<&'static str> {
1047        self.openrouter_metadata().map(|meta| meta.vendor)
1048    }
1049
1050    /// Get all available models as a vector
1051    pub fn all_models() -> Vec<ModelId> {
1052        let mut models = vec![
1053            // Gemini models
1054            ModelId::Gemini25FlashPreview,
1055            ModelId::Gemini25Flash,
1056            ModelId::Gemini25FlashLite,
1057            ModelId::Gemini25Pro,
1058            // OpenAI models
1059            ModelId::GPT5,
1060            ModelId::GPT5Codex,
1061            ModelId::GPT5Mini,
1062            ModelId::GPT5Nano,
1063            ModelId::CodexMiniLatest,
1064            // Anthropic models
1065            ModelId::ClaudeOpus45,
1066            ModelId::ClaudeOpus41,
1067            ModelId::ClaudeSonnet45,
1068            ModelId::ClaudeHaiku45,
1069            ModelId::ClaudeSonnet4,
1070            // DeepSeek models
1071            ModelId::DeepSeekChat,
1072            ModelId::DeepSeekReasoner,
1073            // xAI models
1074            ModelId::XaiGrok4,
1075            ModelId::XaiGrok4Mini,
1076            ModelId::XaiGrok4Code,
1077            ModelId::XaiGrok4CodeLatest,
1078            ModelId::XaiGrok4Vision,
1079            // Z.AI models
1080            ModelId::ZaiGlm46,
1081            ModelId::ZaiGlm47Flash,
1082            ModelId::ZaiGlm45,
1083            ModelId::ZaiGlm45Air,
1084            ModelId::ZaiGlm45X,
1085            ModelId::ZaiGlm45Airx,
1086            ModelId::ZaiGlm45Flash,
1087            ModelId::ZaiGlm432b0414128k,
1088            // Ollama models
1089            ModelId::OllamaGptOss20b,
1090            ModelId::OllamaGptOss20bCloud,
1091            ModelId::OllamaGptOss120bCloud,
1092            ModelId::OllamaQwen317b,
1093            ModelId::OllamaDeepseekV32Cloud,
1094            ModelId::OllamaQwen3Next80bCloud,
1095            ModelId::OllamaMistralLarge3675bCloud,
1096            ModelId::OllamaKimiK2ThinkingCloud,
1097            ModelId::OllamaQwen3Coder480bCloud,
1098            ModelId::OllamaGlm46Cloud,
1099            ModelId::OllamaGlm47Cloud,
1100            ModelId::OllamaGemini3ProPreviewLatestCloud,
1101            ModelId::OllamaGemini3FlashPreviewCloud,
1102            ModelId::OllamaDevstral2123bCloud,
1103            ModelId::OllamaMinimaxM2Cloud,
1104            ModelId::OllamaMinimaxM21Cloud,
1105            ModelId::OllamaNemotron3Nano30bCloud,
1106            // LM Studio models
1107            ModelId::LmStudioMetaLlama38BInstruct,
1108            ModelId::LmStudioMetaLlama318BInstruct,
1109            ModelId::LmStudioQwen257BInstruct,
1110            ModelId::LmStudioGemma22BIt,
1111            ModelId::LmStudioGemma29BIt,
1112            ModelId::LmStudioPhi31Mini4kInstruct,
1113            // MiniMax models
1114            ModelId::MinimaxM21,
1115            ModelId::MinimaxM21Lightning,
1116            ModelId::MinimaxM2,
1117            // Hugging Face models
1118            ModelId::HuggingFaceDeepseekV32,
1119            ModelId::HuggingFaceOpenAIGptOss20b,
1120            ModelId::HuggingFaceOpenAIGptOss120b,
1121            ModelId::HuggingFaceGlm47,
1122            ModelId::HuggingFaceGlm47FlashNovita,
1123            ModelId::HuggingFaceKimiK2Thinking,
1124            ModelId::HuggingFaceMinimaxM21Novita,
1125            ModelId::HuggingFaceDeepseekV32Novita,
1126            ModelId::HuggingFaceXiaomiMimoV2FlashNovita,
1127        ];
1128        models.extend(Self::openrouter_models());
1129        models
1130    }
1131
1132    /// Get all models for a specific provider
1133    pub fn models_for_provider(provider: Provider) -> Vec<ModelId> {
1134        Self::all_models()
1135            .into_iter()
1136            .filter(|model| model.provider() == provider)
1137            .collect()
1138    }
1139
1140    /// Get recommended fallback models in order of preference
1141    pub fn fallback_models() -> Vec<ModelId> {
1142        vec![
1143            ModelId::Gemini25FlashPreview,
1144            ModelId::Gemini25Pro,
1145            ModelId::GPT5,
1146            ModelId::OpenAIGptOss20b,
1147            ModelId::ClaudeOpus45,
1148            ModelId::ClaudeOpus41,
1149            ModelId::ClaudeSonnet45,
1150            ModelId::DeepSeekReasoner,
1151            ModelId::XaiGrok4,
1152            ModelId::ZaiGlm46,
1153            ModelId::OpenRouterGrokCodeFast1,
1154        ]
1155    }
1156
1157    /// Get the default orchestrator model (more capable)
1158    pub fn default_orchestrator() -> Self {
1159        ModelId::Gemini25Pro
1160    }
1161
1162    /// Get the default subagent model (fast and efficient)
1163    pub fn default_subagent() -> Self {
1164        ModelId::Gemini25FlashPreview
1165    }
1166
1167    /// Get provider-specific defaults for orchestrator
1168    pub fn default_orchestrator_for_provider(provider: Provider) -> Self {
1169        match provider {
1170            Provider::Gemini => ModelId::Gemini25Pro,
1171            Provider::OpenAI => ModelId::GPT5,
1172            Provider::Anthropic => ModelId::ClaudeOpus45,
1173            Provider::DeepSeek => ModelId::DeepSeekReasoner,
1174            Provider::XAI => ModelId::XaiGrok4,
1175            Provider::OpenRouter => ModelId::OpenRouterGrokCodeFast1,
1176            Provider::Ollama => ModelId::OllamaGptOss20b,
1177            Provider::LmStudio => ModelId::LmStudioMetaLlama318BInstruct,
1178            Provider::ZAI => ModelId::ZaiGlm46,
1179            Provider::Moonshot => ModelId::OpenRouterGrokCodeFast1, // Fallback: no Moonshot models available
1180            Provider::Minimax => ModelId::MinimaxM21,
1181            Provider::HuggingFace => ModelId::HuggingFaceOpenAIGptOss120b,
1182        }
1183    }
1184
1185    /// Get provider-specific defaults for subagent
1186    pub fn default_subagent_for_provider(provider: Provider) -> Self {
1187        match provider {
1188            Provider::Gemini => ModelId::Gemini25FlashPreview,
1189            Provider::OpenAI => ModelId::GPT5Mini,
1190            Provider::Anthropic => ModelId::ClaudeSonnet45,
1191            Provider::DeepSeek => ModelId::DeepSeekChat,
1192            Provider::XAI => ModelId::XaiGrok4Code,
1193            Provider::OpenRouter => ModelId::OpenRouterGrokCodeFast1,
1194            Provider::Ollama => ModelId::OllamaQwen317b,
1195            Provider::LmStudio => ModelId::LmStudioQwen257BInstruct,
1196            Provider::ZAI => ModelId::ZaiGlm45Flash,
1197            Provider::Moonshot => ModelId::OpenRouterGrokCodeFast1, // Fallback: no Moonshot models available
1198            Provider::Minimax => ModelId::MinimaxM21Lightning,
1199            Provider::HuggingFace => ModelId::HuggingFaceOpenAIGptOss20b,
1200        }
1201    }
1202
1203    /// Get provider-specific defaults for single agent
1204    pub fn default_single_for_provider(provider: Provider) -> Self {
1205        match provider {
1206            Provider::Gemini => ModelId::Gemini25FlashPreview,
1207            Provider::OpenAI => ModelId::GPT5,
1208            Provider::Anthropic => ModelId::ClaudeOpus45,
1209            Provider::DeepSeek => ModelId::DeepSeekReasoner,
1210            Provider::XAI => ModelId::XaiGrok4,
1211            Provider::OpenRouter => ModelId::OpenRouterGrokCodeFast1,
1212            Provider::Ollama => ModelId::OllamaGptOss20b,
1213            Provider::LmStudio => ModelId::LmStudioMetaLlama318BInstruct,
1214            Provider::ZAI => ModelId::ZaiGlm46,
1215            Provider::Moonshot => ModelId::OpenRouterGrokCodeFast1, // Fallback: no Moonshot models available
1216            Provider::Minimax => ModelId::MinimaxM21,
1217            Provider::HuggingFace => ModelId::HuggingFaceOpenAIGptOss120b,
1218        }
1219    }
1220
1221    /// Check if this is a "flash" variant (optimized for speed)
1222    pub fn is_flash_variant(&self) -> bool {
1223        matches!(
1224            self,
1225            ModelId::Gemini25FlashPreview
1226                | ModelId::Gemini25Flash
1227                | ModelId::Gemini25FlashLite
1228                | ModelId::ZaiGlm45Flash
1229                | ModelId::ZaiGlm46VFlash
1230                | ModelId::ZaiGlm46VFlashX
1231                | ModelId::MinimaxM21Lightning
1232                | ModelId::OllamaGemini3FlashPreviewCloud
1233        )
1234    }
1235
1236    /// Check if this is a "pro" variant (optimized for capability)
1237    pub fn is_pro_variant(&self) -> bool {
1238        matches!(
1239            self,
1240            ModelId::Gemini25Pro
1241                | ModelId::GPT5
1242                | ModelId::GPT5Codex
1243                | ModelId::ClaudeOpus41
1244                | ModelId::DeepSeekReasoner
1245                | ModelId::XaiGrok4
1246                | ModelId::ZaiGlm4Plus
1247                | ModelId::ZaiGlm4PlusDeepThinking
1248                | ModelId::ZaiGlm47
1249                | ModelId::ZaiGlm47DeepThinking
1250                | ModelId::ZaiGlm46
1251                | ModelId::ZaiGlm46DeepThinking
1252                | ModelId::MinimaxM21
1253                | ModelId::OllamaGlm47Cloud
1254                | ModelId::OllamaMinimaxM21Cloud
1255        )
1256    }
1257
1258    /// Check if this is an optimized/efficient variant
1259    pub fn is_efficient_variant(&self) -> bool {
1260        if let Some(meta) = self.openrouter_metadata() {
1261            return meta.efficient;
1262        }
1263        matches!(
1264            self,
1265            ModelId::Gemini25FlashPreview
1266                | ModelId::Gemini25Flash
1267                | ModelId::Gemini25FlashLite
1268                | ModelId::GPT5Mini
1269                | ModelId::GPT5Nano
1270                | ModelId::ClaudeHaiku45
1271                | ModelId::DeepSeekChat
1272                | ModelId::XaiGrok4Code
1273                | ModelId::ZaiGlm45Air
1274                | ModelId::ZaiGlm45Airx
1275                | ModelId::ZaiGlm45Flash
1276                | ModelId::ZaiGlm46VFlash
1277                | ModelId::ZaiGlm46VFlashX
1278        )
1279    }
1280
1281    /// Check if this is a top-tier model
1282    pub fn is_top_tier(&self) -> bool {
1283        if let Some(meta) = self.openrouter_metadata() {
1284            return meta.top_tier;
1285        }
1286        matches!(
1287            self,
1288            ModelId::Gemini25Pro
1289                | ModelId::GPT5
1290                | ModelId::GPT5Codex
1291                | ModelId::ClaudeOpus41
1292                | ModelId::ClaudeSonnet45
1293                | ModelId::ClaudeSonnet4
1294                | ModelId::DeepSeekReasoner
1295                | ModelId::XaiGrok4
1296                | ModelId::XaiGrok4CodeLatest
1297                | ModelId::ZaiGlm4Plus
1298                | ModelId::ZaiGlm4PlusDeepThinking
1299                | ModelId::ZaiGlm47
1300                | ModelId::ZaiGlm47DeepThinking
1301                | ModelId::ZaiGlm46
1302                | ModelId::ZaiGlm46DeepThinking
1303        )
1304    }
1305
1306    /// Determine whether the model is a reasoning-capable variant
1307    pub fn is_reasoning_variant(&self) -> bool {
1308        if let Some(meta) = self.openrouter_metadata() {
1309            return meta.reasoning;
1310        }
1311        matches!(
1312            self,
1313            ModelId::ZaiGlm4PlusDeepThinking
1314                | ModelId::ZaiGlm47DeepThinking
1315                | ModelId::ZaiGlm46DeepThinking
1316                | ModelId::ZaiGlm45DeepThinking
1317        ) || self.provider().supports_reasoning_effort(self.as_str())
1318    }
1319
1320    /// Determine whether the model supports tool calls/function execution
1321    pub fn supports_tool_calls(&self) -> bool {
1322        if let Some(meta) = self.openrouter_metadata() {
1323            return meta.tool_call;
1324        }
1325        true
1326    }
1327
1328    /// Get the generation/version string for this model
1329    pub fn generation(&self) -> &'static str {
1330        if let Some(meta) = self.openrouter_metadata() {
1331            return meta.generation;
1332        }
1333        match self {
1334            // Gemini generations
1335            ModelId::Gemini25FlashPreview
1336            | ModelId::Gemini25Flash
1337            | ModelId::Gemini25FlashLite
1338            | ModelId::Gemini25Pro => "2.5",
1339            ModelId::Gemini3ProPreview => "3",
1340            // OpenAI generations
1341            ModelId::GPT5
1342            | ModelId::GPT5Codex
1343            | ModelId::GPT5Mini
1344            | ModelId::GPT5Nano
1345            | ModelId::CodexMiniLatest => "5",
1346            // Anthropic generations
1347            ModelId::ClaudeOpus45 | ModelId::ClaudeSonnet45 | ModelId::ClaudeHaiku45 => "4.5",
1348            ModelId::ClaudeOpus41 => "4.1",
1349            ModelId::ClaudeSonnet4 => "4",
1350            // DeepSeek generations
1351            ModelId::DeepSeekChat | ModelId::DeepSeekReasoner => "V3.2-Exp",
1352            // xAI generations
1353            ModelId::XaiGrok4
1354            | ModelId::XaiGrok4Mini
1355            | ModelId::XaiGrok4Code
1356            | ModelId::XaiGrok4CodeLatest
1357            | ModelId::XaiGrok4Vision => "4",
1358            // Z.AI generations
1359            ModelId::ZaiGlm4Plus | ModelId::ZaiGlm4PlusDeepThinking => "4-Plus",
1360            ModelId::ZaiGlm47 | ModelId::ZaiGlm47DeepThinking => "4.7",
1361            ModelId::ZaiGlm46 | ModelId::ZaiGlm46DeepThinking => "4.6",
1362            ModelId::ZaiGlm46V | ModelId::ZaiGlm46VFlash | ModelId::ZaiGlm46VFlashX => "4.6",
1363            ModelId::ZaiGlm45
1364            | ModelId::ZaiGlm45DeepThinking
1365            | ModelId::ZaiGlm45Air
1366            | ModelId::ZaiGlm45X
1367            | ModelId::ZaiGlm45Airx
1368            | ModelId::ZaiGlm45Flash
1369            | ModelId::ZaiGlm45V => "4.5",
1370            ModelId::ZaiGlm432b0414128k => "4-32B",
1371            ModelId::OllamaGptOss20b => "oss",
1372            ModelId::OllamaGptOss20bCloud => "oss-cloud",
1373            ModelId::OllamaGptOss120bCloud => "oss-cloud",
1374            ModelId::OllamaQwen317b => "oss",
1375            ModelId::OllamaDeepseekV32Cloud => "deepseek-v3.2",
1376            ModelId::OllamaQwen3Next80bCloud => "qwen3-next",
1377            ModelId::OllamaMistralLarge3675bCloud => "mistral-large-3",
1378            ModelId::OllamaKimiK2ThinkingCloud => "kimi-k2-thinking",
1379            ModelId::OllamaQwen3Coder480bCloud => "qwen3-coder-cloud",
1380            ModelId::OllamaGlm46Cloud => "glm-cloud",
1381            ModelId::OllamaMinimaxM2Cloud => "minimax-cloud",
1382            ModelId::LmStudioMetaLlama38BInstruct => "meta-llama-3",
1383            ModelId::LmStudioMetaLlama318BInstruct => "meta-llama-3.1",
1384            ModelId::LmStudioQwen257BInstruct => "qwen2.5",
1385            ModelId::LmStudioGemma22BIt => "gemma-2",
1386            ModelId::LmStudioGemma29BIt => "gemma-2",
1387            ModelId::LmStudioPhi31Mini4kInstruct => "phi-3.1",
1388            ModelId::MinimaxM21 | ModelId::MinimaxM21Lightning => "M2.1",
1389            ModelId::MinimaxM2 => "M2",
1390            ModelId::HuggingFaceDeepseekV32 | ModelId::HuggingFaceDeepseekV32Novita => "v3.2",
1391            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => "v2-flash",
1392            ModelId::HuggingFaceGlm47 => "4.7",
1393            ModelId::HuggingFaceKimiK2Thinking => "k2",
1394            ModelId::HuggingFaceMinimaxM21Novita => "m2.1",
1395            ModelId::HuggingFaceOpenAIGptOss20b | ModelId::HuggingFaceOpenAIGptOss120b => "oss",
1396            _ => unreachable!(),
1397        }
1398    }
1399}
1400
1401impl fmt::Display for ModelId {
1402    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1403        write!(f, "{}", self.as_str())
1404    }
1405}
1406
impl FromStr for ModelId {
    type Err = ModelParseError;

    /// Parse a canonical model identifier string into a `ModelId`.
    ///
    /// OpenRouter marketplace identifiers are tried first via the generated
    /// parser; every other identifier must exactly match one of the string
    /// constants in `crate::constants::models`. Unrecognized strings yield
    /// `ModelParseError::InvalidModel`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if let Some(model) = Self::parse_openrouter_model(s) {
            return Ok(model);
        }

        use crate::constants::models;
        match s {
            // Gemini models
            s if s == models::GEMINI_2_5_FLASH_PREVIEW => Ok(ModelId::Gemini25FlashPreview),
            s if s == models::GEMINI_2_5_FLASH => Ok(ModelId::Gemini25Flash),
            s if s == models::GEMINI_2_5_FLASH_LITE => Ok(ModelId::Gemini25FlashLite),
            s if s == models::GEMINI_2_5_PRO => Ok(ModelId::Gemini25Pro),
            s if s == models::GEMINI_3_PRO_PREVIEW => Ok(ModelId::Gemini3ProPreview),
            // OpenAI models
            s if s == models::GPT_5 => Ok(ModelId::GPT5),
            s if s == models::GPT_5_CODEX => Ok(ModelId::GPT5Codex),
            s if s == models::GPT_5_MINI => Ok(ModelId::GPT5Mini),
            s if s == models::GPT_5_NANO => Ok(ModelId::GPT5Nano),
            s if s == models::CODEX_MINI_LATEST => Ok(ModelId::CodexMiniLatest),
            s if s == models::openai::GPT_OSS_20B => Ok(ModelId::OpenAIGptOss20b),
            s if s == models::openai::GPT_OSS_120B => Ok(ModelId::OpenAIGptOss120b),
            // Anthropic models
            s if s == models::CLAUDE_OPUS_4_5 => Ok(ModelId::ClaudeOpus45),
            s if s == models::CLAUDE_OPUS_4_1 => Ok(ModelId::ClaudeOpus41),
            s if s == models::CLAUDE_SONNET_4_5 => Ok(ModelId::ClaudeSonnet45),
            s if s == models::CLAUDE_HAIKU_4_5 => Ok(ModelId::ClaudeHaiku45),
            // NOTE(review): the dated 4.5 snapshot maps to `ClaudeSonnet4`,
            // not `ClaudeSonnet45`; `as_str` and the unit tests mirror this —
            // confirm the mapping is intentional.
            s if s == models::CLAUDE_SONNET_4_5_20250929 => Ok(ModelId::ClaudeSonnet4),
            // DeepSeek models
            s if s == models::DEEPSEEK_CHAT => Ok(ModelId::DeepSeekChat),
            s if s == models::DEEPSEEK_REASONER => Ok(ModelId::DeepSeekReasoner),
            // xAI models
            s if s == models::xai::GROK_4 => Ok(ModelId::XaiGrok4),
            s if s == models::xai::GROK_4_MINI => Ok(ModelId::XaiGrok4Mini),
            s if s == models::xai::GROK_4_CODE => Ok(ModelId::XaiGrok4Code),
            s if s == models::xai::GROK_4_CODE_LATEST => Ok(ModelId::XaiGrok4CodeLatest),
            s if s == models::xai::GROK_4_VISION => Ok(ModelId::XaiGrok4Vision),
            // Z.AI models
            s if s == models::zai::GLM_4_PLUS => Ok(ModelId::ZaiGlm4Plus),
            s if s == models::zai::GLM_4_PLUS_DEEP_THINKING => Ok(ModelId::ZaiGlm4PlusDeepThinking),
            s if s == models::zai::GLM_4_7 => Ok(ModelId::ZaiGlm47),
            s if s == models::zai::GLM_4_7_DEEP_THINKING => Ok(ModelId::ZaiGlm47DeepThinking),
            s if s == models::zai::GLM_4_6 => Ok(ModelId::ZaiGlm46),
            s if s == models::zai::GLM_4_6_DEEP_THINKING => Ok(ModelId::ZaiGlm46DeepThinking),
            s if s == models::zai::GLM_4_6V => Ok(ModelId::ZaiGlm46V),
            s if s == models::zai::GLM_4_6V_FLASH => Ok(ModelId::ZaiGlm46VFlash),
            s if s == models::zai::GLM_4_6V_FLASHX => Ok(ModelId::ZaiGlm46VFlashX),
            s if s == models::zai::GLM_4_5 => Ok(ModelId::ZaiGlm45),
            s if s == models::zai::GLM_4_5_DEEP_THINKING => Ok(ModelId::ZaiGlm45DeepThinking),
            s if s == models::zai::GLM_4_5_AIR => Ok(ModelId::ZaiGlm45Air),
            s if s == models::zai::GLM_4_5_X => Ok(ModelId::ZaiGlm45X),
            s if s == models::zai::GLM_4_5_AIRX => Ok(ModelId::ZaiGlm45Airx),
            s if s == models::zai::GLM_4_5_FLASH => Ok(ModelId::ZaiGlm45Flash),
            s if s == models::zai::GLM_4_5V => Ok(ModelId::ZaiGlm45V),
            s if s == models::zai::GLM_4_32B_0414_128K => Ok(ModelId::ZaiGlm432b0414128k),
            // Ollama models (local and cloud-hosted)
            s if s == models::ollama::GPT_OSS_20B => Ok(ModelId::OllamaGptOss20b),
            s if s == models::ollama::GPT_OSS_20B_CLOUD => Ok(ModelId::OllamaGptOss20bCloud),
            s if s == models::ollama::GPT_OSS_120B_CLOUD => Ok(ModelId::OllamaGptOss120bCloud),
            s if s == models::ollama::QWEN3_1_7B => Ok(ModelId::OllamaQwen317b),
            s if s == models::ollama::DEEPSEEK_V32_CLOUD => Ok(ModelId::OllamaDeepseekV32Cloud),
            s if s == models::ollama::QWEN3_NEXT_80B_CLOUD => Ok(ModelId::OllamaQwen3Next80bCloud),
            s if s == models::ollama::MISTRAL_LARGE_3_675B_CLOUD => {
                Ok(ModelId::OllamaMistralLarge3675bCloud)
            }
            s if s == models::ollama::KIMI_K2_THINKING_CLOUD => {
                Ok(ModelId::OllamaKimiK2ThinkingCloud)
            }

            s if s == models::ollama::QWEN3_CODER_480B_CLOUD => {
                Ok(ModelId::OllamaQwen3Coder480bCloud)
            }
            s if s == models::ollama::GLM_46_CLOUD => Ok(ModelId::OllamaGlm46Cloud),
            s if s == models::ollama::GLM_47_CLOUD => Ok(ModelId::OllamaGlm47Cloud),
            s if s == models::ollama::GEMINI_3_PRO_PREVIEW_LATEST_CLOUD => {
                Ok(ModelId::OllamaGemini3ProPreviewLatestCloud)
            }
            s if s == models::ollama::GEMINI_3_FLASH_PREVIEW_CLOUD => {
                Ok(ModelId::OllamaGemini3FlashPreviewCloud)
            }
            s if s == models::ollama::MINIMAX_M2_CLOUD => Ok(ModelId::OllamaMinimaxM2Cloud),
            s if s == models::ollama::MINIMAX_M21_CLOUD => Ok(ModelId::OllamaMinimaxM21Cloud),
            s if s == models::ollama::DEVSTRAL_2_123B_CLOUD => {
                Ok(ModelId::OllamaDevstral2123bCloud)
            }
            s if s == models::ollama::NEMOTRON_3_NANO_30B_CLOUD => {
                Ok(ModelId::OllamaNemotron3Nano30bCloud)
            }
            // LM Studio models (OpenAI-compatible local server)
            s if s == models::lmstudio::META_LLAMA_3_8B_INSTRUCT => {
                Ok(ModelId::LmStudioMetaLlama38BInstruct)
            }
            s if s == models::lmstudio::META_LLAMA_31_8B_INSTRUCT => {
                Ok(ModelId::LmStudioMetaLlama318BInstruct)
            }
            s if s == models::lmstudio::QWEN25_7B_INSTRUCT => Ok(ModelId::LmStudioQwen257BInstruct),
            s if s == models::lmstudio::GEMMA_2_2B_IT => Ok(ModelId::LmStudioGemma22BIt),
            s if s == models::lmstudio::GEMMA_2_9B_IT => Ok(ModelId::LmStudioGemma29BIt),
            s if s == models::lmstudio::PHI_31_MINI_4K_INSTRUCT => {
                Ok(ModelId::LmStudioPhi31Mini4kInstruct)
            }
            // MiniMax models
            s if s == models::minimax::MINIMAX_M2_1 => Ok(ModelId::MinimaxM21),
            s if s == models::minimax::MINIMAX_M2_1_LIGHTNING => Ok(ModelId::MinimaxM21Lightning),
            s if s == models::minimax::MINIMAX_M2 => Ok(ModelId::MinimaxM2),
            // Hugging Face models
            s if s == models::huggingface::DEEPSEEK_V32 => Ok(ModelId::HuggingFaceDeepseekV32),
            s if s == models::huggingface::OPENAI_GPT_OSS_20B => {
                Ok(ModelId::HuggingFaceOpenAIGptOss20b)
            }
            s if s == models::huggingface::OPENAI_GPT_OSS_120B => {
                Ok(ModelId::HuggingFaceOpenAIGptOss120b)
            }
            s if s == models::huggingface::ZAI_GLM_47 => Ok(ModelId::HuggingFaceGlm47),
            s if s == models::huggingface::MOONSHOT_KIMI_K2_THINKING => {
                Ok(ModelId::HuggingFaceKimiK2Thinking)
            }
            s if s == models::huggingface::MINIMAX_M2_1_NOVITA => {
                Ok(ModelId::HuggingFaceMinimaxM21Novita)
            }
            s if s == models::huggingface::DEEPSEEK_V32_NOVITA => {
                Ok(ModelId::HuggingFaceDeepseekV32Novita)
            }
            s if s == models::huggingface::XIAOMI_MIMO_V2_FLASH_NOVITA => {
                Ok(ModelId::HuggingFaceXiaomiMimoV2FlashNovita)
            }
            // Anything else is not a recognized model identifier
            _ => Err(ModelParseError::InvalidModel(s.to_string())),
        }
    }
}
1536
/// Error type for model parsing failures
///
/// `Eq` is derived in addition to `PartialEq` since both payloads are
/// `String`, allowing use in hash-based collections and exact comparisons.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ModelParseError {
    /// The string did not match any known model identifier.
    InvalidModel(String),
    /// The string did not match any known provider name.
    InvalidProvider(String),
}
1543
1544impl fmt::Display for ModelParseError {
1545    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1546        match self {
1547            ModelParseError::InvalidModel(model) => {
1548                write!(
1549                    f,
1550                    "Invalid model identifier: '{}'. Supported models: {}",
1551                    model,
1552                    ModelId::all_models()
1553                        .iter()
1554                        .map(|m| m.as_str())
1555                        .collect::<Vec<_>>()
1556                        .join(", ")
1557                )
1558            }
1559            ModelParseError::InvalidProvider(provider) => {
1560                write!(
1561                    f,
1562                    "Invalid provider: '{}'. Supported providers: {}",
1563                    provider,
1564                    Provider::all_providers()
1565                        .iter()
1566                        .map(|p| p.to_string())
1567                        .collect::<Vec<_>>()
1568                        .join(", ")
1569                )
1570            }
1571        }
1572    }
1573}
1574
// Marker impl: `Debug` and `Display` are implemented above, so the default
// `std::error::Error` methods suffice and no overrides are needed.
impl std::error::Error for ModelParseError {}
1576
1577#[cfg(test)]
1578mod tests {
1579    use super::*;
1580    use crate::constants::models;
1581
1582    #[test]
1583    fn test_model_string_conversion() {
1584        // Gemini models
1585        assert_eq!(
1586            ModelId::Gemini25FlashPreview.as_str(),
1587            models::GEMINI_2_5_FLASH_PREVIEW
1588        );
1589        assert_eq!(ModelId::Gemini25Flash.as_str(), models::GEMINI_2_5_FLASH);
1590        assert_eq!(
1591            ModelId::Gemini25FlashLite.as_str(),
1592            models::GEMINI_2_5_FLASH_LITE
1593        );
1594        assert_eq!(ModelId::Gemini25Pro.as_str(), models::GEMINI_2_5_PRO);
1595        // OpenAI models
1596        assert_eq!(ModelId::GPT5.as_str(), models::GPT_5);
1597        assert_eq!(ModelId::GPT5Codex.as_str(), models::GPT_5_CODEX);
1598        assert_eq!(ModelId::GPT5Mini.as_str(), models::GPT_5_MINI);
1599        assert_eq!(ModelId::GPT5Nano.as_str(), models::GPT_5_NANO);
1600        assert_eq!(ModelId::CodexMiniLatest.as_str(), models::CODEX_MINI_LATEST);
1601        // Anthropic models
1602        assert_eq!(ModelId::ClaudeSonnet45.as_str(), models::CLAUDE_SONNET_4_5);
1603        assert_eq!(ModelId::ClaudeHaiku45.as_str(), models::CLAUDE_HAIKU_4_5);
1604        assert_eq!(
1605            ModelId::ClaudeSonnet4.as_str(),
1606            models::CLAUDE_SONNET_4_5_20250929
1607        );
1608        assert_eq!(ModelId::ClaudeOpus41.as_str(), models::CLAUDE_OPUS_4_1);
1609        // DeepSeek models
1610        assert_eq!(ModelId::DeepSeekChat.as_str(), models::DEEPSEEK_CHAT);
1611        assert_eq!(
1612            ModelId::DeepSeekReasoner.as_str(),
1613            models::DEEPSEEK_REASONER
1614        );
1615        // xAI models
1616        assert_eq!(ModelId::XaiGrok4.as_str(), models::xai::GROK_4);
1617        assert_eq!(ModelId::XaiGrok4Mini.as_str(), models::xai::GROK_4_MINI);
1618        assert_eq!(ModelId::XaiGrok4Code.as_str(), models::xai::GROK_4_CODE);
1619        assert_eq!(
1620            ModelId::XaiGrok4CodeLatest.as_str(),
1621            models::xai::GROK_4_CODE_LATEST
1622        );
1623        assert_eq!(ModelId::XaiGrok4Vision.as_str(), models::xai::GROK_4_VISION);
1624        // Z.AI models
1625        assert_eq!(ModelId::ZaiGlm46.as_str(), models::zai::GLM_4_6);
1626        assert_eq!(ModelId::ZaiGlm45.as_str(), models::zai::GLM_4_5);
1627        assert_eq!(ModelId::ZaiGlm45Air.as_str(), models::zai::GLM_4_5_AIR);
1628        assert_eq!(ModelId::ZaiGlm45X.as_str(), models::zai::GLM_4_5_X);
1629        assert_eq!(ModelId::ZaiGlm45Airx.as_str(), models::zai::GLM_4_5_AIRX);
1630        assert_eq!(ModelId::ZaiGlm45Flash.as_str(), models::zai::GLM_4_5_FLASH);
1631        assert_eq!(
1632            ModelId::ZaiGlm432b0414128k.as_str(),
1633            models::zai::GLM_4_32B_0414_128K
1634        );
1635        for entry in openrouter_generated::ENTRIES {
1636            assert_eq!(entry.variant.as_str(), entry.id);
1637        }
1638    }
1639
1640    #[test]
1641    fn test_model_from_string() {
1642        // Gemini models
1643        assert_eq!(
1644            models::GEMINI_2_5_FLASH_PREVIEW.parse::<ModelId>().unwrap(),
1645            ModelId::Gemini25FlashPreview
1646        );
1647        assert_eq!(
1648            models::GEMINI_2_5_FLASH.parse::<ModelId>().unwrap(),
1649            ModelId::Gemini25Flash
1650        );
1651        assert_eq!(
1652            models::GEMINI_2_5_FLASH_LITE.parse::<ModelId>().unwrap(),
1653            ModelId::Gemini25FlashLite
1654        );
1655        assert_eq!(
1656            models::GEMINI_2_5_PRO.parse::<ModelId>().unwrap(),
1657            ModelId::Gemini25Pro
1658        );
1659        // OpenAI models
1660        assert_eq!(models::GPT_5.parse::<ModelId>().unwrap(), ModelId::GPT5);
1661        assert_eq!(
1662            models::GPT_5_CODEX.parse::<ModelId>().unwrap(),
1663            ModelId::GPT5Codex
1664        );
1665        assert_eq!(
1666            models::GPT_5_MINI.parse::<ModelId>().unwrap(),
1667            ModelId::GPT5Mini
1668        );
1669        assert_eq!(
1670            models::GPT_5_NANO.parse::<ModelId>().unwrap(),
1671            ModelId::GPT5Nano
1672        );
1673        assert_eq!(
1674            models::CODEX_MINI_LATEST.parse::<ModelId>().unwrap(),
1675            ModelId::CodexMiniLatest
1676        );
1677        assert_eq!(
1678            models::openai::GPT_OSS_20B.parse::<ModelId>().unwrap(),
1679            ModelId::OpenAIGptOss20b
1680        );
1681        assert_eq!(
1682            models::openai::GPT_OSS_120B.parse::<ModelId>().unwrap(),
1683            ModelId::OpenAIGptOss120b
1684        );
1685        // Anthropic models
1686        assert_eq!(
1687            models::CLAUDE_SONNET_4_5.parse::<ModelId>().unwrap(),
1688            ModelId::ClaudeSonnet45
1689        );
1690        assert_eq!(
1691            models::CLAUDE_HAIKU_4_5.parse::<ModelId>().unwrap(),
1692            ModelId::ClaudeHaiku45
1693        );
1694        assert_eq!(
1695            models::CLAUDE_SONNET_4_5_20250929
1696                .parse::<ModelId>()
1697                .unwrap(),
1698            ModelId::ClaudeSonnet4
1699        );
1700        assert_eq!(
1701            models::CLAUDE_OPUS_4_1.parse::<ModelId>().unwrap(),
1702            ModelId::ClaudeOpus41
1703        );
1704        // DeepSeek models
1705        assert_eq!(
1706            models::DEEPSEEK_CHAT.parse::<ModelId>().unwrap(),
1707            ModelId::DeepSeekChat
1708        );
1709        assert_eq!(
1710            models::DEEPSEEK_REASONER.parse::<ModelId>().unwrap(),
1711            ModelId::DeepSeekReasoner
1712        );
1713        // xAI models
1714        assert_eq!(
1715            models::xai::GROK_4.parse::<ModelId>().unwrap(),
1716            ModelId::XaiGrok4
1717        );
1718        assert_eq!(
1719            models::xai::GROK_4_MINI.parse::<ModelId>().unwrap(),
1720            ModelId::XaiGrok4Mini
1721        );
1722        assert_eq!(
1723            models::xai::GROK_4_CODE.parse::<ModelId>().unwrap(),
1724            ModelId::XaiGrok4Code
1725        );
1726        assert_eq!(
1727            models::xai::GROK_4_CODE_LATEST.parse::<ModelId>().unwrap(),
1728            ModelId::XaiGrok4CodeLatest
1729        );
1730        assert_eq!(
1731            models::xai::GROK_4_VISION.parse::<ModelId>().unwrap(),
1732            ModelId::XaiGrok4Vision
1733        );
1734        // Z.AI models
1735        assert_eq!(
1736            models::zai::GLM_4_6.parse::<ModelId>().unwrap(),
1737            ModelId::ZaiGlm46
1738        );
1739        assert_eq!(
1740            models::zai::GLM_4_5.parse::<ModelId>().unwrap(),
1741            ModelId::ZaiGlm45
1742        );
1743        assert_eq!(
1744            models::zai::GLM_4_5_AIR.parse::<ModelId>().unwrap(),
1745            ModelId::ZaiGlm45Air
1746        );
1747        assert_eq!(
1748            models::zai::GLM_4_5_X.parse::<ModelId>().unwrap(),
1749            ModelId::ZaiGlm45X
1750        );
1751        assert_eq!(
1752            models::zai::GLM_4_5_AIRX.parse::<ModelId>().unwrap(),
1753            ModelId::ZaiGlm45Airx
1754        );
1755        assert_eq!(
1756            models::zai::GLM_4_5_FLASH.parse::<ModelId>().unwrap(),
1757            ModelId::ZaiGlm45Flash
1758        );
1759        assert_eq!(
1760            models::zai::GLM_4_32B_0414_128K.parse::<ModelId>().unwrap(),
1761            ModelId::ZaiGlm432b0414128k
1762        );
1763        for entry in openrouter_generated::ENTRIES {
1764            assert_eq!(entry.id.parse::<ModelId>().unwrap(), entry.variant);
1765        }
1766        // Invalid model
1767        assert!("invalid-model".parse::<ModelId>().is_err());
1768    }
1769
1770    #[test]
1771    fn test_provider_parsing() {
1772        assert_eq!("gemini".parse::<Provider>().unwrap(), Provider::Gemini);
1773        assert_eq!("openai".parse::<Provider>().unwrap(), Provider::OpenAI);
1774        assert_eq!(
1775            "anthropic".parse::<Provider>().unwrap(),
1776            Provider::Anthropic
1777        );
1778        assert_eq!("deepseek".parse::<Provider>().unwrap(), Provider::DeepSeek);
1779        assert_eq!(
1780            "openrouter".parse::<Provider>().unwrap(),
1781            Provider::OpenRouter
1782        );
1783        assert_eq!("xai".parse::<Provider>().unwrap(), Provider::XAI);
1784        assert_eq!("zai".parse::<Provider>().unwrap(), Provider::ZAI);
1785        assert_eq!("moonshot".parse::<Provider>().unwrap(), Provider::Moonshot);
1786        assert_eq!("lmstudio".parse::<Provider>().unwrap(), Provider::LmStudio);
1787        assert!("invalid-provider".parse::<Provider>().is_err());
1788    }
1789
1790    #[test]
1791    fn test_model_providers() {
1792        assert_eq!(ModelId::Gemini25FlashPreview.provider(), Provider::Gemini);
1793        assert_eq!(ModelId::GPT5.provider(), Provider::OpenAI);
1794        assert_eq!(ModelId::GPT5Codex.provider(), Provider::OpenAI);
1795        assert_eq!(ModelId::ClaudeSonnet45.provider(), Provider::Anthropic);
1796        assert_eq!(ModelId::ClaudeHaiku45.provider(), Provider::Anthropic);
1797        assert_eq!(ModelId::ClaudeSonnet4.provider(), Provider::Anthropic);
1798        assert_eq!(ModelId::DeepSeekChat.provider(), Provider::DeepSeek);
1799        assert_eq!(ModelId::XaiGrok4.provider(), Provider::XAI);
1800        assert_eq!(ModelId::ZaiGlm46.provider(), Provider::ZAI);
1801        assert_eq!(ModelId::OllamaGptOss20b.provider(), Provider::Ollama);
1802        assert_eq!(ModelId::OllamaGptOss120bCloud.provider(), Provider::Ollama);
1803        assert_eq!(ModelId::OllamaQwen317b.provider(), Provider::Ollama);
1804        assert_eq!(
1805            ModelId::LmStudioMetaLlama38BInstruct.provider(),
1806            Provider::LmStudio
1807        );
1808        assert_eq!(
1809            ModelId::LmStudioMetaLlama318BInstruct.provider(),
1810            Provider::LmStudio
1811        );
1812        assert_eq!(
1813            ModelId::LmStudioQwen257BInstruct.provider(),
1814            Provider::LmStudio
1815        );
1816        assert_eq!(ModelId::LmStudioGemma22BIt.provider(), Provider::LmStudio);
1817        assert_eq!(ModelId::LmStudioGemma29BIt.provider(), Provider::LmStudio);
1818        assert_eq!(
1819            ModelId::LmStudioPhi31Mini4kInstruct.provider(),
1820            Provider::LmStudio
1821        );
1822        assert_eq!(
1823            ModelId::OpenRouterGrokCodeFast1.provider(),
1824            Provider::OpenRouter
1825        );
1826        assert_eq!(
1827            ModelId::OpenRouterAnthropicClaudeSonnet45.provider(),
1828            Provider::OpenRouter
1829        );
1830
1831        for entry in openrouter_generated::ENTRIES {
1832            assert_eq!(entry.variant.provider(), Provider::OpenRouter);
1833        }
1834    }
1835
1836    #[test]
1837    fn test_provider_defaults() {
1838        assert_eq!(
1839            ModelId::default_orchestrator_for_provider(Provider::Gemini),
1840            ModelId::Gemini25Pro
1841        );
1842        assert_eq!(
1843            ModelId::default_orchestrator_for_provider(Provider::OpenAI),
1844            ModelId::GPT5
1845        );
1846        assert_eq!(
1847            ModelId::default_orchestrator_for_provider(Provider::Anthropic),
1848            ModelId::ClaudeOpus45
1849        );
1850        assert_eq!(
1851            ModelId::default_orchestrator_for_provider(Provider::DeepSeek),
1852            ModelId::DeepSeekReasoner
1853        );
1854        assert_eq!(
1855            ModelId::default_orchestrator_for_provider(Provider::OpenRouter),
1856            ModelId::OpenRouterGrokCodeFast1
1857        );
1858        assert_eq!(
1859            ModelId::default_orchestrator_for_provider(Provider::XAI),
1860            ModelId::XaiGrok4
1861        );
1862        assert_eq!(
1863            ModelId::default_orchestrator_for_provider(Provider::Ollama),
1864            ModelId::OllamaGptOss20b
1865        );
1866        assert_eq!(
1867            ModelId::default_orchestrator_for_provider(Provider::LmStudio),
1868            ModelId::LmStudioMetaLlama318BInstruct
1869        );
1870        assert_eq!(
1871            ModelId::default_orchestrator_for_provider(Provider::ZAI),
1872            ModelId::ZaiGlm46
1873        );
1874
1875        assert_eq!(
1876            ModelId::default_subagent_for_provider(Provider::Gemini),
1877            ModelId::Gemini25FlashPreview
1878        );
1879        assert_eq!(
1880            ModelId::default_subagent_for_provider(Provider::OpenAI),
1881            ModelId::GPT5Mini
1882        );
1883        assert_eq!(
1884            ModelId::default_subagent_for_provider(Provider::Anthropic),
1885            ModelId::ClaudeSonnet45
1886        );
1887        assert_eq!(
1888            ModelId::default_subagent_for_provider(Provider::DeepSeek),
1889            ModelId::DeepSeekChat
1890        );
1891        assert_eq!(
1892            ModelId::default_subagent_for_provider(Provider::OpenRouter),
1893            ModelId::OpenRouterGrokCodeFast1
1894        );
1895        assert_eq!(
1896            ModelId::default_subagent_for_provider(Provider::XAI),
1897            ModelId::XaiGrok4Code
1898        );
1899        assert_eq!(
1900            ModelId::default_subagent_for_provider(Provider::Ollama),
1901            ModelId::OllamaQwen317b
1902        );
1903        assert_eq!(
1904            ModelId::default_subagent_for_provider(Provider::LmStudio),
1905            ModelId::LmStudioQwen257BInstruct
1906        );
1907        assert_eq!(
1908            ModelId::default_subagent_for_provider(Provider::ZAI),
1909            ModelId::ZaiGlm45Flash
1910        );
1911        // Moonshot provider now uses OpenRouter models instead of direct API
1912
1913        assert_eq!(
1914            ModelId::default_single_for_provider(Provider::DeepSeek),
1915            ModelId::DeepSeekReasoner
1916        );
1917        assert_eq!(
1918            ModelId::default_single_for_provider(Provider::Ollama),
1919            ModelId::OllamaGptOss20b
1920        );
1921        assert_eq!(
1922            ModelId::default_single_for_provider(Provider::LmStudio),
1923            ModelId::LmStudioMetaLlama318BInstruct
1924        );
1925    }
1926
1927    #[test]
1928    fn test_model_defaults() {
1929        assert_eq!(ModelId::default(), ModelId::Gemini25FlashPreview);
1930        assert_eq!(ModelId::default_orchestrator(), ModelId::Gemini25Pro);
1931        assert_eq!(ModelId::default_subagent(), ModelId::Gemini25FlashPreview);
1932    }
1933
1934    #[test]
1935    fn test_model_variants() {
1936        // Flash variants
1937        assert!(ModelId::Gemini25FlashPreview.is_flash_variant());
1938        assert!(ModelId::Gemini25Flash.is_flash_variant());
1939        assert!(ModelId::Gemini25FlashLite.is_flash_variant());
1940        assert!(!ModelId::GPT5.is_flash_variant());
1941        assert!(ModelId::ZaiGlm45Flash.is_flash_variant());
1942
1943        // Pro variants
1944        assert!(ModelId::Gemini25Pro.is_pro_variant());
1945        assert!(ModelId::GPT5.is_pro_variant());
1946        assert!(ModelId::GPT5Codex.is_pro_variant());
1947        assert!(ModelId::DeepSeekReasoner.is_pro_variant());
1948        assert!(ModelId::ZaiGlm46.is_pro_variant());
1949        assert!(!ModelId::Gemini25FlashPreview.is_pro_variant());
1950
1951        // Efficient variants
1952        assert!(ModelId::Gemini25FlashPreview.is_efficient_variant());
1953        assert!(ModelId::Gemini25Flash.is_efficient_variant());
1954        assert!(ModelId::Gemini25FlashLite.is_efficient_variant());
1955        assert!(ModelId::GPT5Mini.is_efficient_variant());
1956        assert!(ModelId::ClaudeHaiku45.is_efficient_variant());
1957        assert!(ModelId::XaiGrok4Code.is_efficient_variant());
1958        assert!(ModelId::DeepSeekChat.is_efficient_variant());
1959        assert!(ModelId::ZaiGlm45Air.is_efficient_variant());
1960        assert!(ModelId::ZaiGlm45Airx.is_efficient_variant());
1961        assert!(ModelId::ZaiGlm45Flash.is_efficient_variant());
1962        assert!(!ModelId::GPT5.is_efficient_variant());
1963
1964        for entry in openrouter_generated::ENTRIES {
1965            assert_eq!(entry.variant.is_efficient_variant(), entry.efficient);
1966        }
1967
1968        // Top tier models
1969        assert!(ModelId::Gemini25Pro.is_top_tier());
1970        assert!(ModelId::GPT5.is_top_tier());
1971        assert!(ModelId::GPT5Codex.is_top_tier());
1972        assert!(ModelId::ClaudeSonnet45.is_top_tier());
1973        assert!(ModelId::ClaudeSonnet4.is_top_tier());
1974        assert!(ModelId::XaiGrok4.is_top_tier());
1975        assert!(ModelId::XaiGrok4CodeLatest.is_top_tier());
1976        assert!(ModelId::DeepSeekReasoner.is_top_tier());
1977        assert!(ModelId::ZaiGlm46.is_top_tier());
1978        assert!(!ModelId::Gemini25FlashPreview.is_top_tier());
1979        assert!(!ModelId::ClaudeHaiku45.is_top_tier());
1980
1981        for entry in openrouter_generated::ENTRIES {
1982            assert_eq!(entry.variant.is_top_tier(), entry.top_tier);
1983        }
1984    }
1985
1986    #[test]
1987    fn test_model_generation() {
1988        // Gemini generations
1989        assert_eq!(ModelId::Gemini25FlashPreview.generation(), "2.5");
1990        assert_eq!(ModelId::Gemini25Flash.generation(), "2.5");
1991        assert_eq!(ModelId::Gemini25FlashLite.generation(), "2.5");
1992        assert_eq!(ModelId::Gemini25Pro.generation(), "2.5");
1993
1994        // OpenAI generations
1995        assert_eq!(ModelId::GPT5.generation(), "5");
1996        assert_eq!(ModelId::GPT5Codex.generation(), "5");
1997        assert_eq!(ModelId::GPT5Mini.generation(), "5");
1998        assert_eq!(ModelId::GPT5Nano.generation(), "5");
1999        assert_eq!(ModelId::CodexMiniLatest.generation(), "5");
2000
2001        // Anthropic generations
2002        assert_eq!(ModelId::ClaudeSonnet45.generation(), "4.5");
2003        assert_eq!(ModelId::ClaudeHaiku45.generation(), "4.5");
2004        assert_eq!(ModelId::ClaudeSonnet4.generation(), "4");
2005        assert_eq!(ModelId::ClaudeOpus41.generation(), "4.1");
2006
2007        // DeepSeek generations
2008        assert_eq!(ModelId::DeepSeekChat.generation(), "V3.2-Exp");
2009        assert_eq!(ModelId::DeepSeekReasoner.generation(), "V3.2-Exp");
2010
2011        // xAI generations
2012        assert_eq!(ModelId::XaiGrok4.generation(), "4");
2013        assert_eq!(ModelId::XaiGrok4Mini.generation(), "4");
2014        assert_eq!(ModelId::XaiGrok4Code.generation(), "4");
2015        assert_eq!(ModelId::XaiGrok4CodeLatest.generation(), "4");
2016        assert_eq!(ModelId::XaiGrok4Vision.generation(), "4");
2017        // Z.AI generations
2018        assert_eq!(ModelId::ZaiGlm46.generation(), "4.6");
2019        assert_eq!(ModelId::ZaiGlm45.generation(), "4.5");
2020        assert_eq!(ModelId::ZaiGlm45Air.generation(), "4.5");
2021        assert_eq!(ModelId::ZaiGlm45X.generation(), "4.5");
2022        assert_eq!(ModelId::ZaiGlm45Airx.generation(), "4.5");
2023        assert_eq!(ModelId::ZaiGlm45Flash.generation(), "4.5");
2024        assert_eq!(ModelId::ZaiGlm432b0414128k.generation(), "4-32B");
2025        assert_eq!(
2026            ModelId::LmStudioMetaLlama38BInstruct.generation(),
2027            "meta-llama-3"
2028        );
2029        assert_eq!(
2030            ModelId::LmStudioMetaLlama318BInstruct.generation(),
2031            "meta-llama-3.1"
2032        );
2033        assert_eq!(ModelId::LmStudioQwen257BInstruct.generation(), "qwen2.5");
2034        assert_eq!(ModelId::LmStudioGemma22BIt.generation(), "gemma-2");
2035        assert_eq!(ModelId::LmStudioGemma29BIt.generation(), "gemma-2");
2036        assert_eq!(ModelId::LmStudioPhi31Mini4kInstruct.generation(), "phi-3.1");
2037
2038        for entry in openrouter_generated::ENTRIES {
2039            assert_eq!(entry.variant.generation(), entry.generation);
2040        }
2041    }
2042
2043    #[test]
2044    fn test_models_for_provider() {
2045        let gemini_models = ModelId::models_for_provider(Provider::Gemini);
2046        assert!(gemini_models.contains(&ModelId::Gemini25Pro));
2047        assert!(!gemini_models.contains(&ModelId::GPT5));
2048
2049        let openai_models = ModelId::models_for_provider(Provider::OpenAI);
2050        assert!(openai_models.contains(&ModelId::GPT5));
2051        assert!(openai_models.contains(&ModelId::GPT5Codex));
2052        assert!(!openai_models.contains(&ModelId::Gemini25Pro));
2053
2054        let anthropic_models = ModelId::models_for_provider(Provider::Anthropic);
2055        assert!(anthropic_models.contains(&ModelId::ClaudeSonnet45));
2056        assert!(anthropic_models.contains(&ModelId::ClaudeHaiku45));
2057        assert!(anthropic_models.contains(&ModelId::ClaudeSonnet4));
2058        assert!(!anthropic_models.contains(&ModelId::GPT5));
2059
2060        let deepseek_models = ModelId::models_for_provider(Provider::DeepSeek);
2061        assert!(deepseek_models.contains(&ModelId::DeepSeekChat));
2062        assert!(deepseek_models.contains(&ModelId::DeepSeekReasoner));
2063
2064        let openrouter_models = ModelId::models_for_provider(Provider::OpenRouter);
2065        for entry in openrouter_generated::ENTRIES {
2066            assert!(openrouter_models.contains(&entry.variant));
2067        }
2068
2069        let xai_models = ModelId::models_for_provider(Provider::XAI);
2070        assert!(xai_models.contains(&ModelId::XaiGrok4));
2071        assert!(xai_models.contains(&ModelId::XaiGrok4Mini));
2072        assert!(xai_models.contains(&ModelId::XaiGrok4Code));
2073        assert!(xai_models.contains(&ModelId::XaiGrok4CodeLatest));
2074        assert!(xai_models.contains(&ModelId::XaiGrok4Vision));
2075
2076        let zai_models = ModelId::models_for_provider(Provider::ZAI);
2077        assert!(zai_models.contains(&ModelId::ZaiGlm46));
2078        assert!(zai_models.contains(&ModelId::ZaiGlm45));
2079        assert!(zai_models.contains(&ModelId::ZaiGlm45Air));
2080        assert!(zai_models.contains(&ModelId::ZaiGlm45X));
2081        assert!(zai_models.contains(&ModelId::ZaiGlm45Airx));
2082        assert!(zai_models.contains(&ModelId::ZaiGlm45Flash));
2083        assert!(zai_models.contains(&ModelId::ZaiGlm432b0414128k));
2084
2085        let moonshot_models = ModelId::models_for_provider(Provider::Moonshot);
2086        assert_eq!(moonshot_models.len(), 0); // No Moonshot models available
2087
2088        let ollama_models = ModelId::models_for_provider(Provider::Ollama);
2089        assert!(ollama_models.contains(&ModelId::OllamaGptOss20b));
2090        assert!(ollama_models.contains(&ModelId::OllamaGptOss20bCloud));
2091        assert!(ollama_models.contains(&ModelId::OllamaGptOss120bCloud));
2092        assert!(ollama_models.contains(&ModelId::OllamaQwen317b));
2093        assert!(ollama_models.contains(&ModelId::OllamaDeepseekV32Cloud));
2094        assert!(ollama_models.contains(&ModelId::OllamaQwen3Next80bCloud));
2095        assert!(ollama_models.contains(&ModelId::OllamaMistralLarge3675bCloud));
2096        assert!(ollama_models.contains(&ModelId::OllamaKimiK2ThinkingCloud));
2097        assert!(ollama_models.contains(&ModelId::OllamaQwen3Coder480bCloud));
2098        assert!(ollama_models.contains(&ModelId::OllamaGlm46Cloud));
2099        assert!(ollama_models.contains(&ModelId::OllamaGemini3ProPreviewLatestCloud));
2100        assert!(ollama_models.contains(&ModelId::OllamaGemini3FlashPreviewCloud));
2101        assert!(ollama_models.contains(&ModelId::OllamaDevstral2123bCloud));
2102        assert!(ollama_models.contains(&ModelId::OllamaMinimaxM2Cloud));
2103        assert!(ollama_models.contains(&ModelId::OllamaMinimaxM21Cloud));
2104        assert!(ollama_models.contains(&ModelId::OllamaNemotron3Nano30bCloud));
2105        assert!(ollama_models.contains(&ModelId::OllamaGlm47Cloud));
2106        assert_eq!(ollama_models.len(), 17); // 17 Ollama models
2107
2108        let lmstudio_models = ModelId::models_for_provider(Provider::LmStudio);
2109        assert!(lmstudio_models.contains(&ModelId::LmStudioMetaLlama38BInstruct));
2110        assert!(lmstudio_models.contains(&ModelId::LmStudioMetaLlama318BInstruct));
2111        assert!(lmstudio_models.contains(&ModelId::LmStudioQwen257BInstruct));
2112        assert!(lmstudio_models.contains(&ModelId::LmStudioGemma22BIt));
2113        assert!(lmstudio_models.contains(&ModelId::LmStudioGemma29BIt));
2114        assert!(lmstudio_models.contains(&ModelId::LmStudioPhi31Mini4kInstruct));
2115        assert_eq!(lmstudio_models.len(), 6);
2116    }
2117
2118    #[test]
2119    fn test_ollama_cloud_models() {
2120        use crate::constants::models;
2121
2122        // Test parsing of new Ollama cloud models
2123        let model_pairs = vec![
2124            (
2125                ModelId::OllamaGptOss20bCloud,
2126                models::ollama::GPT_OSS_20B_CLOUD,
2127            ),
2128            (
2129                ModelId::OllamaGptOss120bCloud,
2130                models::ollama::GPT_OSS_120B_CLOUD,
2131            ),
2132            (
2133                ModelId::OllamaDeepseekV32Cloud,
2134                models::ollama::DEEPSEEK_V32_CLOUD,
2135            ),
2136            (
2137                ModelId::OllamaQwen3Coder480bCloud,
2138                models::ollama::QWEN3_CODER_480B_CLOUD,
2139            ),
2140            (ModelId::OllamaGlm46Cloud, models::ollama::GLM_46_CLOUD),
2141            (
2142                ModelId::OllamaMinimaxM2Cloud,
2143                models::ollama::MINIMAX_M2_CLOUD,
2144            ),
2145        ];
2146
2147        for (model_id, expected_str) in model_pairs {
2148            assert_eq!(model_id.as_str(), expected_str);
2149            assert_eq!(ModelId::from_str(expected_str).unwrap(), model_id);
2150            assert_eq!(model_id.provider(), Provider::Ollama);
2151
2152            // Verify display names are not empty
2153            assert!(!model_id.display_name().is_empty());
2154
2155            // Verify descriptions are not empty
2156            assert!(!model_id.description().is_empty());
2157
2158            // Verify generation is not empty
2159            assert!(!model_id.generation().is_empty());
2160        }
2161    }
2162
2163    #[test]
2164    fn test_fallback_models() {
2165        let fallbacks = ModelId::fallback_models();
2166        assert!(!fallbacks.is_empty());
2167        assert!(fallbacks.contains(&ModelId::Gemini25Pro));
2168        assert!(fallbacks.contains(&ModelId::GPT5));
2169        assert!(fallbacks.contains(&ModelId::ClaudeOpus41));
2170        assert!(fallbacks.contains(&ModelId::ClaudeSonnet45));
2171        assert!(fallbacks.contains(&ModelId::DeepSeekReasoner));
2172        assert!(fallbacks.contains(&ModelId::XaiGrok4));
2173        assert!(fallbacks.contains(&ModelId::ZaiGlm46));
2174        assert!(fallbacks.contains(&ModelId::OpenRouterGrokCodeFast1));
2175    }
2176}