vtcode_config/models.rs

//! Model configuration and identification module
//!
//! This module provides a centralized enum for model identifiers and their configurations,
//! replacing hardcoded model strings throughout the codebase for better maintainability.
//! Read the model list in `docs/models.json`.
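//!
//! A minimal usage sketch (illustrative only; assumes the crate is imported as
//! `vtcode_config` and is not compiled as a doctest here):
//!
//! ```ignore
//! use std::str::FromStr;
//! use vtcode_config::models::{ModelId, Provider};
//!
//! // Parse a provider slug instead of matching on hardcoded strings.
//! let provider = Provider::from_str("openai").expect("known provider");
//! assert_eq!(provider.default_api_key_env(), "OPENAI_API_KEY");
//!
//! // The default model identifier resolves to the Gemini provider.
//! let model = ModelId::default();
//! assert_eq!(model.provider(), Provider::Gemini);
//! ```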

use serde::{Deserialize, Serialize};
use std::fmt;
use std::str::FromStr;

#[derive(Clone, Copy)]
pub struct OpenRouterMetadata {
    id: &'static str,
    vendor: &'static str,
    display: &'static str,
    description: &'static str,
    efficient: bool,
    top_tier: bool,
    generation: &'static str,
    reasoning: bool,
    tool_call: bool,
}

/// Supported AI model providers
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Default)]
pub enum Provider {
    /// Google Gemini models
    #[default]
    Gemini,
    /// OpenAI GPT models
    OpenAI,
    /// Anthropic Claude models
    Anthropic,
    /// DeepSeek native models
    DeepSeek,
    /// OpenRouter marketplace models
    OpenRouter,
    /// Local Ollama models
    Ollama,
    /// LM Studio local server (OpenAI-compatible)
    LmStudio,
    /// Moonshot.ai models
    Moonshot,
    /// xAI Grok models
    XAI,
    /// Z.AI GLM models
    ZAI,
    /// MiniMax models
    Minimax,
    /// Hugging Face Inference Providers
    HuggingFace,
}

impl Provider {
    /// Get the default API key environment variable for this provider
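    ///
    /// Illustrative sketch (not compiled here); the mapping shown is the one
    /// defined by this method:
    ///
    /// ```ignore
    /// use vtcode_config::models::Provider;
    ///
    /// assert_eq!(Provider::Anthropic.default_api_key_env(), "ANTHROPIC_API_KEY");
    /// assert_eq!(Provider::HuggingFace.default_api_key_env(), "HF_TOKEN");
    /// ```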
    pub fn default_api_key_env(&self) -> &'static str {
        match self {
            Provider::Gemini => "GEMINI_API_KEY",
            Provider::OpenAI => "OPENAI_API_KEY",
            Provider::Anthropic => "ANTHROPIC_API_KEY",
            Provider::DeepSeek => "DEEPSEEK_API_KEY",
            Provider::OpenRouter => "OPENROUTER_API_KEY",
            Provider::Ollama => "OLLAMA_API_KEY",
            Provider::LmStudio => "LMSTUDIO_API_KEY",
            Provider::Moonshot => "MOONSHOT_API_KEY",
            Provider::XAI => "XAI_API_KEY",
            Provider::ZAI => "ZAI_API_KEY",
            Provider::Minimax => "MINIMAX_API_KEY",
            Provider::HuggingFace => "HF_TOKEN",
        }
    }

    /// Get all supported providers
    pub fn all_providers() -> Vec<Provider> {
        vec![
            Provider::OpenAI,
            Provider::Anthropic,
            Provider::Gemini,
            Provider::DeepSeek,
            Provider::OpenRouter,
            Provider::Ollama,
            Provider::LmStudio,
            Provider::Moonshot,
            Provider::XAI,
            Provider::ZAI,
            Provider::Minimax,
            Provider::HuggingFace,
        ]
    }

    /// Human-friendly label for display purposes
    pub fn label(&self) -> &'static str {
        match self {
            Provider::Gemini => "Gemini",
            Provider::OpenAI => "OpenAI",
            Provider::Anthropic => "Anthropic",
            Provider::DeepSeek => "DeepSeek",
            Provider::OpenRouter => "OpenRouter",
            Provider::Ollama => "Ollama",
            Provider::LmStudio => "LM Studio",
            Provider::Moonshot => "Moonshot",
            Provider::XAI => "xAI",
            Provider::ZAI => "Z.AI",
            Provider::Minimax => "MiniMax",
            Provider::HuggingFace => "Hugging Face",
        }
    }

    /// Determine if the provider supports configurable reasoning effort for the model
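    ///
    /// A minimal sketch (illustrative; model strings come from
    /// `crate::constants::models`, so the literal below is an assumption):
    ///
    /// ```ignore
    /// use vtcode_config::models::Provider;
    ///
    /// // LM Studio never exposes a configurable reasoning-effort knob here.
    /// assert!(!Provider::LmStudio.supports_reasoning_effort("any-model"));
    ///
    /// // DeepSeek reports support only for its reasoner model constant.
    /// let supported = Provider::DeepSeek.supports_reasoning_effort("deepseek-reasoner");
    /// ```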
    pub fn supports_reasoning_effort(&self, model: &str) -> bool {
        use crate::constants::models;

        match self {
            Provider::Gemini => models::google::REASONING_MODELS.contains(&model),
            Provider::OpenAI => models::openai::REASONING_MODELS.contains(&model),
            Provider::Anthropic => models::anthropic::REASONING_MODELS.contains(&model),
            Provider::DeepSeek => model == models::deepseek::DEEPSEEK_REASONER,
            Provider::OpenRouter => {
                if let Ok(model_id) = ModelId::from_str(model) {
                    return model_id.is_reasoning_variant();
                }
                models::openrouter::REASONING_MODELS.contains(&model)
            }
            Provider::Ollama => models::ollama::REASONING_LEVEL_MODELS.contains(&model),
            Provider::LmStudio => false,
            Provider::Moonshot => false,
            Provider::XAI => model == models::xai::GROK_4 || model == models::xai::GROK_4_CODE,
            Provider::ZAI => model == models::zai::GLM_4_6,
            Provider::Minimax => {
                model == models::minimax::MINIMAX_M2_1
                    || model == models::minimax::MINIMAX_M2_1_LIGHTNING
                    || model == models::minimax::MINIMAX_M2
            }
            Provider::HuggingFace => models::huggingface::REASONING_MODELS.contains(&model),
        }
    }
}

impl fmt::Display for Provider {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Provider::Gemini => write!(f, "gemini"),
            Provider::OpenAI => write!(f, "openai"),
            Provider::Anthropic => write!(f, "anthropic"),
            Provider::DeepSeek => write!(f, "deepseek"),
            Provider::OpenRouter => write!(f, "openrouter"),
            Provider::Ollama => write!(f, "ollama"),
            Provider::LmStudio => write!(f, "lmstudio"),
            Provider::Moonshot => write!(f, "moonshot"),
            Provider::XAI => write!(f, "xai"),
            Provider::ZAI => write!(f, "zai"),
            Provider::Minimax => write!(f, "minimax"),
            Provider::HuggingFace => write!(f, "huggingface"),
        }
    }
}

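/// Parsing is case-insensitive and accepts the same lowercase slugs that
/// `Display` emits. A small round-trip sketch (illustrative only):
///
/// ```ignore
/// use std::str::FromStr;
/// use vtcode_config::models::Provider;
///
/// let provider = Provider::from_str("ZAI").unwrap();
/// assert_eq!(provider, Provider::ZAI);
/// assert_eq!(provider.to_string(), "zai");
/// ```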
impl FromStr for Provider {
    type Err = ModelParseError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "gemini" => Ok(Provider::Gemini),
            "openai" => Ok(Provider::OpenAI),
            "anthropic" => Ok(Provider::Anthropic),
            "deepseek" => Ok(Provider::DeepSeek),
            "openrouter" => Ok(Provider::OpenRouter),
            "ollama" => Ok(Provider::Ollama),
            "lmstudio" => Ok(Provider::LmStudio),
            "moonshot" => Ok(Provider::Moonshot),
            "xai" => Ok(Provider::XAI),
            "zai" => Ok(Provider::ZAI),
            "minimax" => Ok(Provider::Minimax),
            "huggingface" => Ok(Provider::HuggingFace),
            _ => Err(ModelParseError::InvalidProvider(s.to_string())),
        }
    }
}

/// Centralized enum for all supported model identifiers
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum ModelId {
    // Gemini models
    /// Gemini 2.5 Flash Preview - Latest fast model with advanced capabilities
    #[default]
    Gemini25FlashPreview,
    /// Gemini 2.5 Flash - Legacy alias for flash preview
    Gemini25Flash,
    /// Gemini 2.5 Flash Lite - Legacy alias for flash preview (lite)
    Gemini25FlashLite,
    /// Gemini 2.5 Pro - Latest most capable Gemini model
    Gemini25Pro,
    /// Gemini 3 Pro Preview - Preview of next-generation Gemini model
    Gemini3ProPreview,

    // OpenAI models
    /// GPT-5 - Latest most capable OpenAI model (2025-08-07)
    GPT5,
    /// GPT-5 Codex - Code-focused GPT-5 variant using the Responses API
    GPT5Codex,
    /// GPT-5 Mini - Latest efficient OpenAI model (2025-08-07)
    GPT5Mini,
    /// GPT-5 Nano - Latest most cost-effective OpenAI model (2025-08-07)
    GPT5Nano,
    /// Codex Mini Latest - Latest Codex model for code generation (2025-05-16)
    CodexMiniLatest,
    /// GPT-OSS 20B - OpenAI's open-source 20B parameter model using harmony
    OpenAIGptOss20b,
    /// GPT-OSS 120B - OpenAI's open-source 120B parameter model using harmony
    OpenAIGptOss120b,

    // Anthropic models
    /// Claude Opus 4.5 - Latest flagship Anthropic model with exceptional reasoning (2025-11-01)
    ClaudeOpus45,
    /// Claude Opus 4.1 - Previous most capable Anthropic model (2025-08-05)
    ClaudeOpus41,
    /// Claude Sonnet 4.5 - Latest balanced Anthropic model (2025-10-15)
    ClaudeSonnet45,
    /// Claude Haiku 4.5 - Latest efficient Anthropic model (2025-10-15)
    ClaudeHaiku45,
    /// Claude Sonnet 4 - Previous balanced Anthropic model (2025-05-14)
    ClaudeSonnet4,

    // DeepSeek models
    /// DeepSeek V3.2 Chat - Fast non-thinking mode
    DeepSeekChat,
    /// DeepSeek V3.2 Reasoner - Thinking mode with structured reasoning output
    DeepSeekReasoner,
    // Hugging Face models
    /// DeepSeek V3.2 via Hugging Face router
    HuggingFaceDeepseekV32,
    /// OpenAI GPT-OSS 20B via Hugging Face router
    HuggingFaceOpenAIGptOss20b,
    /// OpenAI GPT-OSS 120B via Hugging Face router
    HuggingFaceOpenAIGptOss120b,
    /// Z.AI GLM-4.7 via Hugging Face router
    HuggingFaceGlm47,
    /// MoonshotAI Kimi K2 Thinking via Hugging Face router
    HuggingFaceKimiK2Thinking,
    /// MiniMax M2.1 via Novita on Hugging Face router - Enhanced reasoning
    HuggingFaceMinimaxM21Novita,
    /// DeepSeek V3.2 via Novita on Hugging Face router
    HuggingFaceDeepseekV32Novita,
    /// Xiaomi MiMo-V2-Flash via Novita on Hugging Face router
    HuggingFaceXiaomiMimoV2FlashNovita,

    // xAI models
    /// Grok-4 - Flagship xAI model with advanced reasoning
    XaiGrok4,
    /// Grok-4 Mini - Efficient xAI model variant
    XaiGrok4Mini,
    /// Grok-4 Code - Code-focused Grok deployment
    XaiGrok4Code,
    /// Grok-4 Code Latest - Latest Grok code model with enhanced reasoning tools
    XaiGrok4CodeLatest,
    /// Grok-4 Vision - Multimodal Grok model
    XaiGrok4Vision,

    // Z.AI models
    /// GLM-4-Plus - Flagship GLM model with top-tier reasoning
    ZaiGlm4Plus,
    /// GLM-4-Plus Deep Thinking - Flagship GLM with forced reasoning
    ZaiGlm4PlusDeepThinking,
    /// GLM-4.7 - Latest flagship GLM reasoning model
    ZaiGlm47,
    /// GLM-4.7 Deep Thinking - GLM-4.7 with forced reasoning
    ZaiGlm47DeepThinking,
    /// GLM-4.6 - Previous flagship GLM reasoning model
    ZaiGlm46,
    /// GLM-4.6 Deep Thinking - GLM-4.6 with forced reasoning
    ZaiGlm46DeepThinking,
    /// GLM-4.6V - Vision-capable GLM release
    ZaiGlm46V,
    /// GLM-4.6V-Flash - Latency-optimised vision GLM
    ZaiGlm46VFlash,
    /// GLM-4.6V-FlashX - Hybrid vision GLM variant
    ZaiGlm46VFlashX,
    /// GLM-4.5 - Balanced GLM release for general tasks
    ZaiGlm45,
    /// GLM-4.5 Deep Thinking - GLM-4.5 with forced reasoning
    ZaiGlm45DeepThinking,
    /// GLM-4.5-Air - Efficient GLM variant
    ZaiGlm45Air,
    /// GLM-4.5-X - Enhanced capability GLM variant
    ZaiGlm45X,
    /// GLM-4.5-AirX - Hybrid efficient GLM variant
    ZaiGlm45Airx,
    /// GLM-4.5-Flash - Low-latency GLM variant
    ZaiGlm45Flash,
    /// GLM-4.5V - Vision-capable GLM release
    ZaiGlm45V,
    /// GLM-4-32B-0414-128K - Legacy long-context GLM deployment
    ZaiGlm432b0414128k,

    // Ollama models
    /// GPT-OSS 20B - Open-weight GPT-OSS 20B model served via Ollama locally
    OllamaGptOss20b,
    /// GPT-OSS 20B Cloud - Cloud-hosted GPT-OSS 20B served via Ollama Cloud
    OllamaGptOss20bCloud,
    /// GPT-OSS 120B Cloud - Cloud-hosted GPT-OSS 120B served via Ollama Cloud
    OllamaGptOss120bCloud,
    /// Qwen3 1.7B - Qwen3 1.7B model served via Ollama
    OllamaQwen317b,
    /// DeepSeek V3.2 Cloud - DeepSeek V3.2 reasoning deployment via Ollama Cloud
    OllamaDeepseekV32Cloud,
    /// Qwen3 Next 80B Cloud - Next-generation Qwen3 80B via Ollama Cloud
    OllamaQwen3Next80bCloud,
    /// Mistral Large 3 675B Cloud - Mistral Large 3 reasoning model via Ollama Cloud
    OllamaMistralLarge3675bCloud,
    /// Kimi K2 Thinking Cloud - MoonshotAI Kimi K2 thinking model via Ollama Cloud
    OllamaKimiK2ThinkingCloud,
    /// Qwen3 Coder 480B Cloud - Cloud-hosted Qwen3 Coder model served via Ollama Cloud
    OllamaQwen3Coder480bCloud,
    /// GLM-4.6 Cloud - Cloud-hosted GLM-4.6 model served via Ollama Cloud
    OllamaGlm46Cloud,
    /// Gemini 3 Pro Preview Latest Cloud - Google Gemini 3 Pro Preview via Ollama Cloud
    OllamaGemini3ProPreviewLatestCloud,
    /// Devstral 2 123B Cloud - Mistral Devstral 2 123B model via Ollama Cloud
    OllamaDevstral2123bCloud,
    /// MiniMax-M2 Cloud - Cloud-hosted MiniMax-M2 model served via Ollama Cloud
    OllamaMinimaxM2Cloud,
    /// GLM-4.7 Cloud - Cloud-hosted GLM-4.7 model served via Ollama Cloud
    OllamaGlm47Cloud,
    /// MiniMax-M2.1 Cloud - Cloud-hosted MiniMax-M2.1 model served via Ollama Cloud
    OllamaMinimaxM21Cloud,
    /// Gemini 3 Flash Preview Cloud - Google Gemini 3 Flash Preview via Ollama Cloud
    OllamaGemini3FlashPreviewCloud,
    /// Nemotron-3-Nano 30B Cloud - NVIDIA Nemotron-3-Nano 30B via Ollama Cloud
    OllamaNemotron3Nano30bCloud,

    // MiniMax models
    /// MiniMax-M2.1 - Latest MiniMax model with enhanced code understanding and reasoning
    MinimaxM21,
    /// MiniMax-M2.1-lightning - Fast version of MiniMax-M2.1
    MinimaxM21Lightning,
    /// MiniMax-M2 - MiniMax reasoning-focused model
    MinimaxM2,

    // LM Studio models
    /// Meta Llama 3 8B Instruct served locally via LM Studio
    LmStudioMetaLlama38BInstruct,
    /// Meta Llama 3.1 8B Instruct served locally via LM Studio
    LmStudioMetaLlama318BInstruct,
    /// Qwen2.5 7B Instruct served locally via LM Studio
    LmStudioQwen257BInstruct,
    /// Gemma 2 2B IT served locally via LM Studio
    LmStudioGemma22BIt,
    /// Gemma 2 9B IT served locally via LM Studio
    LmStudioGemma29BIt,
    /// Phi-3.1 Mini 4K Instruct served locally via LM Studio
    LmStudioPhi31Mini4kInstruct,

    // OpenRouter models
    /// Grok Code Fast 1 - Fast OpenRouter coding model powered by xAI Grok
    OpenRouterGrokCodeFast1,
    /// Grok 4 Fast - Reasoning-focused Grok endpoint with transparent traces
    OpenRouterGrok4Fast,
    /// Grok 4.1 Fast - Enhanced Grok 4.1 fast inference with improved reasoning
    OpenRouterGrok41Fast,
    /// Grok 4 - Flagship Grok 4 endpoint exposed through OpenRouter
    OpenRouterGrok4,
    /// GLM 4.6 - Z.AI GLM 4.6 long-context reasoning model
    OpenRouterZaiGlm46,
    /// Kimi K2 0905 - MoonshotAI Kimi K2 0905 MoE release optimised for coding agents
    OpenRouterMoonshotaiKimiK20905,
    /// Kimi K2 Thinking - MoonshotAI reasoning-tier Kimi K2 release optimized for long-horizon agents
    OpenRouterMoonshotaiKimiK2Thinking,
    /// Qwen3 Max - Flagship Qwen3 mixture for general reasoning
    OpenRouterQwen3Max,
    /// Qwen3 235B A22B - Mixture-of-experts Qwen3 235B general model
    OpenRouterQwen3235bA22b,
    /// Qwen3 235B A22B Instruct 2507 - Instruction-tuned Qwen3 235B A22B
    OpenRouterQwen3235bA22b2507,
    /// Qwen3 235B A22B Thinking 2507 - Deliberative Qwen3 235B A22B reasoning release
    OpenRouterQwen3235bA22bThinking2507,
    /// Qwen3 32B - Dense 32B Qwen3 deployment
    OpenRouterQwen332b,
    /// Qwen3 30B A3B - Active-parameter 30B Qwen3 model
    OpenRouterQwen330bA3b,
    /// Qwen3 30B A3B Instruct 2507 - Instruction-tuned Qwen3 30B A3B
    OpenRouterQwen330bA3bInstruct2507,
    /// Qwen3 30B A3B Thinking 2507 - Deliberative Qwen3 30B A3B release
    OpenRouterQwen330bA3bThinking2507,
    /// Qwen3 14B - Lightweight Qwen3 14B model
    OpenRouterQwen314b,
    /// Qwen3 8B - Compact Qwen3 8B deployment
    OpenRouterQwen38b,
    /// Qwen3 Next 80B A3B Instruct - Next-generation Qwen3 instruction model
    OpenRouterQwen3Next80bA3bInstruct,
    /// Qwen3 Next 80B A3B Thinking - Next-generation Qwen3 reasoning release
    OpenRouterQwen3Next80bA3bThinking,
    /// Qwen3 Coder - Qwen3-based coding model tuned for IDE workflows
    OpenRouterQwen3Coder,
    /// Qwen3 Coder Plus - Premium Qwen3 coding model with long context
    OpenRouterQwen3CoderPlus,
    /// Qwen3 Coder Flash - Latency optimised Qwen3 coding model
    OpenRouterQwen3CoderFlash,
    /// Qwen3 Coder 30B A3B Instruct - Large Mixture-of-Experts coding deployment
    OpenRouterQwen3Coder30bA3bInstruct,
    /// DeepSeek V3.2 Chat - Official chat model via OpenRouter
    OpenRouterDeepseekChat,
    /// DeepSeek V3.2 - Standard model with thinking support via OpenRouter
    OpenRouterDeepSeekV32,
    /// DeepSeek V3.2 Reasoner - Thinking mode via OpenRouter
    OpenRouterDeepseekReasoner,
    /// DeepSeek V3.2 Speciale - Enhanced reasoning model (no tool-use)
    OpenRouterDeepSeekV32Speciale,
    /// DeepSeek V3.2 Exp - Experimental DeepSeek V3.2 listing
    OpenRouterDeepSeekV32Exp,
    /// DeepSeek Chat v3.1 - Advanced DeepSeek model via OpenRouter
    OpenRouterDeepSeekChatV31,
    /// DeepSeek R1 - DeepSeek R1 reasoning model with chain-of-thought
    OpenRouterDeepSeekR1,
    /// OpenAI gpt-oss-120b - Open-weight 120B reasoning model via OpenRouter
    OpenRouterOpenAIGptOss120b,
    /// OpenAI gpt-oss-20b - Open-weight 20B deployment via OpenRouter
    OpenRouterOpenAIGptOss20b,
    /// OpenAI GPT-5 - OpenAI GPT-5 model accessed through OpenRouter
    OpenRouterOpenAIGpt5,
    /// OpenAI GPT-5 Codex - OpenRouter listing for GPT-5 Codex
    OpenRouterOpenAIGpt5Codex,
    /// OpenAI GPT-5 Chat - Chat optimised GPT-5 endpoint without tool use
    OpenRouterOpenAIGpt5Chat,
    /// OpenAI GPT-4o Search Preview - GPT-4o search preview endpoint via OpenRouter
    OpenRouterOpenAIGpt4oSearchPreview,
    /// OpenAI GPT-4o Mini Search Preview - GPT-4o mini search preview endpoint
    OpenRouterOpenAIGpt4oMiniSearchPreview,
    /// OpenAI ChatGPT-4o Latest - ChatGPT 4o latest listing via OpenRouter
    OpenRouterOpenAIChatgpt4oLatest,
    /// Claude Sonnet 4.5 - Anthropic Claude Sonnet 4.5 listing
    OpenRouterAnthropicClaudeSonnet45,
    /// Claude Haiku 4.5 - Anthropic Claude Haiku 4.5 listing
    OpenRouterAnthropicClaudeHaiku45,
    /// Claude Opus 4.1 - Anthropic Claude Opus 4.1 listing
    OpenRouterAnthropicClaudeOpus41,
    /// Amazon Nova 2 Lite - Amazon Nova 2 Lite model via OpenRouter
    OpenRouterAmazonNova2LiteV1,
    /// Mistral Large 3 2512 - Mistral Large 3 2512 model via OpenRouter
    OpenRouterMistralaiMistralLarge2512,
    /// DeepSeek V3.1 Nex N1 - Nex AGI DeepSeek V3.1 Nex N1 model via OpenRouter
    OpenRouterNexAgiDeepseekV31NexN1,
    /// OpenAI GPT-5.1 - OpenAI GPT-5.1 model accessed through OpenRouter
    OpenRouterOpenAIGpt51,
    /// OpenAI GPT-5.1-Codex - OpenRouter listing for GPT-5.1 Codex
    OpenRouterOpenAIGpt51Codex,
    /// OpenAI GPT-5.1-Codex-Max - OpenRouter listing for GPT-5.1 Codex Max
    OpenRouterOpenAIGpt51CodexMax,
    /// OpenAI GPT-5.1-Codex-Mini - OpenRouter listing for GPT-5.1 Codex Mini
    OpenRouterOpenAIGpt51CodexMini,
    /// OpenAI GPT-5.1 Chat - Chat optimised GPT-5.1 endpoint without tool use
    OpenRouterOpenAIGpt51Chat,
    /// OpenAI GPT-5.2 - OpenAI GPT-5.2 model accessed through OpenRouter
    OpenRouterOpenAIGpt52,
    /// OpenAI GPT-5.2 Chat - Chat optimised GPT-5.2 endpoint without tool use
    OpenRouterOpenAIGpt52Chat,
    /// OpenAI GPT-5.2-Codex - OpenRouter listing for GPT-5.2 Codex
    OpenRouterOpenAIGpt52Codex,
    /// OpenAI GPT-5.2 Pro - Professional tier GPT-5.2 model accessed through OpenRouter
    OpenRouterOpenAIGpt52Pro,
    /// OpenAI o1-pro - OpenAI o1-pro advanced reasoning model via OpenRouter
    OpenRouterOpenAIO1Pro,
    /// GLM 4.6V - Z.AI GLM 4.6V enhanced vision model
    OpenRouterZaiGlm46V,
    /// GLM 4.7 - Z.AI GLM 4.7 next-generation reasoning model
    OpenRouterZaiGlm47,
}

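// OpenRouter catalogue metadata is pulled in from a file generated into
// `OUT_DIR` at compile time (typically by this crate's build script); the
// `docsrs` stub below keeps documentation builds working without that file.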
#[cfg(not(docsrs))]
pub mod openrouter_generated {
    include!(concat!(env!("OUT_DIR"), "/openrouter_metadata.rs"));
}

#[cfg(docsrs)]
pub mod openrouter_generated {
    #[derive(Clone, Copy)]
    pub struct Entry {
        pub variant: super::ModelId,
        pub id: &'static str,
        pub vendor: &'static str,
        pub display: &'static str,
        pub description: &'static str,
        pub efficient: bool,
        pub top_tier: bool,
        pub generation: &'static str,
        pub reasoning: bool,
        pub tool_call: bool,
    }

    pub const ENTRIES: &[Entry] = &[];

    #[derive(Clone, Copy)]
    pub struct VendorModels {
        pub vendor: &'static str,
        pub models: &'static [super::ModelId],
    }

    pub const VENDOR_MODELS: &[VendorModels] = &[];

    pub fn metadata_for(_model: super::ModelId) -> Option<super::OpenRouterMetadata> {
        None
    }

    pub fn parse_model(_value: &str) -> Option<super::ModelId> {
        None
    }

    pub fn vendor_groups() -> &'static [VendorModels] {
        VENDOR_MODELS
    }
}

impl ModelId {
    fn openrouter_metadata(&self) -> Option<OpenRouterMetadata> {
        #[cfg(not(docsrs))]
        {
            openrouter_generated::metadata_for(*self)
        }
        #[cfg(docsrs)]
        {
            None
        }
    }

    fn parse_openrouter_model(value: &str) -> Option<Self> {
        #[cfg(not(docsrs))]
        {
            openrouter_generated::parse_model(value)
        }
        #[cfg(docsrs)]
        {
            None
        }
    }

    fn openrouter_vendor_groups() -> Vec<(&'static str, &'static [Self])> {
        #[cfg(not(docsrs))]
        {
            openrouter_generated::vendor_groups()
                .iter()
                .map(|group| (group.vendor, group.models))
                .collect()
        }
        #[cfg(docsrs)]
        {
            Vec::new()
        }
    }

    fn openrouter_models() -> Vec<Self> {
        Self::openrouter_vendor_groups()
            .into_iter()
            .flat_map(|(_, models)| models.iter().copied())
            .collect()
    }

    /// Convert the model identifier to its string representation
    /// used in API calls and configurations
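    ///
    /// Sketch of typical use (illustrative; the exact string values are
    /// defined by the constants in `crate::constants::models`):
    ///
    /// ```ignore
    /// use vtcode_config::models::ModelId;
    ///
    /// // The returned &'static str is what gets sent in API requests.
    /// let wire_id: &'static str = ModelId::Gemini25Pro.as_str();
    /// println!("requesting model {wire_id}");
    /// ```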
    pub fn as_str(&self) -> &'static str {
        use crate::constants::models;
        if let Some(meta) = self.openrouter_metadata() {
            return meta.id;
        }
        match self {
            // Gemini models
            ModelId::Gemini25FlashPreview => models::GEMINI_2_5_FLASH_PREVIEW,
            ModelId::Gemini25Flash => models::GEMINI_2_5_FLASH,
            ModelId::Gemini25FlashLite => models::GEMINI_2_5_FLASH_LITE,
            ModelId::Gemini25Pro => models::GEMINI_2_5_PRO,
            ModelId::Gemini3ProPreview => models::GEMINI_3_PRO_PREVIEW,
            // OpenAI models
            ModelId::GPT5 => models::GPT_5,
            ModelId::GPT5Codex => models::GPT_5_CODEX,
            ModelId::GPT5Mini => models::GPT_5_MINI,
            ModelId::GPT5Nano => models::GPT_5_NANO,
            ModelId::CodexMiniLatest => models::CODEX_MINI_LATEST,
            ModelId::OpenAIGptOss20b => models::openai::GPT_OSS_20B,
            ModelId::OpenAIGptOss120b => models::openai::GPT_OSS_120B,
            // Anthropic models
            ModelId::ClaudeOpus45 => models::CLAUDE_OPUS_4_5,
            ModelId::ClaudeOpus41 => models::CLAUDE_OPUS_4_1,
            ModelId::ClaudeSonnet45 => models::CLAUDE_SONNET_4_5,
            ModelId::ClaudeHaiku45 => models::CLAUDE_HAIKU_4_5,
            ModelId::ClaudeSonnet4 => models::CLAUDE_SONNET_4_5_20250929,
            // DeepSeek models
            ModelId::DeepSeekChat => models::DEEPSEEK_CHAT,
            ModelId::DeepSeekReasoner => models::DEEPSEEK_REASONER,
            // xAI models
            ModelId::XaiGrok4 => models::xai::GROK_4,
            ModelId::XaiGrok4Mini => models::xai::GROK_4_MINI,
            ModelId::XaiGrok4Code => models::xai::GROK_4_CODE,
            ModelId::XaiGrok4CodeLatest => models::xai::GROK_4_CODE_LATEST,
            ModelId::XaiGrok4Vision => models::xai::GROK_4_VISION,
            // Z.AI models
            ModelId::ZaiGlm4Plus => models::zai::GLM_4_PLUS,
            ModelId::ZaiGlm4PlusDeepThinking => models::zai::GLM_4_PLUS_DEEP_THINKING,
            ModelId::ZaiGlm47 => models::zai::GLM_4_7,
            ModelId::ZaiGlm47DeepThinking => models::zai::GLM_4_7_DEEP_THINKING,
            ModelId::ZaiGlm46 => models::zai::GLM_4_6,
            ModelId::ZaiGlm46DeepThinking => models::zai::GLM_4_6_DEEP_THINKING,
            ModelId::ZaiGlm46V => models::zai::GLM_4_6V,
            ModelId::ZaiGlm46VFlash => models::zai::GLM_4_6V_FLASH,
            ModelId::ZaiGlm46VFlashX => models::zai::GLM_4_6V_FLASHX,
            ModelId::ZaiGlm45 => models::zai::GLM_4_5,
            ModelId::ZaiGlm45DeepThinking => models::zai::GLM_4_5_DEEP_THINKING,
            ModelId::ZaiGlm45Air => models::zai::GLM_4_5_AIR,
            ModelId::ZaiGlm45X => models::zai::GLM_4_5_X,
            ModelId::ZaiGlm45Airx => models::zai::GLM_4_5_AIRX,
            ModelId::ZaiGlm45Flash => models::zai::GLM_4_5_FLASH,
            ModelId::ZaiGlm45V => models::zai::GLM_4_5V,
            ModelId::ZaiGlm432b0414128k => models::zai::GLM_4_32B_0414_128K,
            // Ollama models
            ModelId::OllamaGptOss20b => models::ollama::GPT_OSS_20B,
            ModelId::OllamaGptOss20bCloud => models::ollama::GPT_OSS_20B_CLOUD,
            ModelId::OllamaGptOss120bCloud => models::ollama::GPT_OSS_120B_CLOUD,
            ModelId::OllamaQwen317b => models::ollama::QWEN3_1_7B,
            ModelId::OllamaDeepseekV32Cloud => models::ollama::DEEPSEEK_V32_CLOUD,
            ModelId::OllamaQwen3Next80bCloud => models::ollama::QWEN3_NEXT_80B_CLOUD,
            ModelId::OllamaMistralLarge3675bCloud => models::ollama::MISTRAL_LARGE_3_675B_CLOUD,
            ModelId::OllamaKimiK2ThinkingCloud => models::ollama::KIMI_K2_THINKING_CLOUD,
            ModelId::OllamaGlm47Cloud => models::ollama::GLM_47_CLOUD,
            ModelId::OllamaMinimaxM21Cloud => models::ollama::MINIMAX_M21_CLOUD,
            ModelId::OllamaGemini3FlashPreviewCloud => models::ollama::GEMINI_3_FLASH_PREVIEW_CLOUD,
            ModelId::OllamaQwen3Coder480bCloud => models::ollama::QWEN3_CODER_480B_CLOUD,
            ModelId::OllamaGlm46Cloud => models::ollama::GLM_46_CLOUD,
            ModelId::OllamaGemini3ProPreviewLatestCloud => {
                models::ollama::GEMINI_3_PRO_PREVIEW_LATEST_CLOUD
            }
            ModelId::OllamaDevstral2123bCloud => models::ollama::DEVSTRAL_2_123B_CLOUD,
            ModelId::OllamaMinimaxM2Cloud => models::ollama::MINIMAX_M2_CLOUD,
            ModelId::OllamaNemotron3Nano30bCloud => models::ollama::NEMOTRON_3_NANO_30B_CLOUD,
            // LM Studio models
            ModelId::LmStudioMetaLlama38BInstruct => models::lmstudio::META_LLAMA_3_8B_INSTRUCT,
            ModelId::LmStudioMetaLlama318BInstruct => models::lmstudio::META_LLAMA_31_8B_INSTRUCT,
            ModelId::LmStudioQwen257BInstruct => models::lmstudio::QWEN25_7B_INSTRUCT,
            ModelId::LmStudioGemma22BIt => models::lmstudio::GEMMA_2_2B_IT,
            ModelId::LmStudioGemma29BIt => models::lmstudio::GEMMA_2_9B_IT,
            ModelId::LmStudioPhi31Mini4kInstruct => models::lmstudio::PHI_31_MINI_4K_INSTRUCT,
            // Hugging Face models
            ModelId::HuggingFaceDeepseekV32 => models::huggingface::DEEPSEEK_V32,
            ModelId::HuggingFaceOpenAIGptOss20b => models::huggingface::OPENAI_GPT_OSS_20B,
            ModelId::HuggingFaceOpenAIGptOss120b => models::huggingface::OPENAI_GPT_OSS_120B,
            ModelId::HuggingFaceGlm47 => models::huggingface::ZAI_GLM_47,
            ModelId::HuggingFaceKimiK2Thinking => models::huggingface::MOONSHOT_KIMI_K2_THINKING,
            ModelId::HuggingFaceMinimaxM21Novita => models::huggingface::MINIMAX_M2_1_NOVITA,
            ModelId::HuggingFaceDeepseekV32Novita => models::huggingface::DEEPSEEK_V32_NOVITA,
            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
                models::huggingface::XIAOMI_MIMO_V2_FLASH_NOVITA
            }
            // MiniMax models
            ModelId::MinimaxM21 => models::minimax::MINIMAX_M2_1,
            ModelId::MinimaxM21Lightning => models::minimax::MINIMAX_M2_1_LIGHTNING,
            ModelId::MinimaxM2 => models::minimax::MINIMAX_M2,
            // OpenRouter models
            _ => unreachable!(),
        }
    }

    /// Get the provider for this model
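    ///
    /// A minimal sketch (illustrative; assumes the crate path `vtcode_config`):
    ///
    /// ```ignore
    /// use vtcode_config::models::{ModelId, Provider};
    ///
    /// assert_eq!(ModelId::ClaudeOpus45.provider(), Provider::Anthropic);
    /// assert_eq!(ModelId::OllamaQwen317b.provider(), Provider::Ollama);
    /// ```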
    pub fn provider(&self) -> Provider {
        if self.openrouter_metadata().is_some() {
            return Provider::OpenRouter;
        }
        match self {
            ModelId::Gemini25FlashPreview
            | ModelId::Gemini25Flash
            | ModelId::Gemini25FlashLite
            | ModelId::Gemini25Pro
            | ModelId::Gemini3ProPreview => Provider::Gemini,
            ModelId::GPT5
            | ModelId::GPT5Codex
            | ModelId::GPT5Mini
            | ModelId::GPT5Nano
            | ModelId::CodexMiniLatest
            | ModelId::OpenAIGptOss20b
            | ModelId::OpenAIGptOss120b => Provider::OpenAI,
            ModelId::ClaudeOpus45
            | ModelId::ClaudeOpus41
            | ModelId::ClaudeSonnet45
            | ModelId::ClaudeHaiku45
            | ModelId::ClaudeSonnet4 => Provider::Anthropic,
            ModelId::DeepSeekChat | ModelId::DeepSeekReasoner => Provider::DeepSeek,
            ModelId::HuggingFaceDeepseekV32
            | ModelId::HuggingFaceOpenAIGptOss20b
            | ModelId::HuggingFaceOpenAIGptOss120b
            | ModelId::HuggingFaceGlm47
            | ModelId::HuggingFaceKimiK2Thinking
            | ModelId::HuggingFaceMinimaxM21Novita
            | ModelId::HuggingFaceDeepseekV32Novita
            | ModelId::HuggingFaceXiaomiMimoV2FlashNovita => Provider::HuggingFace,
            ModelId::XaiGrok4
            | ModelId::XaiGrok4Mini
            | ModelId::XaiGrok4Code
            | ModelId::XaiGrok4CodeLatest
            | ModelId::XaiGrok4Vision => Provider::XAI,
            ModelId::ZaiGlm4Plus
            | ModelId::ZaiGlm4PlusDeepThinking
            | ModelId::ZaiGlm47
            | ModelId::ZaiGlm47DeepThinking
            | ModelId::ZaiGlm46
            | ModelId::ZaiGlm46DeepThinking
            | ModelId::ZaiGlm46V
            | ModelId::ZaiGlm46VFlash
            | ModelId::ZaiGlm46VFlashX
            | ModelId::ZaiGlm45
            | ModelId::ZaiGlm45DeepThinking
            | ModelId::ZaiGlm45Air
            | ModelId::ZaiGlm45X
            | ModelId::ZaiGlm45Airx
            | ModelId::ZaiGlm45Flash
            | ModelId::ZaiGlm45V
            | ModelId::ZaiGlm432b0414128k => Provider::ZAI,
            ModelId::OllamaGptOss20b
            | ModelId::OllamaGptOss20bCloud
            | ModelId::OllamaGptOss120bCloud
            | ModelId::OllamaQwen317b
            | ModelId::OllamaDeepseekV32Cloud
            | ModelId::OllamaQwen3Next80bCloud
            | ModelId::OllamaMistralLarge3675bCloud
            | ModelId::OllamaKimiK2ThinkingCloud
            | ModelId::OllamaQwen3Coder480bCloud
            | ModelId::OllamaGlm46Cloud
            | ModelId::OllamaGemini3ProPreviewLatestCloud
            | ModelId::OllamaGemini3FlashPreviewCloud
            | ModelId::OllamaDevstral2123bCloud
            | ModelId::OllamaMinimaxM2Cloud
            | ModelId::OllamaMinimaxM21Cloud
            | ModelId::OllamaNemotron3Nano30bCloud
            | ModelId::OllamaGlm47Cloud => Provider::Ollama,
            ModelId::LmStudioMetaLlama38BInstruct
            | ModelId::LmStudioMetaLlama318BInstruct
            | ModelId::LmStudioQwen257BInstruct
            | ModelId::LmStudioGemma22BIt
            | ModelId::LmStudioGemma29BIt
            | ModelId::LmStudioPhi31Mini4kInstruct => Provider::LmStudio,
            ModelId::MinimaxM21 | ModelId::MinimaxM21Lightning | ModelId::MinimaxM2 => {
                Provider::Minimax
            }
            _ => unreachable!(),
        }
    }

    /// Whether this model supports configurable reasoning effort levels
    pub fn supports_reasoning_effort(&self) -> bool {
        self.provider().supports_reasoning_effort(self.as_str())
    }

    /// Get the display name for the model (human-readable)
    pub fn display_name(&self) -> &'static str {
        if let Some(meta) = self.openrouter_metadata() {
            return meta.display;
        }
        match self {
            // Gemini models
            ModelId::Gemini25FlashPreview => "Gemini 2.5 Flash Preview",
            ModelId::Gemini25Flash => "Gemini 2.5 Flash",
            ModelId::Gemini25FlashLite => "Gemini 2.5 Flash Lite",
            ModelId::Gemini25Pro => "Gemini 2.5 Pro",
            ModelId::Gemini3ProPreview => "Gemini 3 Pro Preview",
            // OpenAI models
            ModelId::GPT5 => "GPT-5",
            ModelId::GPT5Codex => "GPT-5 Codex",
            ModelId::GPT5Mini => "GPT-5 Mini",
            ModelId::GPT5Nano => "GPT-5 Nano",
            ModelId::CodexMiniLatest => "Codex Mini Latest",
            // Anthropic models
            ModelId::ClaudeOpus45 => "Claude Opus 4.5",
            ModelId::ClaudeOpus41 => "Claude Opus 4.1",
            ModelId::ClaudeSonnet45 => "Claude Sonnet 4.5",
            ModelId::ClaudeHaiku45 => "Claude Haiku 4.5",
            ModelId::ClaudeSonnet4 => "Claude Sonnet 4",
            // DeepSeek models
            ModelId::DeepSeekChat => "DeepSeek V3.2 Chat",
            ModelId::DeepSeekReasoner => "DeepSeek V3.2 Reasoner",
            // xAI models
            ModelId::XaiGrok4 => "Grok-4",
            ModelId::XaiGrok4Mini => "Grok-4 Mini",
            ModelId::XaiGrok4Code => "Grok-4 Code",
            ModelId::XaiGrok4CodeLatest => "Grok-4 Code Latest",
            ModelId::XaiGrok4Vision => "Grok-4 Vision",
            // Z.AI models
            ModelId::ZaiGlm4Plus => "GLM 4 Plus",
            ModelId::ZaiGlm4PlusDeepThinking => "GLM 4 Plus Deep Thinking",
            ModelId::ZaiGlm47 => "GLM 4.7",
            ModelId::ZaiGlm47DeepThinking => "GLM 4.7 Deep Thinking",
            ModelId::ZaiGlm46 => "GLM 4.6",
            ModelId::ZaiGlm46DeepThinking => "GLM 4.6 Deep Thinking",
            ModelId::ZaiGlm46V => "GLM 4.6V",
            ModelId::ZaiGlm46VFlash => "GLM 4.6V Flash",
            ModelId::ZaiGlm46VFlashX => "GLM 4.6V FlashX",
            ModelId::ZaiGlm45 => "GLM 4.5",
            ModelId::ZaiGlm45DeepThinking => "GLM 4.5 Deep Thinking",
            ModelId::ZaiGlm45Air => "GLM 4.5 Air",
            ModelId::ZaiGlm45X => "GLM 4.5 X",
            ModelId::ZaiGlm45Airx => "GLM 4.5 AirX",
            ModelId::ZaiGlm45Flash => "GLM 4.5 Flash",
            ModelId::ZaiGlm45V => "GLM 4.5V",
            ModelId::ZaiGlm432b0414128k => "GLM 4 32B 0414 128K",
            // Ollama models
            ModelId::OllamaGptOss20b => "GPT-OSS 20B (local)",
            ModelId::OllamaGptOss20bCloud => "GPT-OSS 20B (cloud)",
            ModelId::OllamaGptOss120bCloud => "GPT-OSS 120B (cloud)",
            ModelId::OllamaQwen317b => "Qwen3 1.7B (local)",
            ModelId::OllamaDeepseekV32Cloud => "DeepSeek V3.2 (cloud)",
            ModelId::OllamaQwen3Next80bCloud => "Qwen3 Next 80B (cloud)",
            ModelId::OllamaMistralLarge3675bCloud => "Mistral Large 3 675B (cloud)",
            ModelId::OllamaKimiK2ThinkingCloud => "Kimi K2 Thinking (cloud)",
            ModelId::OllamaQwen3Coder480bCloud => "Qwen3 Coder 480B (cloud)",
            ModelId::OllamaGlm46Cloud => "GLM-4.6 (cloud)",
            ModelId::OllamaGemini3ProPreviewLatestCloud => "Gemini 3 Pro Preview (cloud)",
            ModelId::OllamaGemini3FlashPreviewCloud => "Gemini 3 Flash Preview (cloud)",
            ModelId::OllamaDevstral2123bCloud => "Devstral 2 123B (cloud)",
            ModelId::OllamaMinimaxM2Cloud => "MiniMax-M2 (cloud)",
            ModelId::OllamaGlm47Cloud => "GLM-4.7 (cloud)",
            ModelId::OllamaMinimaxM21Cloud => "MiniMax-M2.1 (cloud)",
            ModelId::OllamaNemotron3Nano30bCloud => "Nemotron-3-Nano 30B (cloud)",
            // LM Studio models
            ModelId::LmStudioMetaLlama38BInstruct => "Meta Llama 3 8B (LM Studio)",
            ModelId::LmStudioMetaLlama318BInstruct => "Meta Llama 3.1 8B (LM Studio)",
            ModelId::LmStudioQwen257BInstruct => "Qwen2.5 7B (LM Studio)",
            ModelId::LmStudioGemma22BIt => "Gemma 2 2B (LM Studio)",
            ModelId::LmStudioGemma29BIt => "Gemma 2 9B (LM Studio)",
            ModelId::LmStudioPhi31Mini4kInstruct => "Phi-3.1 Mini 4K (LM Studio)",
            // Hugging Face models
            ModelId::HuggingFaceDeepseekV32 => "DeepSeek V3.2 (HF)",
            ModelId::HuggingFaceOpenAIGptOss20b => "GPT-OSS 20B (HF)",
            ModelId::HuggingFaceOpenAIGptOss120b => "GPT-OSS 120B (HF)",
            ModelId::HuggingFaceGlm47 => "GLM-4.7 (HF)",
            ModelId::HuggingFaceKimiK2Thinking => "Kimi K2 Thinking (HF)",
            ModelId::HuggingFaceMinimaxM21Novita => "MiniMax-M2.1 (Novita)",
            ModelId::HuggingFaceDeepseekV32Novita => "DeepSeek V3.2 (Novita)",
            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => "MiMo-V2-Flash (Novita)",
            // MiniMax models
            ModelId::MinimaxM21 => "MiniMax-M2.1",
            ModelId::MinimaxM21Lightning => "MiniMax-M2.1-lightning",
            ModelId::MinimaxM2 => "MiniMax-M2",
            // OpenRouter models
            _ => unreachable!(),
        }
    }

    /// Get a description of the model's characteristics
    pub fn description(&self) -> &'static str {
        if let Some(meta) = self.openrouter_metadata() {
            return meta.description;
        }
        match self {
            // Gemini models
            ModelId::Gemini25FlashPreview => {
                "Latest fast Gemini model with advanced multimodal capabilities"
            }
            ModelId::Gemini25Flash => {
                "Legacy alias for Gemini 2.5 Flash Preview (same capabilities)"
            }
            ModelId::Gemini25FlashLite => {
                "Legacy alias for Gemini 2.5 Flash Preview optimized for efficiency"
            }
            ModelId::Gemini25Pro => "Latest most capable Gemini model with reasoning",
            ModelId::Gemini3ProPreview => {
                "Preview of next-generation Gemini 3 Pro model with advanced reasoning and capabilities"
            }
            // OpenAI models
            ModelId::GPT5 => "Latest most capable OpenAI model with advanced reasoning",
            ModelId::GPT5Codex => {
                "Code-focused GPT-5 variant optimized for tool calling and structured outputs"
            }
            ModelId::GPT5Mini => "Latest efficient OpenAI model, great for most tasks",
            ModelId::GPT5Nano => "Latest most cost-effective OpenAI model",
            ModelId::CodexMiniLatest => "Latest Codex model optimized for code generation",
            ModelId::OpenAIGptOss20b => {
                "OpenAI's open-source 20B parameter GPT-OSS model using harmony tokenization"
            }
            ModelId::OpenAIGptOss120b => {
                "OpenAI's open-source 120B parameter GPT-OSS model using harmony tokenization"
            }
            // Anthropic models
            ModelId::ClaudeOpus45 => {
                "Latest flagship Anthropic model with exceptional reasoning capabilities"
            }
            ModelId::ClaudeOpus41 => {
                "Previous flagship Anthropic model with exceptional reasoning capabilities"
            }
            ModelId::ClaudeSonnet45 => "Latest balanced Anthropic model for general tasks",
            ModelId::ClaudeHaiku45 => {
                "Latest efficient Anthropic model optimized for low-latency agent workflows"
            }
            ModelId::ClaudeSonnet4 => {
                "Previous balanced Anthropic model maintained for compatibility"
            }
            // DeepSeek models
            ModelId::DeepSeekChat => {
                "DeepSeek V3.2 - Fast, efficient chat model for immediate responses"
            }
            ModelId::DeepSeekReasoner => {
                "DeepSeek V3.2 - Thinking mode with integrated tool-use and reasoning capability"
            }
            // xAI models
            ModelId::XaiGrok4 => "Flagship Grok 4 model with long context and tool use",
            ModelId::XaiGrok4Mini => "Efficient Grok 4 Mini tuned for low latency",
            ModelId::XaiGrok4Code => "Code-specialized Grok 4 deployment with tool support",
            ModelId::XaiGrok4CodeLatest => {
                "Latest Grok 4 code model offering enhanced reasoning traces"
            }
            ModelId::XaiGrok4Vision => "Multimodal Grok 4 model with image understanding",
            // Z.AI models
            ModelId::ZaiGlm4Plus | ModelId::ZaiGlm4PlusDeepThinking => {
                "Z.AI flagship model with top-tier capability in reasoning, writing, and tool-use"
            }
            ModelId::ZaiGlm47 | ModelId::ZaiGlm47DeepThinking => {
                "Latest Z.AI GLM flagship with enhanced reasoning, 200k context and coding strengths"
            }
            ModelId::ZaiGlm46 | ModelId::ZaiGlm46DeepThinking => {
                "Previous Z.AI GLM flagship with long-context reasoning and coding strengths"
            }
            ModelId::ZaiGlm46V | ModelId::ZaiGlm46VFlash | ModelId::ZaiGlm46VFlashX => {
                "Vision-capable GLM 4.6 release optimized for multimodal understanding"
            }
            ModelId::ZaiGlm45 | ModelId::ZaiGlm45DeepThinking => {
                "Balanced GLM 4.5 release for general assistant tasks"
            }
            ModelId::ZaiGlm45Air => "Efficient GLM 4.5 Air variant tuned for lower latency",
            ModelId::ZaiGlm45X => "Enhanced GLM 4.5 X variant with improved reasoning",
            ModelId::ZaiGlm45Airx => "Hybrid GLM 4.5 AirX variant blending efficiency with quality",
            ModelId::ZaiGlm45Flash => "Low-latency GLM 4.5 Flash optimized for responsiveness",
            ModelId::ZaiGlm45V => "Vision-capable GLM 4.5 release for multimodal tasks",
            ModelId::ZaiGlm432b0414128k => {
                "Legacy GLM 4 32B deployment offering extended 128K context window"
            }
            ModelId::OllamaGptOss20b => {
                "Local GPT-OSS 20B deployment served via Ollama with no external API dependency"
            }
            ModelId::OllamaGptOss20bCloud => {
                "Cloud-hosted GPT-OSS 20B accessed through Ollama Cloud for efficient reasoning tasks"
            }
            ModelId::OllamaGptOss120bCloud => {
                "Cloud-hosted GPT-OSS 120B accessed through Ollama Cloud for larger reasoning tasks"
            }
            ModelId::OllamaQwen317b => {
                "Qwen3 1.7B served locally through Ollama without external API requirements"
            }
            ModelId::OllamaDeepseekV32Cloud => {
                "DeepSeek V3.2 cloud deployment via Ollama with enhanced reasoning and instruction following"
            }
            ModelId::OllamaQwen3Next80bCloud => {
                "Qwen3 Next generation 80B model via Ollama Cloud with improved reasoning and long context"
            }
            ModelId::OllamaMistralLarge3675bCloud => {
                "Mistral Large 3 675B reasoning model via Ollama Cloud for complex problem-solving"
            }
            ModelId::OllamaKimiK2ThinkingCloud => {
                "MoonshotAI Kimi K2 thinking model via Ollama Cloud with explicit reasoning traces"
            }
            ModelId::OllamaGlm47Cloud => "Advancing the Coding Capability",
            ModelId::OllamaMinimaxM21Cloud => {
                "Exceptional multilingual capabilities to elevate code engineering"
            }
            ModelId::OllamaGemini3FlashPreviewCloud => {
                "Gemini 3 Flash offers frontier intelligence built for speed at a fraction of the cost."
            }
            ModelId::OllamaGemini3ProPreviewLatestCloud => {
                "Gemini 3 Pro Preview Latest offers advanced reasoning and long context capabilities."
            }
            ModelId::OllamaNemotron3Nano30bCloud => {
                "NVIDIA Nemotron-3-Nano 30B brings efficient excellence to code"
            }
            ModelId::OllamaQwen3Coder480bCloud => {
                "Cloud-hosted Qwen3 Coder 480B model accessed through Ollama Cloud for coding tasks"
            }
            ModelId::OllamaGlm46Cloud => {
                "Cloud-hosted GLM-4.6 model accessed through Ollama Cloud for reasoning and coding"
            }
            ModelId::OllamaMinimaxM2Cloud => {
                "Cloud-hosted MiniMax-M2 model accessed through Ollama Cloud for reasoning tasks"
            }
            ModelId::LmStudioMetaLlama38BInstruct => {
                "Meta Llama 3 8B running through LM Studio's local OpenAI-compatible server"
            }
            ModelId::LmStudioMetaLlama318BInstruct => {
                "Meta Llama 3.1 8B running through LM Studio's local OpenAI-compatible server"
            }
            ModelId::LmStudioQwen257BInstruct => {
                "Qwen2.5 7B hosted in LM Studio for local experimentation and coding tasks"
            }
            ModelId::LmStudioGemma22BIt => {
                "Gemma 2 2B IT deployed via LM Studio for lightweight on-device assistance"
            }
            ModelId::LmStudioGemma29BIt => {
                "Gemma 2 9B IT served locally via LM Studio when you need additional capacity"
            }
            ModelId::LmStudioPhi31Mini4kInstruct => {
                "Phi-3.1 Mini 4K hosted in LM Studio for compact reasoning and experimentation"
            }
            ModelId::MinimaxM21 => {
                "Latest MiniMax-M2.1 model with enhanced code understanding and reasoning"
            }
            ModelId::MinimaxM21Lightning => {
                "Fast version of MiniMax-M2.1 for rapid conversational tasks"
            }
            ModelId::MinimaxM2 => {
                "MiniMax-M2 via Anthropic-compatible API with reasoning and tool use"
            }
            ModelId::HuggingFaceDeepseekV32 => {
                "DeepSeek-V3.2 via Hugging Face router for advanced reasoning"
            }
            ModelId::HuggingFaceOpenAIGptOss20b => "OpenAI GPT-OSS 20B via Hugging Face router",
            ModelId::HuggingFaceOpenAIGptOss120b => "OpenAI GPT-OSS 120B via Hugging Face router",
            ModelId::HuggingFaceGlm47 => "Z.AI GLM-4.7 via Hugging Face router",
            ModelId::HuggingFaceKimiK2Thinking => {
                "MoonshotAI Kimi K2 Thinking via Hugging Face router"
            }
            ModelId::HuggingFaceMinimaxM21Novita => {
                "MiniMax-M2.1 model via Novita inference provider on HuggingFace router. Enhanced reasoning capabilities."
            }
            ModelId::HuggingFaceDeepseekV32Novita => {
                "DeepSeek-V3.2 via Novita inference provider on HuggingFace router."
            }
            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
                "Xiaomi MiMo-V2-Flash via Novita on HuggingFace router."
            }
            _ => unreachable!(),
        }
    }

    /// Return the OpenRouter vendor slug when this identifier maps to a marketplace listing
    pub fn openrouter_vendor(&self) -> Option<&'static str> {
        self.openrouter_metadata().map(|meta| meta.vendor)
    }

    /// Get all available models as a vector
    pub fn all_models() -> Vec<ModelId> {
        let mut models = vec![
            // Gemini models
            ModelId::Gemini25FlashPreview,
            ModelId::Gemini25Flash,
            ModelId::Gemini25FlashLite,
            ModelId::Gemini25Pro,
            // OpenAI models
            ModelId::GPT5,
            ModelId::GPT5Codex,
            ModelId::GPT5Mini,
            ModelId::GPT5Nano,
            ModelId::CodexMiniLatest,
            // Anthropic models
            ModelId::ClaudeOpus45,
            ModelId::ClaudeOpus41,
            ModelId::ClaudeSonnet45,
            ModelId::ClaudeHaiku45,
            ModelId::ClaudeSonnet4,
            // DeepSeek models
            ModelId::DeepSeekChat,
            ModelId::DeepSeekReasoner,
            // xAI models
            ModelId::XaiGrok4,
            ModelId::XaiGrok4Mini,
            ModelId::XaiGrok4Code,
            ModelId::XaiGrok4CodeLatest,
            ModelId::XaiGrok4Vision,
            // Z.AI models
            ModelId::ZaiGlm46,
            ModelId::ZaiGlm45,
            ModelId::ZaiGlm45Air,
            ModelId::ZaiGlm45X,
            ModelId::ZaiGlm45Airx,
            ModelId::ZaiGlm45Flash,
            ModelId::ZaiGlm432b0414128k,
            // Ollama models
            ModelId::OllamaGptOss20b,
            ModelId::OllamaGptOss20bCloud,
            ModelId::OllamaGptOss120bCloud,
            ModelId::OllamaQwen317b,
            ModelId::OllamaDeepseekV32Cloud,
            ModelId::OllamaQwen3Next80bCloud,
            ModelId::OllamaMistralLarge3675bCloud,
            ModelId::OllamaKimiK2ThinkingCloud,
            ModelId::OllamaQwen3Coder480bCloud,
            ModelId::OllamaGlm46Cloud,
            ModelId::OllamaGlm47Cloud,
            ModelId::OllamaGemini3ProPreviewLatestCloud,
            ModelId::OllamaGemini3FlashPreviewCloud,
            ModelId::OllamaDevstral2123bCloud,
            ModelId::OllamaMinimaxM2Cloud,
            ModelId::OllamaMinimaxM21Cloud,
            ModelId::OllamaNemotron3Nano30bCloud,
            // LM Studio models
            ModelId::LmStudioMetaLlama38BInstruct,
            ModelId::LmStudioMetaLlama318BInstruct,
            ModelId::LmStudioQwen257BInstruct,
            ModelId::LmStudioGemma22BIt,
            ModelId::LmStudioGemma29BIt,
            ModelId::LmStudioPhi31Mini4kInstruct,
            // MiniMax models
            ModelId::MinimaxM21,
            ModelId::MinimaxM21Lightning,
            ModelId::MinimaxM2,
            // Hugging Face models
            ModelId::HuggingFaceDeepseekV32,
            ModelId::HuggingFaceOpenAIGptOss20b,
            ModelId::HuggingFaceOpenAIGptOss120b,
            ModelId::HuggingFaceGlm47,
            ModelId::HuggingFaceKimiK2Thinking,
            ModelId::HuggingFaceMinimaxM21Novita,
            ModelId::HuggingFaceDeepseekV32Novita,
            ModelId::HuggingFaceXiaomiMimoV2FlashNovita,
        ];
        models.extend(Self::openrouter_models());
        models
    }

    /// Get all models for a specific provider
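    ///
    /// Illustrative sketch (not compiled here):
    ///
    /// ```ignore
    /// use vtcode_config::models::{ModelId, Provider};
    ///
    /// let anthropic_models = ModelId::models_for_provider(Provider::Anthropic);
    /// assert!(anthropic_models.contains(&ModelId::ClaudeOpus45));
    /// assert!(anthropic_models.iter().all(|m| m.provider() == Provider::Anthropic));
    /// ```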
    pub fn models_for_provider(provider: Provider) -> Vec<ModelId> {
        Self::all_models()
            .into_iter()
            .filter(|model| model.provider() == provider)
            .collect()
    }

    /// Get recommended fallback models in order of preference
    pub fn fallback_models() -> Vec<ModelId> {
        vec![
            ModelId::Gemini25FlashPreview,
            ModelId::Gemini25Pro,
            ModelId::GPT5,
            ModelId::OpenAIGptOss20b,
            ModelId::ClaudeOpus45,
            ModelId::ClaudeOpus41,
            ModelId::ClaudeSonnet45,
            ModelId::DeepSeekReasoner,
            ModelId::XaiGrok4,
            ModelId::ZaiGlm46,
            ModelId::OpenRouterGrokCodeFast1,
        ]
    }

    /// Get the default orchestrator model (more capable)
    pub fn default_orchestrator() -> Self {
        ModelId::Gemini25Pro
    }

    /// Get the default subagent model (fast and efficient)
    pub fn default_subagent() -> Self {
        ModelId::Gemini25FlashPreview
    }

    /// Get provider-specific defaults for orchestrator
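    ///
    /// A small sketch of the mapping (illustrative only):
    ///
    /// ```ignore
    /// use vtcode_config::models::{ModelId, Provider};
    ///
    /// assert_eq!(
    ///     ModelId::default_orchestrator_for_provider(Provider::OpenAI),
    ///     ModelId::GPT5
    /// );
    /// ```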
1148    pub fn default_orchestrator_for_provider(provider: Provider) -> Self {
1149        match provider {
1150            Provider::Gemini => ModelId::Gemini25Pro,
1151            Provider::OpenAI => ModelId::GPT5,
1152            Provider::Anthropic => ModelId::ClaudeOpus45,
1153            Provider::DeepSeek => ModelId::DeepSeekReasoner,
1154            Provider::XAI => ModelId::XaiGrok4,
1155            Provider::OpenRouter => ModelId::OpenRouterGrokCodeFast1,
1156            Provider::Ollama => ModelId::OllamaGptOss20b,
1157            Provider::LmStudio => ModelId::LmStudioMetaLlama318BInstruct,
1158            Provider::ZAI => ModelId::ZaiGlm46,
1159            Provider::Moonshot => ModelId::OpenRouterGrokCodeFast1, // Fallback: no Moonshot models available
1160            Provider::Minimax => ModelId::MinimaxM21,
1161            Provider::HuggingFace => ModelId::HuggingFaceOpenAIGptOss120b,
1162        }
1163    }
1164
1165    /// Get provider-specific defaults for subagent
1166    pub fn default_subagent_for_provider(provider: Provider) -> Self {
1167        match provider {
1168            Provider::Gemini => ModelId::Gemini25FlashPreview,
1169            Provider::OpenAI => ModelId::GPT5Mini,
1170            Provider::Anthropic => ModelId::ClaudeSonnet45,
1171            Provider::DeepSeek => ModelId::DeepSeekChat,
1172            Provider::XAI => ModelId::XaiGrok4Code,
1173            Provider::OpenRouter => ModelId::OpenRouterGrokCodeFast1,
1174            Provider::Ollama => ModelId::OllamaQwen317b,
1175            Provider::LmStudio => ModelId::LmStudioQwen257BInstruct,
1176            Provider::ZAI => ModelId::ZaiGlm45Flash,
1177            Provider::Moonshot => ModelId::OpenRouterGrokCodeFast1, // Fallback: no Moonshot models available
1178            Provider::Minimax => ModelId::MinimaxM21Lightning,
1179            Provider::HuggingFace => ModelId::HuggingFaceOpenAIGptOss20b,
1180        }
1181    }
1182
1183    /// Get provider-specific defaults for single agent
1184    pub fn default_single_for_provider(provider: Provider) -> Self {
1185        match provider {
1186            Provider::Gemini => ModelId::Gemini25FlashPreview,
1187            Provider::OpenAI => ModelId::GPT5,
1188            Provider::Anthropic => ModelId::ClaudeOpus45,
1189            Provider::DeepSeek => ModelId::DeepSeekReasoner,
1190            Provider::XAI => ModelId::XaiGrok4,
1191            Provider::OpenRouter => ModelId::OpenRouterGrokCodeFast1,
1192            Provider::Ollama => ModelId::OllamaGptOss20b,
1193            Provider::LmStudio => ModelId::LmStudioMetaLlama318BInstruct,
1194            Provider::ZAI => ModelId::ZaiGlm46,
1195            Provider::Moonshot => ModelId::OpenRouterGrokCodeFast1, // Fallback: no Moonshot models available
1196            Provider::Minimax => ModelId::MinimaxM21,
1197            Provider::HuggingFace => ModelId::HuggingFaceOpenAIGptOss120b,
1198        }
1199    }
1200
1201    /// Check if this is a "flash" variant (optimized for speed)
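    ///
    /// One way to surface speed-optimized options for a provider (sketch only; import
    /// path assumed):
    ///
    /// ```ignore
    /// use vtcode_config::models::{ModelId, Provider};
    ///
    /// let fast_gemini: Vec<ModelId> = ModelId::models_for_provider(Provider::Gemini)
    ///     .into_iter()
    ///     .filter(ModelId::is_flash_variant)
    ///     .collect();
    /// assert!(fast_gemini.contains(&ModelId::Gemini25Flash));
    /// ```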
1202    pub fn is_flash_variant(&self) -> bool {
1203        matches!(
1204            self,
1205            ModelId::Gemini25FlashPreview
1206                | ModelId::Gemini25Flash
1207                | ModelId::Gemini25FlashLite
1208                | ModelId::ZaiGlm45Flash
1209                | ModelId::ZaiGlm46VFlash
1210                | ModelId::ZaiGlm46VFlashX
1211                | ModelId::MinimaxM21Lightning
1212                | ModelId::OllamaGemini3FlashPreviewCloud
1213        )
1214    }
1215
1216    /// Check if this is a "pro" variant (optimized for capability)
1217    pub fn is_pro_variant(&self) -> bool {
1218        matches!(
1219            self,
1220            ModelId::Gemini25Pro
1221                | ModelId::GPT5
1222                | ModelId::GPT5Codex
1223                | ModelId::ClaudeOpus41
                | ModelId::ClaudeOpus45
1224                | ModelId::DeepSeekReasoner
1225                | ModelId::XaiGrok4
1226                | ModelId::ZaiGlm4Plus
1227                | ModelId::ZaiGlm4PlusDeepThinking
1228                | ModelId::ZaiGlm47
1229                | ModelId::ZaiGlm47DeepThinking
1230                | ModelId::ZaiGlm46
1231                | ModelId::ZaiGlm46DeepThinking
1232                | ModelId::MinimaxM21
1233                | ModelId::OllamaGlm47Cloud
1234                | ModelId::OllamaMinimaxM21Cloud
1235        )
1236    }
1237
1238    /// Check if this is an optimized/efficient variant
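    ///
    /// OpenRouter models answer from their generated metadata; all other models fall back
    /// to the hand-maintained list below. Sketch (import path assumed):
    ///
    /// ```ignore
    /// use vtcode_config::models::ModelId;
    ///
    /// assert!(ModelId::GPT5Mini.is_efficient_variant());
    /// assert!(!ModelId::GPT5.is_efficient_variant());
    /// ```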
1239    pub fn is_efficient_variant(&self) -> bool {
1240        if let Some(meta) = self.openrouter_metadata() {
1241            return meta.efficient;
1242        }
1243        matches!(
1244            self,
1245            ModelId::Gemini25FlashPreview
1246                | ModelId::Gemini25Flash
1247                | ModelId::Gemini25FlashLite
1248                | ModelId::GPT5Mini
1249                | ModelId::GPT5Nano
1250                | ModelId::ClaudeHaiku45
1251                | ModelId::DeepSeekChat
1252                | ModelId::XaiGrok4Code
1253                | ModelId::ZaiGlm45Air
1254                | ModelId::ZaiGlm45Airx
1255                | ModelId::ZaiGlm45Flash
1256                | ModelId::ZaiGlm46VFlash
1257                | ModelId::ZaiGlm46VFlashX
1258        )
1259    }
1260
1261    /// Check if this is a top-tier model
1262    pub fn is_top_tier(&self) -> bool {
1263        if let Some(meta) = self.openrouter_metadata() {
1264            return meta.top_tier;
1265        }
1266        matches!(
1267            self,
1268            ModelId::Gemini25Pro
1269                | ModelId::GPT5
1270                | ModelId::GPT5Codex
1271                | ModelId::ClaudeOpus41
                | ModelId::ClaudeOpus45
1272                | ModelId::ClaudeSonnet45
1273                | ModelId::ClaudeSonnet4
1274                | ModelId::DeepSeekReasoner
1275                | ModelId::XaiGrok4
1276                | ModelId::XaiGrok4CodeLatest
1277                | ModelId::ZaiGlm4Plus
1278                | ModelId::ZaiGlm4PlusDeepThinking
1279                | ModelId::ZaiGlm47
1280                | ModelId::ZaiGlm47DeepThinking
1281                | ModelId::ZaiGlm46
1282                | ModelId::ZaiGlm46DeepThinking
1283        )
1284    }
1285
1286    /// Determine whether the model is a reasoning-capable variant
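    ///
    /// Combines the explicit deep-thinking variants with the provider-level
    /// reasoning-effort capability. Sketch (import path assumed):
    ///
    /// ```ignore
    /// use vtcode_config::models::ModelId;
    ///
    /// assert!(ModelId::ZaiGlm46DeepThinking.is_reasoning_variant());
    /// ```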
1287    pub fn is_reasoning_variant(&self) -> bool {
1288        if let Some(meta) = self.openrouter_metadata() {
1289            return meta.reasoning;
1290        }
1291        matches!(
1292            self,
1293            ModelId::ZaiGlm4PlusDeepThinking
1294                | ModelId::ZaiGlm47DeepThinking
1295                | ModelId::ZaiGlm46DeepThinking
1296                | ModelId::ZaiGlm45DeepThinking
1297        ) || self.provider().supports_reasoning_effort(self.as_str())
1298    }
1299
1300    /// Determine whether the model supports tool calls/function execution
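    ///
    /// Models without OpenRouter metadata are treated as tool-call capable; only
    /// OpenRouter entries can opt out through their generated `tool_call` flag.
    ///
    /// ```ignore
    /// use vtcode_config::models::ModelId;
    ///
    /// // Non-OpenRouter models default to `true`.
    /// assert!(ModelId::GPT5.supports_tool_calls());
    /// ```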
1301    pub fn supports_tool_calls(&self) -> bool {
1302        if let Some(meta) = self.openrouter_metadata() {
1303            return meta.tool_call;
1304        }
1305        true
1306    }
1307
1308    /// Get the generation/version string for this model
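    ///
    /// Illustrative values (taken from the match arms below; import path assumed):
    ///
    /// ```ignore
    /// use vtcode_config::models::ModelId;
    ///
    /// assert_eq!(ModelId::Gemini25Pro.generation(), "2.5");
    /// assert_eq!(ModelId::ClaudeOpus41.generation(), "4.1");
    /// ```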
1309    pub fn generation(&self) -> &'static str {
1310        if let Some(meta) = self.openrouter_metadata() {
1311            return meta.generation;
1312        }
1313        match self {
1314            // Gemini generations
1315            ModelId::Gemini25FlashPreview
1316            | ModelId::Gemini25Flash
1317            | ModelId::Gemini25FlashLite
1318            | ModelId::Gemini25Pro => "2.5",
1319            ModelId::Gemini3ProPreview => "3",
1320            // OpenAI generations
1321            ModelId::GPT5
1322            | ModelId::GPT5Codex
1323            | ModelId::GPT5Mini
1324            | ModelId::GPT5Nano
1325            | ModelId::CodexMiniLatest => "5",
1326            // Anthropic generations
1327            ModelId::ClaudeOpus45 | ModelId::ClaudeSonnet45 | ModelId::ClaudeHaiku45 => "4.5",
1328            ModelId::ClaudeOpus41 => "4.1",
1329            ModelId::ClaudeSonnet4 => "4",
1330            // DeepSeek generations
1331            ModelId::DeepSeekChat | ModelId::DeepSeekReasoner => "V3.2-Exp",
1332            // xAI generations
1333            ModelId::XaiGrok4
1334            | ModelId::XaiGrok4Mini
1335            | ModelId::XaiGrok4Code
1336            | ModelId::XaiGrok4CodeLatest
1337            | ModelId::XaiGrok4Vision => "4",
1338            // Z.AI generations
1339            ModelId::ZaiGlm4Plus | ModelId::ZaiGlm4PlusDeepThinking => "4-Plus",
1340            ModelId::ZaiGlm47 | ModelId::ZaiGlm47DeepThinking => "4.7",
1341            ModelId::ZaiGlm46 | ModelId::ZaiGlm46DeepThinking => "4.6",
1342            ModelId::ZaiGlm46V | ModelId::ZaiGlm46VFlash | ModelId::ZaiGlm46VFlashX => "4.6",
1343            ModelId::ZaiGlm45
1344            | ModelId::ZaiGlm45DeepThinking
1345            | ModelId::ZaiGlm45Air
1346            | ModelId::ZaiGlm45X
1347            | ModelId::ZaiGlm45Airx
1348            | ModelId::ZaiGlm45Flash
1349            | ModelId::ZaiGlm45V => "4.5",
1350            ModelId::ZaiGlm432b0414128k => "4-32B",
1351            ModelId::OllamaGptOss20b => "oss",
1352            ModelId::OllamaGptOss20bCloud => "oss-cloud",
1353            ModelId::OllamaGptOss120bCloud => "oss-cloud",
1354            ModelId::OllamaQwen317b => "oss",
1355            ModelId::OllamaDeepseekV32Cloud => "deepseek-v3.2",
1356            ModelId::OllamaQwen3Next80bCloud => "qwen3-next",
1357            ModelId::OllamaMistralLarge3675bCloud => "mistral-large-3",
1358            ModelId::OllamaKimiK2ThinkingCloud => "kimi-k2-thinking",
1359            ModelId::OllamaQwen3Coder480bCloud => "qwen3-coder-cloud",
1360            ModelId::OllamaGlm46Cloud => "glm-cloud",
1361            ModelId::OllamaMinimaxM2Cloud => "minimax-cloud",
1362            ModelId::LmStudioMetaLlama38BInstruct => "meta-llama-3",
1363            ModelId::LmStudioMetaLlama318BInstruct => "meta-llama-3.1",
1364            ModelId::LmStudioQwen257BInstruct => "qwen2.5",
1365            ModelId::LmStudioGemma22BIt => "gemma-2",
1366            ModelId::LmStudioGemma29BIt => "gemma-2",
1367            ModelId::LmStudioPhi31Mini4kInstruct => "phi-3.1",
1368            ModelId::MinimaxM21 | ModelId::MinimaxM21Lightning => "M2.1",
1369            ModelId::MinimaxM2 => "M2",
1370            ModelId::HuggingFaceDeepseekV32 | ModelId::HuggingFaceDeepseekV32Novita => "v3.2",
1371            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => "v2-flash",
1372            ModelId::HuggingFaceGlm47 => "4.7",
1373            ModelId::HuggingFaceKimiK2Thinking => "k2",
1374            ModelId::HuggingFaceMinimaxM21Novita => "m2.1",
1375            ModelId::HuggingFaceOpenAIGptOss20b | ModelId::HuggingFaceOpenAIGptOss120b => "oss",
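            // Every variant without OpenRouter metadata must be matched explicitly above;
            // a new variant that falls through to this arm panics at runtime.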
1376            _ => unreachable!(),
1377        }
1378    }
1379}
1380
1381impl fmt::Display for ModelId {
1382    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1383        write!(f, "{}", self.as_str())
1384    }
1385}
1386
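/// Parses a model identifier string, checking OpenRouter identifiers first and then the
/// provider-specific constants from `crate::constants::models`.
///
/// Sketch (import path assumed):
///
/// ```ignore
/// use vtcode_config::models::ModelId;
///
/// assert!("invalid-model".parse::<ModelId>().is_err());
/// ```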
1387impl FromStr for ModelId {
1388    type Err = ModelParseError;
1389
1390    fn from_str(s: &str) -> Result<Self, Self::Err> {
1391        if let Some(model) = Self::parse_openrouter_model(s) {
1392            return Ok(model);
1393        }
1394
1395        use crate::constants::models;
1396        match s {
1397            // Gemini models
1398            s if s == models::GEMINI_2_5_FLASH_PREVIEW => Ok(ModelId::Gemini25FlashPreview),
1399            s if s == models::GEMINI_2_5_FLASH => Ok(ModelId::Gemini25Flash),
1400            s if s == models::GEMINI_2_5_FLASH_LITE => Ok(ModelId::Gemini25FlashLite),
1401            s if s == models::GEMINI_2_5_PRO => Ok(ModelId::Gemini25Pro),
1402            s if s == models::GEMINI_3_PRO_PREVIEW => Ok(ModelId::Gemini3ProPreview),
1403            // OpenAI models
1404            s if s == models::GPT_5 => Ok(ModelId::GPT5),
1405            s if s == models::GPT_5_CODEX => Ok(ModelId::GPT5Codex),
1406            s if s == models::GPT_5_MINI => Ok(ModelId::GPT5Mini),
1407            s if s == models::GPT_5_NANO => Ok(ModelId::GPT5Nano),
1408            s if s == models::CODEX_MINI_LATEST => Ok(ModelId::CodexMiniLatest),
1409            s if s == models::openai::GPT_OSS_20B => Ok(ModelId::OpenAIGptOss20b),
1410            s if s == models::openai::GPT_OSS_120B => Ok(ModelId::OpenAIGptOss120b),
1411            // Anthropic models
1412            s if s == models::CLAUDE_OPUS_4_5 => Ok(ModelId::ClaudeOpus45),
1413            s if s == models::CLAUDE_OPUS_4_1 => Ok(ModelId::ClaudeOpus41),
1414            s if s == models::CLAUDE_SONNET_4_5 => Ok(ModelId::ClaudeSonnet45),
1415            s if s == models::CLAUDE_HAIKU_4_5 => Ok(ModelId::ClaudeHaiku45),
1416            s if s == models::CLAUDE_SONNET_4_5_20250929 => Ok(ModelId::ClaudeSonnet4),
1417            // DeepSeek models
1418            s if s == models::DEEPSEEK_CHAT => Ok(ModelId::DeepSeekChat),
1419            s if s == models::DEEPSEEK_REASONER => Ok(ModelId::DeepSeekReasoner),
1420            // xAI models
1421            s if s == models::xai::GROK_4 => Ok(ModelId::XaiGrok4),
1422            s if s == models::xai::GROK_4_MINI => Ok(ModelId::XaiGrok4Mini),
1423            s if s == models::xai::GROK_4_CODE => Ok(ModelId::XaiGrok4Code),
1424            s if s == models::xai::GROK_4_CODE_LATEST => Ok(ModelId::XaiGrok4CodeLatest),
1425            s if s == models::xai::GROK_4_VISION => Ok(ModelId::XaiGrok4Vision),
1426            // Z.AI models
1427            s if s == models::zai::GLM_4_PLUS => Ok(ModelId::ZaiGlm4Plus),
1428            s if s == models::zai::GLM_4_PLUS_DEEP_THINKING => Ok(ModelId::ZaiGlm4PlusDeepThinking),
1429            s if s == models::zai::GLM_4_7 => Ok(ModelId::ZaiGlm47),
1430            s if s == models::zai::GLM_4_7_DEEP_THINKING => Ok(ModelId::ZaiGlm47DeepThinking),
1431            s if s == models::zai::GLM_4_6 => Ok(ModelId::ZaiGlm46),
1432            s if s == models::zai::GLM_4_6_DEEP_THINKING => Ok(ModelId::ZaiGlm46DeepThinking),
1433            s if s == models::zai::GLM_4_6V => Ok(ModelId::ZaiGlm46V),
1434            s if s == models::zai::GLM_4_6V_FLASH => Ok(ModelId::ZaiGlm46VFlash),
1435            s if s == models::zai::GLM_4_6V_FLASHX => Ok(ModelId::ZaiGlm46VFlashX),
1436            s if s == models::zai::GLM_4_5 => Ok(ModelId::ZaiGlm45),
1437            s if s == models::zai::GLM_4_5_DEEP_THINKING => Ok(ModelId::ZaiGlm45DeepThinking),
1438            s if s == models::zai::GLM_4_5_AIR => Ok(ModelId::ZaiGlm45Air),
1439            s if s == models::zai::GLM_4_5_X => Ok(ModelId::ZaiGlm45X),
1440            s if s == models::zai::GLM_4_5_AIRX => Ok(ModelId::ZaiGlm45Airx),
1441            s if s == models::zai::GLM_4_5_FLASH => Ok(ModelId::ZaiGlm45Flash),
1442            s if s == models::zai::GLM_4_5V => Ok(ModelId::ZaiGlm45V),
1443            s if s == models::zai::GLM_4_32B_0414_128K => Ok(ModelId::ZaiGlm432b0414128k),
1444            s if s == models::ollama::GPT_OSS_20B => Ok(ModelId::OllamaGptOss20b),
1445            s if s == models::ollama::GPT_OSS_20B_CLOUD => Ok(ModelId::OllamaGptOss20bCloud),
1446            s if s == models::ollama::GPT_OSS_120B_CLOUD => Ok(ModelId::OllamaGptOss120bCloud),
1447            s if s == models::ollama::QWEN3_1_7B => Ok(ModelId::OllamaQwen317b),
1448            s if s == models::ollama::DEEPSEEK_V32_CLOUD => Ok(ModelId::OllamaDeepseekV32Cloud),
1449            s if s == models::ollama::QWEN3_NEXT_80B_CLOUD => Ok(ModelId::OllamaQwen3Next80bCloud),
1450            s if s == models::ollama::MISTRAL_LARGE_3_675B_CLOUD => {
1451                Ok(ModelId::OllamaMistralLarge3675bCloud)
1452            }
1453            s if s == models::ollama::KIMI_K2_THINKING_CLOUD => {
1454                Ok(ModelId::OllamaKimiK2ThinkingCloud)
1455            }
1457            s if s == models::ollama::QWEN3_CODER_480B_CLOUD => {
1458                Ok(ModelId::OllamaQwen3Coder480bCloud)
1459            }
1460            s if s == models::ollama::GLM_46_CLOUD => Ok(ModelId::OllamaGlm46Cloud),
1461            s if s == models::ollama::GLM_47_CLOUD => Ok(ModelId::OllamaGlm47Cloud),
1462            s if s == models::ollama::GEMINI_3_PRO_PREVIEW_LATEST_CLOUD => {
1463                Ok(ModelId::OllamaGemini3ProPreviewLatestCloud)
1464            }
1465            s if s == models::ollama::GEMINI_3_FLASH_PREVIEW_CLOUD => {
1466                Ok(ModelId::OllamaGemini3FlashPreviewCloud)
1467            }
1468            s if s == models::ollama::MINIMAX_M2_CLOUD => Ok(ModelId::OllamaMinimaxM2Cloud),
1469            s if s == models::ollama::MINIMAX_M21_CLOUD => Ok(ModelId::OllamaMinimaxM21Cloud),
1470            s if s == models::ollama::DEVSTRAL_2_123B_CLOUD => {
1471                Ok(ModelId::OllamaDevstral2123bCloud)
1472            }
1473            s if s == models::ollama::NEMOTRON_3_NANO_30B_CLOUD => {
1474                Ok(ModelId::OllamaNemotron3Nano30bCloud)
1475            }
1476            s if s == models::lmstudio::META_LLAMA_3_8B_INSTRUCT => {
1477                Ok(ModelId::LmStudioMetaLlama38BInstruct)
1478            }
1479            s if s == models::lmstudio::META_LLAMA_31_8B_INSTRUCT => {
1480                Ok(ModelId::LmStudioMetaLlama318BInstruct)
1481            }
1482            s if s == models::lmstudio::QWEN25_7B_INSTRUCT => Ok(ModelId::LmStudioQwen257BInstruct),
1483            s if s == models::lmstudio::GEMMA_2_2B_IT => Ok(ModelId::LmStudioGemma22BIt),
1484            s if s == models::lmstudio::GEMMA_2_9B_IT => Ok(ModelId::LmStudioGemma29BIt),
1485            s if s == models::lmstudio::PHI_31_MINI_4K_INSTRUCT => {
1486                Ok(ModelId::LmStudioPhi31Mini4kInstruct)
1487            }
1488            s if s == models::minimax::MINIMAX_M2_1 => Ok(ModelId::MinimaxM21),
1489            s if s == models::minimax::MINIMAX_M2_1_LIGHTNING => Ok(ModelId::MinimaxM21Lightning),
1490            s if s == models::minimax::MINIMAX_M2 => Ok(ModelId::MinimaxM2),
1491            // Hugging Face models
1492            s if s == models::huggingface::DEEPSEEK_V32 => Ok(ModelId::HuggingFaceDeepseekV32),
1493            s if s == models::huggingface::OPENAI_GPT_OSS_20B => {
1494                Ok(ModelId::HuggingFaceOpenAIGptOss20b)
1495            }
1496            s if s == models::huggingface::OPENAI_GPT_OSS_120B => {
1497                Ok(ModelId::HuggingFaceOpenAIGptOss120b)
1498            }
1499            s if s == models::huggingface::ZAI_GLM_47 => Ok(ModelId::HuggingFaceGlm47),
1500            s if s == models::huggingface::MOONSHOT_KIMI_K2_THINKING => {
1501                Ok(ModelId::HuggingFaceKimiK2Thinking)
1502            }
1503            s if s == models::huggingface::MINIMAX_M2_1_NOVITA => {
1504                Ok(ModelId::HuggingFaceMinimaxM21Novita)
1505            }
1506            s if s == models::huggingface::DEEPSEEK_V32_NOVITA => {
1507                Ok(ModelId::HuggingFaceDeepseekV32Novita)
1508            }
1509            s if s == models::huggingface::XIAOMI_MIMO_V2_FLASH_NOVITA => {
1510                Ok(ModelId::HuggingFaceXiaomiMimoV2FlashNovita)
1511            }
1512            _ => Err(ModelParseError::InvalidModel(s.to_string())),
1513        }
1514    }
1515}
1516
1517/// Error type for model parsing failures
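///
/// The `Display` impl lists all supported identifiers or providers, so the error can be
/// shown to the user as-is. Sketch (import path assumed):
///
/// ```ignore
/// use vtcode_config::models::ModelId;
///
/// match "invalid-model".parse::<ModelId>() {
///     Ok(model) => println!("using {model}"),
///     Err(err) => eprintln!("{err}"), // "Invalid model identifier: 'invalid-model'. ..."
/// }
/// ```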
1518#[derive(Debug, Clone, PartialEq)]
1519pub enum ModelParseError {
1520    InvalidModel(String),
1521    InvalidProvider(String),
1522}
1523
1524impl fmt::Display for ModelParseError {
1525    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1526        match self {
1527            ModelParseError::InvalidModel(model) => {
1528                write!(
1529                    f,
1530                    "Invalid model identifier: '{}'. Supported models: {}",
1531                    model,
1532                    ModelId::all_models()
1533                        .iter()
1534                        .map(|m| m.as_str())
1535                        .collect::<Vec<_>>()
1536                        .join(", ")
1537                )
1538            }
1539            ModelParseError::InvalidProvider(provider) => {
1540                write!(
1541                    f,
1542                    "Invalid provider: '{}'. Supported providers: {}",
1543                    provider,
1544                    Provider::all_providers()
1545                        .iter()
1546                        .map(|p| p.to_string())
1547                        .collect::<Vec<_>>()
1548                        .join(", ")
1549                )
1550            }
1551        }
1552    }
1553}
1554
1555impl std::error::Error for ModelParseError {}
1556
1557#[cfg(test)]
1558mod tests {
1559    use super::*;
1560    use crate::constants::models;
1561
1562    #[test]
1563    fn test_model_string_conversion() {
1564        // Gemini models
1565        assert_eq!(
1566            ModelId::Gemini25FlashPreview.as_str(),
1567            models::GEMINI_2_5_FLASH_PREVIEW
1568        );
1569        assert_eq!(ModelId::Gemini25Flash.as_str(), models::GEMINI_2_5_FLASH);
1570        assert_eq!(
1571            ModelId::Gemini25FlashLite.as_str(),
1572            models::GEMINI_2_5_FLASH_LITE
1573        );
1574        assert_eq!(ModelId::Gemini25Pro.as_str(), models::GEMINI_2_5_PRO);
1575        // OpenAI models
1576        assert_eq!(ModelId::GPT5.as_str(), models::GPT_5);
1577        assert_eq!(ModelId::GPT5Codex.as_str(), models::GPT_5_CODEX);
1578        assert_eq!(ModelId::GPT5Mini.as_str(), models::GPT_5_MINI);
1579        assert_eq!(ModelId::GPT5Nano.as_str(), models::GPT_5_NANO);
1580        assert_eq!(ModelId::CodexMiniLatest.as_str(), models::CODEX_MINI_LATEST);
1581        // Anthropic models
1582        assert_eq!(ModelId::ClaudeSonnet45.as_str(), models::CLAUDE_SONNET_4_5);
1583        assert_eq!(ModelId::ClaudeHaiku45.as_str(), models::CLAUDE_HAIKU_4_5);
1584        assert_eq!(
1585            ModelId::ClaudeSonnet4.as_str(),
1586            models::CLAUDE_SONNET_4_5_20250929
1587        );
1588        assert_eq!(ModelId::ClaudeOpus41.as_str(), models::CLAUDE_OPUS_4_1);
1589        // DeepSeek models
1590        assert_eq!(ModelId::DeepSeekChat.as_str(), models::DEEPSEEK_CHAT);
1591        assert_eq!(
1592            ModelId::DeepSeekReasoner.as_str(),
1593            models::DEEPSEEK_REASONER
1594        );
1595        // xAI models
1596        assert_eq!(ModelId::XaiGrok4.as_str(), models::xai::GROK_4);
1597        assert_eq!(ModelId::XaiGrok4Mini.as_str(), models::xai::GROK_4_MINI);
1598        assert_eq!(ModelId::XaiGrok4Code.as_str(), models::xai::GROK_4_CODE);
1599        assert_eq!(
1600            ModelId::XaiGrok4CodeLatest.as_str(),
1601            models::xai::GROK_4_CODE_LATEST
1602        );
1603        assert_eq!(ModelId::XaiGrok4Vision.as_str(), models::xai::GROK_4_VISION);
1604        // Z.AI models
1605        assert_eq!(ModelId::ZaiGlm46.as_str(), models::zai::GLM_4_6);
1606        assert_eq!(ModelId::ZaiGlm45.as_str(), models::zai::GLM_4_5);
1607        assert_eq!(ModelId::ZaiGlm45Air.as_str(), models::zai::GLM_4_5_AIR);
1608        assert_eq!(ModelId::ZaiGlm45X.as_str(), models::zai::GLM_4_5_X);
1609        assert_eq!(ModelId::ZaiGlm45Airx.as_str(), models::zai::GLM_4_5_AIRX);
1610        assert_eq!(ModelId::ZaiGlm45Flash.as_str(), models::zai::GLM_4_5_FLASH);
1611        assert_eq!(
1612            ModelId::ZaiGlm432b0414128k.as_str(),
1613            models::zai::GLM_4_32B_0414_128K
1614        );
1615        for entry in openrouter_generated::ENTRIES {
1616            assert_eq!(entry.variant.as_str(), entry.id);
1617        }
1618    }
1619
1620    #[test]
1621    fn test_model_from_string() {
1622        // Gemini models
1623        assert_eq!(
1624            models::GEMINI_2_5_FLASH_PREVIEW.parse::<ModelId>().unwrap(),
1625            ModelId::Gemini25FlashPreview
1626        );
1627        assert_eq!(
1628            models::GEMINI_2_5_FLASH.parse::<ModelId>().unwrap(),
1629            ModelId::Gemini25Flash
1630        );
1631        assert_eq!(
1632            models::GEMINI_2_5_FLASH_LITE.parse::<ModelId>().unwrap(),
1633            ModelId::Gemini25FlashLite
1634        );
1635        assert_eq!(
1636            models::GEMINI_2_5_PRO.parse::<ModelId>().unwrap(),
1637            ModelId::Gemini25Pro
1638        );
1639        // OpenAI models
1640        assert_eq!(models::GPT_5.parse::<ModelId>().unwrap(), ModelId::GPT5);
1641        assert_eq!(
1642            models::GPT_5_CODEX.parse::<ModelId>().unwrap(),
1643            ModelId::GPT5Codex
1644        );
1645        assert_eq!(
1646            models::GPT_5_MINI.parse::<ModelId>().unwrap(),
1647            ModelId::GPT5Mini
1648        );
1649        assert_eq!(
1650            models::GPT_5_NANO.parse::<ModelId>().unwrap(),
1651            ModelId::GPT5Nano
1652        );
1653        assert_eq!(
1654            models::CODEX_MINI_LATEST.parse::<ModelId>().unwrap(),
1655            ModelId::CodexMiniLatest
1656        );
1657        assert_eq!(
1658            models::openai::GPT_OSS_20B.parse::<ModelId>().unwrap(),
1659            ModelId::OpenAIGptOss20b
1660        );
1661        assert_eq!(
1662            models::openai::GPT_OSS_120B.parse::<ModelId>().unwrap(),
1663            ModelId::OpenAIGptOss120b
1664        );
1665        // Anthropic models
1666        assert_eq!(
1667            models::CLAUDE_SONNET_4_5.parse::<ModelId>().unwrap(),
1668            ModelId::ClaudeSonnet45
1669        );
1670        assert_eq!(
1671            models::CLAUDE_HAIKU_4_5.parse::<ModelId>().unwrap(),
1672            ModelId::ClaudeHaiku45
1673        );
1674        assert_eq!(
1675            models::CLAUDE_SONNET_4_5_20250929
1676                .parse::<ModelId>()
1677                .unwrap(),
1678            ModelId::ClaudeSonnet4
1679        );
1680        assert_eq!(
1681            models::CLAUDE_OPUS_4_1.parse::<ModelId>().unwrap(),
1682            ModelId::ClaudeOpus41
1683        );
1684        // DeepSeek models
1685        assert_eq!(
1686            models::DEEPSEEK_CHAT.parse::<ModelId>().unwrap(),
1687            ModelId::DeepSeekChat
1688        );
1689        assert_eq!(
1690            models::DEEPSEEK_REASONER.parse::<ModelId>().unwrap(),
1691            ModelId::DeepSeekReasoner
1692        );
1693        // xAI models
1694        assert_eq!(
1695            models::xai::GROK_4.parse::<ModelId>().unwrap(),
1696            ModelId::XaiGrok4
1697        );
1698        assert_eq!(
1699            models::xai::GROK_4_MINI.parse::<ModelId>().unwrap(),
1700            ModelId::XaiGrok4Mini
1701        );
1702        assert_eq!(
1703            models::xai::GROK_4_CODE.parse::<ModelId>().unwrap(),
1704            ModelId::XaiGrok4Code
1705        );
1706        assert_eq!(
1707            models::xai::GROK_4_CODE_LATEST.parse::<ModelId>().unwrap(),
1708            ModelId::XaiGrok4CodeLatest
1709        );
1710        assert_eq!(
1711            models::xai::GROK_4_VISION.parse::<ModelId>().unwrap(),
1712            ModelId::XaiGrok4Vision
1713        );
1714        // Z.AI models
1715        assert_eq!(
1716            models::zai::GLM_4_6.parse::<ModelId>().unwrap(),
1717            ModelId::ZaiGlm46
1718        );
1719        assert_eq!(
1720            models::zai::GLM_4_5.parse::<ModelId>().unwrap(),
1721            ModelId::ZaiGlm45
1722        );
1723        assert_eq!(
1724            models::zai::GLM_4_5_AIR.parse::<ModelId>().unwrap(),
1725            ModelId::ZaiGlm45Air
1726        );
1727        assert_eq!(
1728            models::zai::GLM_4_5_X.parse::<ModelId>().unwrap(),
1729            ModelId::ZaiGlm45X
1730        );
1731        assert_eq!(
1732            models::zai::GLM_4_5_AIRX.parse::<ModelId>().unwrap(),
1733            ModelId::ZaiGlm45Airx
1734        );
1735        assert_eq!(
1736            models::zai::GLM_4_5_FLASH.parse::<ModelId>().unwrap(),
1737            ModelId::ZaiGlm45Flash
1738        );
1739        assert_eq!(
1740            models::zai::GLM_4_32B_0414_128K.parse::<ModelId>().unwrap(),
1741            ModelId::ZaiGlm432b0414128k
1742        );
1743        for entry in openrouter_generated::ENTRIES {
1744            assert_eq!(entry.id.parse::<ModelId>().unwrap(), entry.variant);
1745        }
1746        // Invalid model
1747        assert!("invalid-model".parse::<ModelId>().is_err());
1748    }
1749
1750    #[test]
1751    fn test_provider_parsing() {
1752        assert_eq!("gemini".parse::<Provider>().unwrap(), Provider::Gemini);
1753        assert_eq!("openai".parse::<Provider>().unwrap(), Provider::OpenAI);
1754        assert_eq!(
1755            "anthropic".parse::<Provider>().unwrap(),
1756            Provider::Anthropic
1757        );
1758        assert_eq!("deepseek".parse::<Provider>().unwrap(), Provider::DeepSeek);
1759        assert_eq!(
1760            "openrouter".parse::<Provider>().unwrap(),
1761            Provider::OpenRouter
1762        );
1763        assert_eq!("xai".parse::<Provider>().unwrap(), Provider::XAI);
1764        assert_eq!("zai".parse::<Provider>().unwrap(), Provider::ZAI);
1765        assert_eq!("moonshot".parse::<Provider>().unwrap(), Provider::Moonshot);
1766        assert_eq!("lmstudio".parse::<Provider>().unwrap(), Provider::LmStudio);
1767        assert!("invalid-provider".parse::<Provider>().is_err());
1768    }
1769
1770    #[test]
1771    fn test_model_providers() {
1772        assert_eq!(ModelId::Gemini25FlashPreview.provider(), Provider::Gemini);
1773        assert_eq!(ModelId::GPT5.provider(), Provider::OpenAI);
1774        assert_eq!(ModelId::GPT5Codex.provider(), Provider::OpenAI);
1775        assert_eq!(ModelId::ClaudeSonnet45.provider(), Provider::Anthropic);
1776        assert_eq!(ModelId::ClaudeHaiku45.provider(), Provider::Anthropic);
1777        assert_eq!(ModelId::ClaudeSonnet4.provider(), Provider::Anthropic);
1778        assert_eq!(ModelId::DeepSeekChat.provider(), Provider::DeepSeek);
1779        assert_eq!(ModelId::XaiGrok4.provider(), Provider::XAI);
1780        assert_eq!(ModelId::ZaiGlm46.provider(), Provider::ZAI);
1781        assert_eq!(ModelId::OllamaGptOss20b.provider(), Provider::Ollama);
1782        assert_eq!(ModelId::OllamaGptOss120bCloud.provider(), Provider::Ollama);
1783        assert_eq!(ModelId::OllamaQwen317b.provider(), Provider::Ollama);
1784        assert_eq!(
1785            ModelId::LmStudioMetaLlama38BInstruct.provider(),
1786            Provider::LmStudio
1787        );
1788        assert_eq!(
1789            ModelId::LmStudioMetaLlama318BInstruct.provider(),
1790            Provider::LmStudio
1791        );
1792        assert_eq!(
1793            ModelId::LmStudioQwen257BInstruct.provider(),
1794            Provider::LmStudio
1795        );
1796        assert_eq!(ModelId::LmStudioGemma22BIt.provider(), Provider::LmStudio);
1797        assert_eq!(ModelId::LmStudioGemma29BIt.provider(), Provider::LmStudio);
1798        assert_eq!(
1799            ModelId::LmStudioPhi31Mini4kInstruct.provider(),
1800            Provider::LmStudio
1801        );
1802        assert_eq!(
1803            ModelId::OpenRouterGrokCodeFast1.provider(),
1804            Provider::OpenRouter
1805        );
1806        assert_eq!(
1807            ModelId::OpenRouterAnthropicClaudeSonnet45.provider(),
1808            Provider::OpenRouter
1809        );
1810
1811        for entry in openrouter_generated::ENTRIES {
1812            assert_eq!(entry.variant.provider(), Provider::OpenRouter);
1813        }
1814    }
1815
1816    #[test]
1817    fn test_provider_defaults() {
1818        assert_eq!(
1819            ModelId::default_orchestrator_for_provider(Provider::Gemini),
1820            ModelId::Gemini25Pro
1821        );
1822        assert_eq!(
1823            ModelId::default_orchestrator_for_provider(Provider::OpenAI),
1824            ModelId::GPT5
1825        );
1826        assert_eq!(
1827            ModelId::default_orchestrator_for_provider(Provider::Anthropic),
1828            ModelId::ClaudeOpus45
1829        );
1830        assert_eq!(
1831            ModelId::default_orchestrator_for_provider(Provider::DeepSeek),
1832            ModelId::DeepSeekReasoner
1833        );
1834        assert_eq!(
1835            ModelId::default_orchestrator_for_provider(Provider::OpenRouter),
1836            ModelId::OpenRouterGrokCodeFast1
1837        );
1838        assert_eq!(
1839            ModelId::default_orchestrator_for_provider(Provider::XAI),
1840            ModelId::XaiGrok4
1841        );
1842        assert_eq!(
1843            ModelId::default_orchestrator_for_provider(Provider::Ollama),
1844            ModelId::OllamaGptOss20b
1845        );
1846        assert_eq!(
1847            ModelId::default_orchestrator_for_provider(Provider::LmStudio),
1848            ModelId::LmStudioMetaLlama318BInstruct
1849        );
1850        assert_eq!(
1851            ModelId::default_orchestrator_for_provider(Provider::ZAI),
1852            ModelId::ZaiGlm46
1853        );
1854
1855        assert_eq!(
1856            ModelId::default_subagent_for_provider(Provider::Gemini),
1857            ModelId::Gemini25FlashPreview
1858        );
1859        assert_eq!(
1860            ModelId::default_subagent_for_provider(Provider::OpenAI),
1861            ModelId::GPT5Mini
1862        );
1863        assert_eq!(
1864            ModelId::default_subagent_for_provider(Provider::Anthropic),
1865            ModelId::ClaudeSonnet45
1866        );
1867        assert_eq!(
1868            ModelId::default_subagent_for_provider(Provider::DeepSeek),
1869            ModelId::DeepSeekChat
1870        );
1871        assert_eq!(
1872            ModelId::default_subagent_for_provider(Provider::OpenRouter),
1873            ModelId::OpenRouterGrokCodeFast1
1874        );
1875        assert_eq!(
1876            ModelId::default_subagent_for_provider(Provider::XAI),
1877            ModelId::XaiGrok4Code
1878        );
1879        assert_eq!(
1880            ModelId::default_subagent_for_provider(Provider::Ollama),
1881            ModelId::OllamaQwen317b
1882        );
1883        assert_eq!(
1884            ModelId::default_subagent_for_provider(Provider::LmStudio),
1885            ModelId::LmStudioQwen257BInstruct
1886        );
1887        assert_eq!(
1888            ModelId::default_subagent_for_provider(Provider::ZAI),
1889            ModelId::ZaiGlm45Flash
1890        );
1891        // Moonshot provider now uses OpenRouter models instead of direct API
1892
1893        assert_eq!(
1894            ModelId::default_single_for_provider(Provider::DeepSeek),
1895            ModelId::DeepSeekReasoner
1896        );
1897        assert_eq!(
1898            ModelId::default_single_for_provider(Provider::Ollama),
1899            ModelId::OllamaGptOss20b
1900        );
1901        assert_eq!(
1902            ModelId::default_single_for_provider(Provider::LmStudio),
1903            ModelId::LmStudioMetaLlama318BInstruct
1904        );
1905    }
1906
1907    #[test]
1908    fn test_model_defaults() {
1909        assert_eq!(ModelId::default(), ModelId::Gemini25FlashPreview);
1910        assert_eq!(ModelId::default_orchestrator(), ModelId::Gemini25Pro);
1911        assert_eq!(ModelId::default_subagent(), ModelId::Gemini25FlashPreview);
1912    }
1913
1914    #[test]
1915    fn test_model_variants() {
1916        // Flash variants
1917        assert!(ModelId::Gemini25FlashPreview.is_flash_variant());
1918        assert!(ModelId::Gemini25Flash.is_flash_variant());
1919        assert!(ModelId::Gemini25FlashLite.is_flash_variant());
1920        assert!(!ModelId::GPT5.is_flash_variant());
1921        assert!(ModelId::ZaiGlm45Flash.is_flash_variant());
1922
1923        // Pro variants
1924        assert!(ModelId::Gemini25Pro.is_pro_variant());
1925        assert!(ModelId::GPT5.is_pro_variant());
1926        assert!(ModelId::GPT5Codex.is_pro_variant());
1927        assert!(ModelId::DeepSeekReasoner.is_pro_variant());
1928        assert!(ModelId::ZaiGlm46.is_pro_variant());
1929        assert!(!ModelId::Gemini25FlashPreview.is_pro_variant());
1930
1931        // Efficient variants
1932        assert!(ModelId::Gemini25FlashPreview.is_efficient_variant());
1933        assert!(ModelId::Gemini25Flash.is_efficient_variant());
1934        assert!(ModelId::Gemini25FlashLite.is_efficient_variant());
1935        assert!(ModelId::GPT5Mini.is_efficient_variant());
1936        assert!(ModelId::ClaudeHaiku45.is_efficient_variant());
1937        assert!(ModelId::XaiGrok4Code.is_efficient_variant());
1938        assert!(ModelId::DeepSeekChat.is_efficient_variant());
1939        assert!(ModelId::ZaiGlm45Air.is_efficient_variant());
1940        assert!(ModelId::ZaiGlm45Airx.is_efficient_variant());
1941        assert!(ModelId::ZaiGlm45Flash.is_efficient_variant());
1942        assert!(!ModelId::GPT5.is_efficient_variant());
1943
1944        for entry in openrouter_generated::ENTRIES {
1945            assert_eq!(entry.variant.is_efficient_variant(), entry.efficient);
1946        }
1947
1948        // Top tier models
1949        assert!(ModelId::Gemini25Pro.is_top_tier());
1950        assert!(ModelId::GPT5.is_top_tier());
1951        assert!(ModelId::GPT5Codex.is_top_tier());
1952        assert!(ModelId::ClaudeSonnet45.is_top_tier());
1953        assert!(ModelId::ClaudeSonnet4.is_top_tier());
1954        assert!(ModelId::XaiGrok4.is_top_tier());
1955        assert!(ModelId::XaiGrok4CodeLatest.is_top_tier());
1956        assert!(ModelId::DeepSeekReasoner.is_top_tier());
1957        assert!(ModelId::ZaiGlm46.is_top_tier());
1958        assert!(!ModelId::Gemini25FlashPreview.is_top_tier());
1959        assert!(!ModelId::ClaudeHaiku45.is_top_tier());
1960
1961        for entry in openrouter_generated::ENTRIES {
1962            assert_eq!(entry.variant.is_top_tier(), entry.top_tier);
1963        }
1964    }
1965
1966    #[test]
1967    fn test_model_generation() {
1968        // Gemini generations
1969        assert_eq!(ModelId::Gemini25FlashPreview.generation(), "2.5");
1970        assert_eq!(ModelId::Gemini25Flash.generation(), "2.5");
1971        assert_eq!(ModelId::Gemini25FlashLite.generation(), "2.5");
1972        assert_eq!(ModelId::Gemini25Pro.generation(), "2.5");
1973
1974        // OpenAI generations
1975        assert_eq!(ModelId::GPT5.generation(), "5");
1976        assert_eq!(ModelId::GPT5Codex.generation(), "5");
1977        assert_eq!(ModelId::GPT5Mini.generation(), "5");
1978        assert_eq!(ModelId::GPT5Nano.generation(), "5");
1979        assert_eq!(ModelId::CodexMiniLatest.generation(), "5");
1980
1981        // Anthropic generations
1982        assert_eq!(ModelId::ClaudeSonnet45.generation(), "4.5");
1983        assert_eq!(ModelId::ClaudeHaiku45.generation(), "4.5");
1984        assert_eq!(ModelId::ClaudeSonnet4.generation(), "4");
1985        assert_eq!(ModelId::ClaudeOpus41.generation(), "4.1");
1986
1987        // DeepSeek generations
1988        assert_eq!(ModelId::DeepSeekChat.generation(), "V3.2-Exp");
1989        assert_eq!(ModelId::DeepSeekReasoner.generation(), "V3.2-Exp");
1990
1991        // xAI generations
1992        assert_eq!(ModelId::XaiGrok4.generation(), "4");
1993        assert_eq!(ModelId::XaiGrok4Mini.generation(), "4");
1994        assert_eq!(ModelId::XaiGrok4Code.generation(), "4");
1995        assert_eq!(ModelId::XaiGrok4CodeLatest.generation(), "4");
1996        assert_eq!(ModelId::XaiGrok4Vision.generation(), "4");
1997        // Z.AI generations
1998        assert_eq!(ModelId::ZaiGlm46.generation(), "4.6");
1999        assert_eq!(ModelId::ZaiGlm45.generation(), "4.5");
2000        assert_eq!(ModelId::ZaiGlm45Air.generation(), "4.5");
2001        assert_eq!(ModelId::ZaiGlm45X.generation(), "4.5");
2002        assert_eq!(ModelId::ZaiGlm45Airx.generation(), "4.5");
2003        assert_eq!(ModelId::ZaiGlm45Flash.generation(), "4.5");
2004        assert_eq!(ModelId::ZaiGlm432b0414128k.generation(), "4-32B");
2005        assert_eq!(
2006            ModelId::LmStudioMetaLlama38BInstruct.generation(),
2007            "meta-llama-3"
2008        );
2009        assert_eq!(
2010            ModelId::LmStudioMetaLlama318BInstruct.generation(),
2011            "meta-llama-3.1"
2012        );
2013        assert_eq!(ModelId::LmStudioQwen257BInstruct.generation(), "qwen2.5");
2014        assert_eq!(ModelId::LmStudioGemma22BIt.generation(), "gemma-2");
2015        assert_eq!(ModelId::LmStudioGemma29BIt.generation(), "gemma-2");
2016        assert_eq!(ModelId::LmStudioPhi31Mini4kInstruct.generation(), "phi-3.1");
2017
2018        for entry in openrouter_generated::ENTRIES {
2019            assert_eq!(entry.variant.generation(), entry.generation);
2020        }
2021    }
2022
2023    #[test]
2024    fn test_models_for_provider() {
2025        let gemini_models = ModelId::models_for_provider(Provider::Gemini);
2026        assert!(gemini_models.contains(&ModelId::Gemini25Pro));
2027        assert!(!gemini_models.contains(&ModelId::GPT5));
2028
2029        let openai_models = ModelId::models_for_provider(Provider::OpenAI);
2030        assert!(openai_models.contains(&ModelId::GPT5));
2031        assert!(openai_models.contains(&ModelId::GPT5Codex));
2032        assert!(!openai_models.contains(&ModelId::Gemini25Pro));
2033
2034        let anthropic_models = ModelId::models_for_provider(Provider::Anthropic);
2035        assert!(anthropic_models.contains(&ModelId::ClaudeSonnet45));
2036        assert!(anthropic_models.contains(&ModelId::ClaudeHaiku45));
2037        assert!(anthropic_models.contains(&ModelId::ClaudeSonnet4));
2038        assert!(!anthropic_models.contains(&ModelId::GPT5));
2039
2040        let deepseek_models = ModelId::models_for_provider(Provider::DeepSeek);
2041        assert!(deepseek_models.contains(&ModelId::DeepSeekChat));
2042        assert!(deepseek_models.contains(&ModelId::DeepSeekReasoner));
2043
2044        let openrouter_models = ModelId::models_for_provider(Provider::OpenRouter);
2045        for entry in openrouter_generated::ENTRIES {
2046            assert!(openrouter_models.contains(&entry.variant));
2047        }
2048
2049        let xai_models = ModelId::models_for_provider(Provider::XAI);
2050        assert!(xai_models.contains(&ModelId::XaiGrok4));
2051        assert!(xai_models.contains(&ModelId::XaiGrok4Mini));
2052        assert!(xai_models.contains(&ModelId::XaiGrok4Code));
2053        assert!(xai_models.contains(&ModelId::XaiGrok4CodeLatest));
2054        assert!(xai_models.contains(&ModelId::XaiGrok4Vision));
2055
2056        let zai_models = ModelId::models_for_provider(Provider::ZAI);
2057        assert!(zai_models.contains(&ModelId::ZaiGlm46));
2058        assert!(zai_models.contains(&ModelId::ZaiGlm45));
2059        assert!(zai_models.contains(&ModelId::ZaiGlm45Air));
2060        assert!(zai_models.contains(&ModelId::ZaiGlm45X));
2061        assert!(zai_models.contains(&ModelId::ZaiGlm45Airx));
2062        assert!(zai_models.contains(&ModelId::ZaiGlm45Flash));
2063        assert!(zai_models.contains(&ModelId::ZaiGlm432b0414128k));
2064
2065        let moonshot_models = ModelId::models_for_provider(Provider::Moonshot);
2066        assert_eq!(moonshot_models.len(), 0); // No Moonshot models available
2067
2068        let ollama_models = ModelId::models_for_provider(Provider::Ollama);
2069        assert!(ollama_models.contains(&ModelId::OllamaGptOss20b));
2070        assert!(ollama_models.contains(&ModelId::OllamaGptOss20bCloud));
2071        assert!(ollama_models.contains(&ModelId::OllamaGptOss120bCloud));
2072        assert!(ollama_models.contains(&ModelId::OllamaQwen317b));
2073        assert!(ollama_models.contains(&ModelId::OllamaDeepseekV32Cloud));
2074        assert!(ollama_models.contains(&ModelId::OllamaQwen3Next80bCloud));
2075        assert!(ollama_models.contains(&ModelId::OllamaMistralLarge3675bCloud));
2076        assert!(ollama_models.contains(&ModelId::OllamaKimiK2ThinkingCloud));
2077        assert!(ollama_models.contains(&ModelId::OllamaQwen3Coder480bCloud));
2078        assert!(ollama_models.contains(&ModelId::OllamaGlm46Cloud));
2079        assert!(ollama_models.contains(&ModelId::OllamaGemini3ProPreviewLatestCloud));
2080        assert!(ollama_models.contains(&ModelId::OllamaGemini3FlashPreviewCloud));
2081        assert!(ollama_models.contains(&ModelId::OllamaDevstral2123bCloud));
2082        assert!(ollama_models.contains(&ModelId::OllamaMinimaxM2Cloud));
2083        assert!(ollama_models.contains(&ModelId::OllamaMinimaxM21Cloud));
2084        assert!(ollama_models.contains(&ModelId::OllamaNemotron3Nano30bCloud));
2085        assert!(ollama_models.contains(&ModelId::OllamaGlm47Cloud));
2086        assert_eq!(ollama_models.len(), 17); // 17 Ollama models
2087
2088        let lmstudio_models = ModelId::models_for_provider(Provider::LmStudio);
2089        assert!(lmstudio_models.contains(&ModelId::LmStudioMetaLlama38BInstruct));
2090        assert!(lmstudio_models.contains(&ModelId::LmStudioMetaLlama318BInstruct));
2091        assert!(lmstudio_models.contains(&ModelId::LmStudioQwen257BInstruct));
2092        assert!(lmstudio_models.contains(&ModelId::LmStudioGemma22BIt));
2093        assert!(lmstudio_models.contains(&ModelId::LmStudioGemma29BIt));
2094        assert!(lmstudio_models.contains(&ModelId::LmStudioPhi31Mini4kInstruct));
2095        assert_eq!(lmstudio_models.len(), 6);
2096    }
2097
2098    #[test]
2099    fn test_ollama_cloud_models() {
2100        use crate::constants::models;
2101
2102        // Round-trip string conversion and metadata checks for the Ollama cloud models
2103        let model_pairs = vec![
2104            (
2105                ModelId::OllamaGptOss20bCloud,
2106                models::ollama::GPT_OSS_20B_CLOUD,
2107            ),
2108            (
2109                ModelId::OllamaGptOss120bCloud,
2110                models::ollama::GPT_OSS_120B_CLOUD,
2111            ),
2112            (
2113                ModelId::OllamaDeepseekV32Cloud,
2114                models::ollama::DEEPSEEK_V32_CLOUD,
2115            ),
2116            (
2117                ModelId::OllamaQwen3Coder480bCloud,
2118                models::ollama::QWEN3_CODER_480B_CLOUD,
2119            ),
2120            (ModelId::OllamaGlm46Cloud, models::ollama::GLM_46_CLOUD),
2121            (
2122                ModelId::OllamaMinimaxM2Cloud,
2123                models::ollama::MINIMAX_M2_CLOUD,
2124            ),
2125        ];
2126
2127        for (model_id, expected_str) in model_pairs {
2128            assert_eq!(model_id.as_str(), expected_str);
2129            assert_eq!(ModelId::from_str(expected_str).unwrap(), model_id);
2130            assert_eq!(model_id.provider(), Provider::Ollama);
2131
2132            // Verify display names are not empty
2133            assert!(!model_id.display_name().is_empty());
2134
2135            // Verify descriptions are not empty
2136            assert!(!model_id.description().is_empty());
2137
2138            // Verify generation is not empty
2139            assert!(!model_id.generation().is_empty());
2140        }
2141    }
2142
2143    #[test]
2144    fn test_fallback_models() {
2145        let fallbacks = ModelId::fallback_models();
2146        assert!(!fallbacks.is_empty());
2147        assert!(fallbacks.contains(&ModelId::Gemini25Pro));
2148        assert!(fallbacks.contains(&ModelId::GPT5));
2149        assert!(fallbacks.contains(&ModelId::ClaudeOpus41));
2150        assert!(fallbacks.contains(&ModelId::ClaudeSonnet45));
2151        assert!(fallbacks.contains(&ModelId::DeepSeekReasoner));
2152        assert!(fallbacks.contains(&ModelId::XaiGrok4));
2153        assert!(fallbacks.contains(&ModelId::ZaiGlm46));
2154        assert!(fallbacks.contains(&ModelId::OpenRouterGrokCodeFast1));
2155    }
2156}