//! Centralized model identifier definitions for `vtcode_config/models/model_id.rs`.
//! NOTE(review): this file was recovered from a web code-viewer paste; the page
//! chrome that preceded the code has been replaced with this module doc.
1use serde::{Deserialize, Serialize};
2
3mod as_str;
4mod capabilities;
5mod collection;
6mod defaults;
7mod description;
8mod display;
9mod format;
10mod openrouter;
11mod parse;
12mod provider;
13
14pub use capabilities::{
15    ModelCatalogEntry, ModelPricing, catalog_provider_keys, model_catalog_entry,
16    supported_models_for_provider,
17};
18
19/// Centralized enum for all supported model identifiers
20#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
21#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
22pub enum ModelId {
23    // Gemini models
24    /// Gemini 3.1 Pro Preview - Latest Gemini 3.1 Pro flagship
25    Gemini31ProPreview,
26    /// Gemini 3.1 Pro Preview Custom Tools - Optimized for custom tools & bash
27    Gemini31ProPreviewCustomTools,
28    /// Gemini 3.1 Flash Lite Preview - Most cost-efficient model, offering fastest performance for high-frequency, lightweight tasks
29    Gemini31FlashLitePreview,
30    /// Gemini 3 Flash Preview - Our most intelligent model built for speed, combining frontier intelligence with superior search and grounding
31    #[default]
32    Gemini3FlashPreview,
33
34    // OpenAI models
35    /// GPT-5.5 - Next-gen OpenAI model dated release (2026-04-23)
36    GPT55,
37    /// GPT-5 - Latest most capable OpenAI model (2025-08-07)
38    GPT5,
39    /// GPT-5.2 - Latest flagship general-purpose OpenAI model (2025-12-11)
40    GPT52,
41    /// GPT-5.2 Codex - Code-focused GPT-5.2 variant optimized for agentic coding with xhigh reasoning support
42    GPT52Codex,
43    /// GPT-5.4 - Mainline frontier GPT model for general-purpose and coding work
44    GPT54,
45    /// GPT-5.4 Pro - Higher-compute GPT-5.4 variant for difficult problems
46    GPT54Pro,
47    /// GPT-5.4 Nano - Lightweight GPT-5.4 variant optimized for speed and cost-efficiency
48    GPT54Nano,
49    /// GPT-5.4 Mini - Compact GPT-5.4 variant for cost-effective tasks with reduced reasoning overhead
50    GPT54Mini,
51    /// GPT-5.3 Codex - Code-focused GPT-5.3 variant optimized for agentic coding with reasoning effort support (low, medium, high, xhigh)
52    GPT53Codex,
53    /// GPT-5.1 Codex - Code-focused GPT-5.1 variant optimized for agentic coding
54    GPT51Codex,
55    /// GPT-5.1 Codex Max - Higher-compute GPT-5.1 Codex variant for longer-running engineering tasks
56    GPT51CodexMax,
57    /// GPT-5 Mini - Latest efficient OpenAI model (2025-08-07)
58    GPT5Mini,
59    /// GPT-5 Nano - Latest most cost-effective OpenAI model (2025-08-07)
60    GPT5Nano,
61    /// GPT-5 Codex - Code-focused GPT-5 variant optimized for agentic coding
62    GPT5Codex,
63    /// GPT-OSS 20B - OpenAI's open-source 20B parameter model using harmony
64    OpenAIGptOss20b,
65    /// GPT-OSS 120B - OpenAI's open-source 120B parameter model using harmony
66    OpenAIGptOss120b,
67
68    // Anthropic models
69    /// Claude Opus 4.7 - Next-gen flagship Anthropic model with adaptive thinking
70    ClaudeOpus47,
71    /// Claude Opus 4.6 - Previous Anthropic flagship retained for compatibility
72    ClaudeOpus46,
73    /// Claude Sonnet 4.6 - Balanced flagship Anthropic model in VT Code's conservative rollout
74    ClaudeSonnet46,
75    /// Claude Haiku 4.5 - Latest efficient Anthropic model (2025-10-15)
76    ClaudeHaiku45,
77    /// Claude Mythos Preview - Invitation-only Anthropic research preview for defensive cybersecurity workflows
78    ClaudeMythosPreview,
79    /// GitHub Copilot auto model selection
80    CopilotAuto,
81    /// GitHub Copilot GPT-5.2 Codex
82    CopilotGPT52Codex,
83    /// GitHub Copilot GPT-5.1 Codex Max
84    CopilotGPT51CodexMax,
85    /// GitHub Copilot GPT-5.4
86    CopilotGPT54,
87    /// GitHub Copilot GPT-5.4 Mini
88    CopilotGPT54Mini,
89    /// GitHub Copilot Claude Sonnet 4.6
90    CopilotClaudeSonnet46,
91
92    // DeepSeek models
93    /// DeepSeek V4 Pro - High-performance reasoning model with advanced thinking
94    DeepSeekV4Pro,
95    /// DeepSeek V4 Flash - Fast inference model for cost-effective reasoning
96    DeepSeekV4Flash,
97    // Hugging Face models
98    /// DeepSeek V3.2 via Hugging Face router
99    HuggingFaceDeepseekV32,
100    /// OpenAI GPT-OSS 20B via Hugging Face router
101    HuggingFaceOpenAIGptOss20b,
102    /// OpenAI GPT-OSS 120B via Hugging Face router
103    HuggingFaceOpenAIGptOss120b,
104    /// DeepSeek V3.2 via Novita on Hugging Face router
105    HuggingFaceDeepseekV32Novita,
106    /// Xiaomi MiMo-V2-Flash via Novita on Hugging Face router
107    HuggingFaceXiaomiMimoV2FlashNovita,
108    /// MiniMax M2.5 via Novita on Hugging Face router
109    HuggingFaceMinimaxM25Novita,
110    /// Z.AI GLM-5 via Novita on Hugging Face router
111    HuggingFaceGlm5Novita,
112    /// Z.AI GLM-5.1 via zai-org provider on Hugging Face router
113    HuggingFaceGlm51ZaiOrg,
114    /// Qwen3-Coder-Next via Novita inference provider on Hugging Face router
115    HuggingFaceQwen3CoderNextNovita,
116    /// Qwen3.5-397B-A17B via Together inference provider on Hugging Face router
117    HuggingFaceQwen35397BA17BTogether,
118    /// Kimi K2.6 via Novita on Hugging Face router
119    HuggingFaceKimiK26Novita,
120    /// Step 3.5 Flash via Hugging Face router
121    HuggingFaceStep35Flash,
122
123    /// GLM-5 - Flagship Z.ai foundation model for complex systems
124    ZaiGlm5,
125    /// GLM-5.1 - Next-gen Z.ai foundation model with improved reasoning
126    ZaiGlm51,
127
128    // Moonshot models
129    /// Kimi K2.6 - Moonshot.ai's latest 1T MoE flagship (32B active, MLA, MoonViT vision)
130    MoonshotKimiK26,
131    /// Kimi K2.5 - Moonshot.ai's previous flagship reasoning model
132    MoonshotKimiK25,
133
134    // OpenCode Zen models
135    /// GPT-5.4 - OpenCode Zen default flagship model
136    OpenCodeZenGPT54,
137    /// GPT-5.4 Mini - Lower-cost OpenCode Zen GPT option
138    OpenCodeZenGPT54Mini,
139    /// Claude Sonnet 4.6 - Anthropic-backed OpenCode Zen coding model
140    OpenCodeZenClaudeSonnet46,
141    /// GLM-5.1 - Z.AI model served through OpenCode Zen
142    OpenCodeZenGlm51,
143    /// Kimi K2.5 - Moonshot model served through OpenCode Zen
144    OpenCodeZenKimiK25,
145
146    // OpenCode Go models
147    /// GLM-5.1 - Z.AI model included with OpenCode Go
148    OpenCodeGoGlm51,
149    /// Kimi K2.5 - Moonshot model included with OpenCode Go
150    OpenCodeGoKimiK25,
151    /// MiniMax M2.5 - OpenCode Go subscription model
152    OpenCodeGoMinimaxM25,
153    /// MiniMax M2.7 - Higher-tier OpenCode Go subscription model
154    OpenCodeGoMinimaxM27,
155
156    // Ollama models
157    /// GPT-OSS 20B - Open-weight GPT-OSS 20B model served via Ollama locally
158    OllamaGptOss20b,
159    /// GPT-OSS 20B Cloud - Cloud-hosted GPT-OSS 20B served via Ollama Cloud
160    OllamaGptOss20bCloud,
161    /// GPT-OSS 120B Cloud - Cloud-hosted GPT-OSS 120B served via Ollama Cloud
162    OllamaGptOss120bCloud,
163    /// Qwen3 1.7B - Qwen3 1.7B model served via Ollama
164    OllamaQwen317b,
165    /// Qwen3 Coder Next - Coding-optimized Qwen3 Next model served via Ollama locally
166    OllamaQwen3CoderNext,
167    /// DeepSeek V3.2 Cloud - DeepSeek V3.2 reasoning deployment via Ollama Cloud
168    OllamaDeepseekV32Cloud,
169    /// DeepSeek V4 Flash Cloud - Fast inference DeepSeek V4 Flash model via Ollama Cloud
170    OllamaDeepseekV4FlashCloud,
171    /// DeepSeek V4 Pro Cloud - High-performance DeepSeek V4 Pro model via Ollama Cloud
172    OllamaDeepseekV4ProCloud,
173    /// Qwen3 Next 80B Cloud - Next-generation Qwen3 80B via Ollama Cloud
174    OllamaQwen3Next80bCloud,
175    /// MiniMax-M2 Cloud - Cloud-hosted MiniMax-M2 model served via Ollama Cloud
176    OllamaMinimaxM2Cloud,
177    /// MiniMax-M2.7 Cloud - Cloud-hosted MiniMax-M2.7 model served via Ollama Cloud
178    OllamaMinimaxM27Cloud,
179    /// GLM-5 Cloud - Cloud-hosted GLM-5 model served via Ollama Cloud
180    OllamaGlm5Cloud,
181    /// GLM-5.1 Cloud - Cloud-hosted GLM-5.1 model served via Ollama Cloud
182    OllamaGlm51Cloud,
183    /// MiniMax-M2.5 Cloud - Cloud-hosted MiniMax-M2.5 model served via Ollama Cloud
184    OllamaMinimaxM25Cloud,
185    /// Gemini 3 Flash Preview Cloud - Google Gemini 3 Flash Preview via Ollama Cloud
186    OllamaGemini3FlashPreviewCloud,
187    /// Kimi K2.6 Cloud - Moonshot Kimi K2.6 via Ollama Cloud
188    OllamaKimiK26Cloud,
189    /// Nemotron 3 Super Cloud - NVIDIA Nemotron 3 Super 120B via Ollama Cloud
190    OllamaNemotron3SuperCloud,
191
192    // MiniMax models
193    /// MiniMax-M2.7 - Recursive self-improvement flagship with 204.8K context
194    MinimaxM27,
195    /// MiniMax-M2.5 - Latest MiniMax model with further improvements in reasoning and coding
196    MinimaxM25,
197
198    // OpenRouter models
199    /// Qwen3 32B - Dense 32B Qwen3 deployment
200    OpenRouterQwen332b,
201    /// Qwen3 30B A3B - Active-parameter 30B Qwen3 model
202    OpenRouterQwen330bA3b,
203    /// Qwen3 30B A3B Instruct 2507 - Instruction-tuned Qwen3 30B A3B
204    OpenRouterQwen330bA3bInstruct2507,
205    /// Qwen3 30B A3B Thinking 2507 - Deliberative Qwen3 30B A3B release
206    OpenRouterQwen330bA3bThinking2507,
207    /// Qwen3 14B - Lightweight Qwen3 14B model
208    OpenRouterQwen314b,
209    /// Qwen3 8B - Compact Qwen3 8B deployment
210    OpenRouterQwen38b,
211    /// Qwen3 Next 80B A3B Instruct - Next-generation Qwen3 instruction model
212    OpenRouterQwen3Next80bA3bInstruct,
213    /// Qwen3 Next 80B A3B Thinking - Next-generation Qwen3 reasoning release
214    OpenRouterQwen3Next80bA3bThinking,
215    /// Qwen3.5-397B-A17B - Native vision-language model with linear attention and sparse MoE, 1M context window
216    OpenRouterQwen35Plus0215,
217    /// Qwen3 Coder - Qwen3-based coding model tuned for IDE workflows
218    OpenRouterQwen3Coder,
219    /// Qwen3 Coder Plus - Premium Qwen3 coding model with long context
220    OpenRouterQwen3CoderPlus,
221    /// Qwen3 Coder Flash - Latency optimised Qwen3 coding model
222    OpenRouterQwen3CoderFlash,
223    /// Qwen3 Coder 30B A3B Instruct - Large Mixture-of-Experts coding deployment
224    OpenRouterQwen3Coder30bA3bInstruct,
225    /// Qwen3 Coder Next - Next-generation Qwen3 coding model with enhanced reasoning
226    OpenRouterQwen3CoderNext,
227    /// DeepSeek V4 Pro - High-performance reasoning model via OpenRouter
228    OpenRouterDeepSeekV4Pro,
229    /// DeepSeek V4 Flash - Fast inference model via OpenRouter
230    OpenRouterDeepSeekV4Flash,
231    /// DeepSeek R1 - DeepSeek R1 reasoning model with chain-of-thought
232    OpenRouterDeepSeekR1,
233    /// DeepSeek V3.1 - DeepSeek V3.1 chat model via OpenRouter
234    OpenRouterDeepSeekChatV31,
235    /// OpenAI gpt-oss-120b - Open-weight 120B reasoning model via OpenRouter
236    OpenRouterOpenAIGptOss120b,
237    /// OpenAI gpt-oss-120b:free - Open-weight 120B reasoning model free tier via OpenRouter
238    OpenRouterOpenAIGptOss120bFree,
239    /// OpenAI gpt-oss-20b - Open-weight 20B deployment via OpenRouter
240    OpenRouterOpenAIGptOss20b,
241    /// OpenAI GPT-5 - OpenAI GPT-5 model accessed through OpenRouter
242    OpenRouterOpenAIGpt5,
243    /// OpenAI GPT-5.5 - OpenAI GPT-5.5 model accessed through OpenRouter
244    OpenRouterOpenAIGpt55,
245    /// OpenAI GPT-5 Chat - Chat optimised GPT-5 endpoint without tool use
246    OpenRouterOpenAIGpt5Chat,
247
248    /// Gemini 3.1 Pro Preview - Google's latest Gemini 3.1 Pro model via OpenRouter
249    OpenRouterGoogleGemini31ProPreview,
250
251    /// Claude Sonnet 4.5 - Anthropic Claude Sonnet 4.5 listing
252    OpenRouterAnthropicClaudeSonnet45,
253    /// Claude Sonnet 4.6 - Anthropic Claude Sonnet 4.6 listing
254    OpenRouterAnthropicClaudeSonnet46,
255    /// Claude Haiku 4.5 - Anthropic Claude Haiku 4.5 listing
256    OpenRouterAnthropicClaudeHaiku45,
257    /// Mistral Large 3 2512 - Mistral Large 3 2512 model via OpenRouter
258    OpenRouterMistralaiMistralLarge2512,
259    /// DeepSeek V3.1 Nex N1 - Nex AGI DeepSeek V3.1 Nex N1 model via OpenRouter
260    OpenRouterNexAgiDeepseekV31NexN1,
261    /// Step 3.5 Flash (free) - StepFun's most capable open-source reasoning model via OpenRouter
262    OpenRouterStepfunStep35FlashFree,
263    /// Nemotron 3 Super (free) - NVIDIA's 120B hybrid MoE model via OpenRouter
264    OpenRouterNvidiaNemotron3Super120bA12bFree,
265    /// GLM-5 - Z.AI GLM-5 flagship foundation model via OpenRouter
266    OpenRouterZaiGlm5,
267    /// GLM-4.7 - Z.AI GLM-4.7 model via OpenRouter
268    OpenRouterZaiGlm47,
269    /// GLM-5.1 - Z.AI GLM-5.1 next-gen foundation model via OpenRouter
270    OpenRouterZaiGlm51,
271    /// MiniMax-M2.5 - MiniMax flagship model via OpenRouter
272    OpenRouterMinimaxM25,
273    /// Kimi K2.6 - Moonshot AI's next-generation multimodal model via OpenRouter
274    OpenRouterMoonshotaiKimiK26,
275}