// vtcode_config/models/model_id/description.rs
use super::ModelId;

3impl ModelId {
4 pub fn description(&self) -> &'static str {
6 if let Some(meta) = self.openrouter_metadata() {
7 return meta.description;
8 }
9 match self {
10 ModelId::Gemini31ProPreview => {
12 "Latest Gemini 3.1 Pro flagship model with improved thinking, efficiency, and factual consistency"
13 }
14 ModelId::Gemini31ProPreviewCustomTools => {
15 "Gemini 3.1 Pro variant optimized for agentic workflows using custom tools and bash"
16 }
17 ModelId::Gemini31FlashLitePreview => {
18 "Most cost-efficient Gemini 3.1 model, offering fastest performance for high-frequency, lightweight tasks"
19 }
20 ModelId::Gemini3FlashPreview => {
21 "Our most intelligent model built for speed, combining frontier intelligence with superior search and grounding"
22 }
23 ModelId::GPT5 => "Latest most capable OpenAI model with advanced reasoning",
25 ModelId::GPT52 => {
26 "Latest flagship OpenAI model with improved reasoning, xhigh effort, and built-in compaction support"
27 }
28 ModelId::GPT52Codex => {
29 "GPT-5.2 Codex variant optimized for agentic coding tasks with xhigh reasoning support"
30 }
31 ModelId::GPT54 => {
32 "Mainline frontier GPT model for general-purpose work, coding, long context, and multi-step agents"
33 }
34 ModelId::GPT54Pro => {
35 "Higher-compute GPT-5.4 variant for tougher problems with deeper reasoning"
36 }
37 ModelId::GPT54Nano => {
38 "Lightweight GPT-5.4 variant optimized for speed and cost-efficiency"
39 }
40 ModelId::GPT54Mini => {
41 "Compact GPT-5.4 variant for cost-effective tasks with reduced reasoning overhead"
42 }
43 ModelId::GPT53Codex => {
44 "GPT-5.3 variant optimized for agentic coding tasks with reasoning effort support (low, medium, high, xhigh)"
45 }
46 ModelId::GPT51Codex => {
47 "GPT-5.1 variant optimized for agentic coding tasks and software engineering workflows"
48 }
49 ModelId::GPT51CodexMax => {
50 "Higher-compute GPT-5.1 Codex variant optimized for longer-running engineering tasks"
51 }
52 ModelId::GPT5Mini => "Latest efficient OpenAI model, great for most tasks",
53 ModelId::GPT5Nano => "Latest most cost-effective OpenAI model",
54 ModelId::GPT5Codex => {
55 "GPT-5 variant optimized for agentic coding tasks and software engineering workflows"
56 }
57 ModelId::OpenAIGptOss20b => {
58 "OpenAI's open-source 20B parameter GPT-OSS model using harmony tokenization"
59 }
60 ModelId::OpenAIGptOss120b => {
61 "OpenAI's open-source 120B parameter GPT-OSS model using harmony tokenization"
62 }
63 ModelId::ClaudeOpus47 => {
65 "Next-gen Anthropic flagship with adaptive thinking and task budget support"
66 }
67 ModelId::ClaudeOpus46 => {
68 "Previous Anthropic flagship retained on VT Code's budgeted-thinking path for compatibility"
69 }
70 ModelId::ClaudeSonnet46 => {
71 "Balanced flagship model for coding with budgeted thinking in VT Code's current Anthropic rollout"
72 }
73 ModelId::ClaudeHaiku45 => {
74 "Latest efficient Anthropic model optimized for low-latency agent workflows"
75 }
76 ModelId::ClaudeMythosPreview => {
77 "Invitation-only Anthropic research preview for defensive cybersecurity workflows with adaptive thinking"
78 }
79 ModelId::CopilotAuto => {
80 "GitHub Copilot preview provider with automatic model selection via the official Copilot CLI"
81 }
82 ModelId::CopilotGPT52Codex => {
83 "GitHub Copilot GPT-5.2 Codex option for agentic software engineering workflows"
84 }
85 ModelId::CopilotGPT51CodexMax => {
86 "GitHub Copilot GPT-5.1 Codex Max option for longer-running engineering tasks"
87 }
88 ModelId::CopilotGPT54 => {
89 "GitHub Copilot GPT-5.4 option for complex professional work and long context"
90 }
91 ModelId::CopilotGPT54Mini => {
92 "GitHub Copilot GPT-5.4 Mini option for faster, lighter-weight tasks"
93 }
94 ModelId::CopilotClaudeSonnet46 => {
95 "GitHub Copilot Claude Sonnet 4.6 option for balanced coding and reasoning work"
96 }
97 ModelId::DeepSeekChat => {
99 "DeepSeek V3.2 - Fast, efficient chat model for immediate responses"
100 }
101 ModelId::DeepSeekReasoner => {
102 "DeepSeek V3.2 - Thinking mode with integrated tool-use and reasoning capability"
103 }
104 ModelId::ZaiGlm5 => {
106 "Z.ai flagship GLM-5 foundation model engineered for complex systems design and long-horizon agent workflows"
107 }
108 ModelId::ZaiGlm51 => {
109 "Z.ai next-gen GLM-5.1 foundation model with improved reasoning and agent capabilities"
110 }
111 ModelId::OllamaGptOss20b => {
113 "Local GPT-OSS 20B deployment served via Ollama with no external API dependency"
114 }
115 ModelId::OllamaGptOss20bCloud => {
116 "Cloud-hosted GPT-OSS 20B accessed through Ollama Cloud for efficient reasoning tasks"
117 }
118 ModelId::OllamaGptOss120bCloud => {
119 "Cloud-hosted GPT-OSS 120B accessed through Ollama Cloud for larger reasoning tasks"
120 }
121 ModelId::OllamaQwen317b => {
122 "Qwen3 1.7B served locally through Ollama without external API requirements"
123 }
124 ModelId::OllamaQwen3CoderNext => {
125 "Qwen3-Coder-Next served via Ollama Cloud with 256K context, strong coding/tool-use performance, and non-thinking mode responses"
126 }
127 ModelId::OllamaDeepseekV32Cloud => {
128 "DeepSeek V3.2 cloud deployment via Ollama with enhanced reasoning and instruction following"
129 }
130 ModelId::OllamaQwen3Next80bCloud => {
131 "Qwen3 Next generation 80B model via Ollama Cloud with improved reasoning and long context"
132 }
133 ModelId::OllamaGlm5Cloud => "Cloud-hosted GLM-5 model served via Ollama Cloud",
134 ModelId::OllamaGlm51Cloud => "Cloud-hosted GLM-5.1 model served via Ollama Cloud",
135 ModelId::OllamaMinimaxM25Cloud => {
136 "Exceptional multilingual capabilities to elevate code engineering"
137 }
138 ModelId::OllamaGemini3FlashPreviewCloud => {
139 "Gemini 3 Flash offers frontier intelligence built for speed at a fraction of the cost."
140 }
141 ModelId::OllamaKimiK26Cloud => "Cloud-hosted Kimi K2.6 model served via Ollama Cloud",
142 ModelId::OllamaNemotron3SuperCloud => {
143 "NVIDIA Nemotron™ is a family of open models delivering leading efficiency and accuracy for building specialized AI agents. Nemotron-3-Super (120B) is optimized for collaborative agents and high-volume workloads."
144 }
145 ModelId::OllamaMinimaxM2Cloud => {
146 "Cloud-hosted MiniMax-M2 model accessed through Ollama Cloud for reasoning tasks"
147 }
148 ModelId::OllamaMinimaxM27Cloud => {
149 "Cloud-hosted MiniMax-M2.7 model accessed through Ollama Cloud for reasoning tasks"
150 }
151 ModelId::MinimaxM27 => {
152 "Beginning the journey of recursive self-improvement with 204.8K context and strong reasoning/coding performance"
153 }
154 ModelId::MinimaxM25 => {
155 "Latest MiniMax-M2.5 model with further improvements in reasoning and coding"
156 }
157 ModelId::MoonshotKimiK26 => {
158 "Kimi K2.6 - Moonshot.ai's latest 1T MoE flagship with 32B active parameters, MLA attention, and MoonViT vision"
159 }
160 ModelId::MoonshotKimiK25 => {
161 "Kimi K2.5 - Moonshot.ai's previous flagship reasoning model"
162 }
163 ModelId::OpenCodeZenGPT54 => {
164 "OpenCode Zen flagship GPT-5.4 route using OpenCode's curated pay-as-you-go gateway"
165 }
166 ModelId::OpenCodeZenGPT54Mini => {
167 "Lower-cost OpenCode Zen GPT-5.4 Mini option for faster and cheaper tasks"
168 }
169 ModelId::OpenCodeZenClaudeSonnet46 => {
170 "Claude Sonnet 4.6 served through OpenCode Zen's curated Anthropic endpoint"
171 }
172 ModelId::OpenCodeZenGlm51 => {
173 "GLM-5.1 served through OpenCode Zen for lower-cost reasoning and coding work"
174 }
175 ModelId::OpenCodeZenKimiK25 => {
176 "Kimi K2.5 served through OpenCode Zen's curated open-model gateway"
177 }
178 ModelId::OpenCodeGoGlm51 => {
179 "GLM-5.1 included with the OpenCode Go subscription for open-model coding workflows"
180 }
181 ModelId::OpenCodeGoKimiK25 => "Kimi K2.5 included with the OpenCode Go subscription",
182 ModelId::OpenCodeGoMinimaxM25 => {
183 "MiniMax-M2.5 included with the OpenCode Go subscription"
184 }
185 ModelId::OpenCodeGoMinimaxM27 => {
186 "MiniMax-M2.7 included with the OpenCode Go subscription for stronger agentic coding"
187 }
188 ModelId::HuggingFaceDeepseekV32 => {
189 "DeepSeek-V3.2 via Hugging Face router for advanced reasoning"
190 }
191 ModelId::HuggingFaceOpenAIGptOss20b => "OpenAI GPT-OSS 20B via Hugging Face router",
192 ModelId::HuggingFaceOpenAIGptOss120b => "OpenAI GPT-OSS 120B via Hugging Face router",
193 ModelId::HuggingFaceMinimaxM25Novita => {
194 "MiniMax-M2.5 model via Novita inference provider on HuggingFace router. Enhanced reasoning capabilities."
195 }
196 ModelId::HuggingFaceDeepseekV32Novita => {
197 "DeepSeek-V3.2 via Novita inference provider on HuggingFace router."
198 }
199 ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
200 "Xiaomi MiMo-V2-Flash via Novita on HuggingFace router."
201 }
202 ModelId::HuggingFaceGlm5Novita => {
203 "Z.ai GLM-5 flagship model via Novita inference provider on HuggingFace router."
204 }
205 ModelId::HuggingFaceGlm51ZaiOrg => {
206 "Z.ai GLM-5.1 model via zai-org inference provider on HuggingFace router."
207 }
208 ModelId::HuggingFaceQwen3CoderNextNovita => {
209 "Qwen3-Coder-Next via Novita inference provider on HuggingFace router. Coding-optimized model with reasoning capabilities."
210 }
211 ModelId::HuggingFaceQwen35397BA17BTogether => {
212 "Qwen3.5-397B-A17B via Together inference provider on HuggingFace router. Vision-language model with linear attention and sparse MoE, 1M context window."
213 }
214 ModelId::HuggingFaceKimiK26Novita => {
215 "Kimi K2.6 via Novita inference provider on HuggingFace router."
216 }
217 ModelId::HuggingFaceStep35Flash => {
218 "Step 3.5 Flash flagship model via HuggingFace router (featherless-ai provider). Supports streaming and fast inference."
219 }
220 ModelId::OpenRouterMinimaxM25 => "MiniMax-M2.5 flagship model via OpenRouter",
221 ModelId::OpenRouterQwen3CoderNext => {
222 "Next-generation Qwen3 coding model optimized for agentic workflows via OpenRouter"
223 }
224 ModelId::OpenRouterMoonshotaiKimiK26 => {
225 "Kimi K2.6 multimodal agentic model for long-horizon coding and design via OpenRouter"
226 }
227 _ => unreachable!(),
228 }
229 }
230}