//! Human-readable descriptions for [`ModelId`] variants.
//!
//! Path: vtcode_config/models/model_id/description.rs
1use super::ModelId;
2
3impl ModelId {
4    /// Get a description of the model's characteristics
5    pub fn description(&self) -> &'static str {
6        if let Some(meta) = self.openrouter_metadata() {
7            return meta.description;
8        }
9        match self {
10            // Gemini models
11            ModelId::Gemini31ProPreview => {
12                "Latest Gemini 3.1 Pro flagship model with improved thinking, efficiency, and factual consistency"
13            }
14            ModelId::Gemini31ProPreviewCustomTools => {
15                "Gemini 3.1 Pro variant optimized for agentic workflows using custom tools and bash"
16            }
17            ModelId::Gemini31FlashLitePreview => {
18                "Most cost-efficient Gemini 3.1 model, offering fastest performance for high-frequency, lightweight tasks"
19            }
20            ModelId::Gemini3FlashPreview => {
21                "Our most intelligent model built for speed, combining frontier intelligence with superior search and grounding"
22            }
23            // OpenAI models
24            ModelId::GPT55 => {
25                "Next-gen OpenAI model with frontier reasoning and long context (2026-04-23 dated release)"
26            }
27            ModelId::GPT5 => "Latest most capable OpenAI model with advanced reasoning",
28            ModelId::GPT52 => {
29                "Latest flagship OpenAI model with improved reasoning, xhigh effort, and built-in compaction support"
30            }
31            ModelId::GPT52Codex => {
32                "GPT-5.2 Codex variant optimized for agentic coding tasks with xhigh reasoning support"
33            }
34            ModelId::GPT54 => {
35                "Mainline frontier GPT model for general-purpose work, coding, long context, and multi-step agents"
36            }
37            ModelId::GPT54Pro => {
38                "Higher-compute GPT-5.4 variant for tougher problems with deeper reasoning"
39            }
40            ModelId::GPT54Nano => {
41                "Lightweight GPT-5.4 variant optimized for speed and cost-efficiency"
42            }
43            ModelId::GPT54Mini => {
44                "Compact GPT-5.4 variant for cost-effective tasks with reduced reasoning overhead"
45            }
46            ModelId::GPT53Codex => {
47                "GPT-5.3 variant optimized for agentic coding tasks with reasoning effort support (low, medium, high, xhigh)"
48            }
49            ModelId::GPT51Codex => {
50                "GPT-5.1 variant optimized for agentic coding tasks and software engineering workflows"
51            }
52            ModelId::GPT51CodexMax => {
53                "Higher-compute GPT-5.1 Codex variant optimized for longer-running engineering tasks"
54            }
55            ModelId::GPT5Mini => "Latest efficient OpenAI model, great for most tasks",
56            ModelId::GPT5Nano => "Latest most cost-effective OpenAI model",
57            ModelId::GPT5Codex => {
58                "GPT-5 variant optimized for agentic coding tasks and software engineering workflows"
59            }
60            ModelId::OpenAIGptOss20b => {
61                "OpenAI's open-source 20B parameter GPT-OSS model using harmony tokenization"
62            }
63            ModelId::OpenAIGptOss120b => {
64                "OpenAI's open-source 120B parameter GPT-OSS model using harmony tokenization"
65            }
66            // Anthropic models
67            ModelId::ClaudeOpus47 => {
68                "Next-gen Anthropic flagship with adaptive thinking and task budget support"
69            }
70            ModelId::ClaudeOpus46 => {
71                "Previous Anthropic flagship retained on VT Code's budgeted-thinking path for compatibility"
72            }
73            ModelId::ClaudeSonnet46 => {
74                "Balanced flagship model for coding with budgeted thinking in VT Code's current Anthropic rollout"
75            }
76            ModelId::ClaudeHaiku45 => {
77                "Latest efficient Anthropic model optimized for low-latency agent workflows"
78            }
79            ModelId::ClaudeMythosPreview => {
80                "Invitation-only Anthropic research preview for defensive cybersecurity workflows with adaptive thinking"
81            }
82            ModelId::CopilotAuto => {
83                "GitHub Copilot preview provider with automatic model selection via the official Copilot CLI"
84            }
85            ModelId::CopilotGPT52Codex => {
86                "GitHub Copilot GPT-5.2 Codex option for agentic software engineering workflows"
87            }
88            ModelId::CopilotGPT51CodexMax => {
89                "GitHub Copilot GPT-5.1 Codex Max option for longer-running engineering tasks"
90            }
91            ModelId::CopilotGPT54 => {
92                "GitHub Copilot GPT-5.4 option for complex professional work and long context"
93            }
94            ModelId::CopilotGPT54Mini => {
95                "GitHub Copilot GPT-5.4 Mini option for faster, lighter-weight tasks"
96            }
97            ModelId::CopilotClaudeSonnet46 => {
98                "GitHub Copilot Claude Sonnet 4.6 option for balanced coding and reasoning work"
99            }
100            // DeepSeek models
101            ModelId::DeepSeekV4Pro => {
102                "DeepSeek V4 Pro - High-performance reasoning model with advanced thinking capabilities (1M context, 384K max output)"
103            }
104            ModelId::DeepSeekV4Flash => {
105                "DeepSeek V4 Flash - Fast inference model for cost-effective reasoning tasks (1M context, 384K max output)"
106            }
107            // Z.AI models
108            ModelId::ZaiGlm5 => {
109                "Z.ai flagship GLM-5 foundation model engineered for complex systems design and long-horizon agent workflows"
110            }
111            ModelId::ZaiGlm51 => {
112                "Z.ai next-gen GLM-5.1 foundation model with improved reasoning and agent capabilities"
113            }
114            // Ollama models
115            ModelId::OllamaGptOss20b => {
116                "Local GPT-OSS 20B deployment served via Ollama with no external API dependency"
117            }
118            ModelId::OllamaGptOss20bCloud => {
119                "Cloud-hosted GPT-OSS 20B accessed through Ollama Cloud for efficient reasoning tasks"
120            }
121            ModelId::OllamaGptOss120bCloud => {
122                "Cloud-hosted GPT-OSS 120B accessed through Ollama Cloud for larger reasoning tasks"
123            }
124            ModelId::OllamaQwen317b => {
125                "Qwen3 1.7B served locally through Ollama without external API requirements"
126            }
127            ModelId::OllamaQwen3CoderNext => {
128                "Qwen3-Coder-Next served via Ollama Cloud with 256K context, strong coding/tool-use performance, and non-thinking mode responses"
129            }
130            ModelId::OllamaDeepseekV32Cloud => {
131                "DeepSeek V3.2 cloud deployment via Ollama with enhanced reasoning and instruction following"
132            }
133            ModelId::OllamaDeepseekV4FlashCloud => {
134                "DeepSeek V4 Flash cloud deployment via Ollama with fast inference and efficient reasoning"
135            }
136            ModelId::OllamaDeepseekV4ProCloud => {
137                "DeepSeek V4 Pro cloud deployment via Ollama with advanced thinking and strong reasoning"
138            }
139            ModelId::OllamaQwen3Next80bCloud => {
140                "Qwen3 Next generation 80B model via Ollama Cloud with improved reasoning and long context"
141            }
142            ModelId::OllamaGlm5Cloud => "Cloud-hosted GLM-5 model served via Ollama Cloud",
143            ModelId::OllamaGlm51Cloud => "Cloud-hosted GLM-5.1 model served via Ollama Cloud",
144            ModelId::OllamaMinimaxM25Cloud => {
145                "Exceptional multilingual capabilities to elevate code engineering"
146            }
147            ModelId::OllamaGemini3FlashPreviewCloud => {
148                "Gemini 3 Flash offers frontier intelligence built for speed at a fraction of the cost."
149            }
150            ModelId::OllamaKimiK26Cloud => "Cloud-hosted Kimi K2.6 model served via Ollama Cloud",
151            ModelId::OllamaNemotron3SuperCloud => {
152                "NVIDIA Nemotron™ is a family of open models delivering leading efficiency and accuracy for building specialized AI agents. Nemotron-3-Super (120B) is optimized for collaborative agents and high-volume workloads."
153            }
154            ModelId::OllamaMinimaxM2Cloud => {
155                "Cloud-hosted MiniMax-M2 model accessed through Ollama Cloud for reasoning tasks"
156            }
157            ModelId::OllamaMinimaxM27Cloud => {
158                "Cloud-hosted MiniMax-M2.7 model accessed through Ollama Cloud for reasoning tasks"
159            }
160            ModelId::MinimaxM27 => {
161                "Beginning the journey of recursive self-improvement with 204.8K context and strong reasoning/coding performance"
162            }
163            ModelId::MinimaxM25 => {
164                "Latest MiniMax-M2.5 model with further improvements in reasoning and coding"
165            }
166            ModelId::MoonshotKimiK26 => {
167                "Kimi K2.6 - Moonshot.ai's latest 1T MoE flagship with 32B active parameters, MLA attention, and MoonViT vision"
168            }
169            ModelId::MoonshotKimiK25 => {
170                "Kimi K2.5 - Moonshot.ai's previous flagship reasoning model"
171            }
172            ModelId::OpenCodeZenGPT54 => {
173                "OpenCode Zen flagship GPT-5.4 route using OpenCode's curated pay-as-you-go gateway"
174            }
175            ModelId::OpenCodeZenGPT54Mini => {
176                "Lower-cost OpenCode Zen GPT-5.4 Mini option for faster and cheaper tasks"
177            }
178            ModelId::OpenCodeZenClaudeSonnet46 => {
179                "Claude Sonnet 4.6 served through OpenCode Zen's curated Anthropic endpoint"
180            }
181            ModelId::OpenCodeZenGlm51 => {
182                "GLM-5.1 served through OpenCode Zen for lower-cost reasoning and coding work"
183            }
184            ModelId::OpenCodeZenKimiK25 => {
185                "Kimi K2.5 served through OpenCode Zen's curated open-model gateway"
186            }
187            ModelId::OpenCodeGoGlm51 => {
188                "GLM-5.1 included with the OpenCode Go subscription for open-model coding workflows"
189            }
190            ModelId::OpenCodeGoKimiK25 => "Kimi K2.5 included with the OpenCode Go subscription",
191            ModelId::OpenCodeGoMinimaxM25 => {
192                "MiniMax-M2.5 included with the OpenCode Go subscription"
193            }
194            ModelId::OpenCodeGoMinimaxM27 => {
195                "MiniMax-M2.7 included with the OpenCode Go subscription for stronger agentic coding"
196            }
197            ModelId::HuggingFaceDeepseekV32 => {
198                "DeepSeek-V3.2 via Hugging Face router for advanced reasoning"
199            }
200            ModelId::HuggingFaceOpenAIGptOss20b => "OpenAI GPT-OSS 20B via Hugging Face router",
201            ModelId::HuggingFaceOpenAIGptOss120b => "OpenAI GPT-OSS 120B via Hugging Face router",
202            ModelId::HuggingFaceMinimaxM25Novita => {
203                "MiniMax-M2.5 model via Novita inference provider on HuggingFace router. Enhanced reasoning capabilities."
204            }
205            ModelId::HuggingFaceDeepseekV32Novita => {
206                "DeepSeek-V3.2 via Novita inference provider on HuggingFace router."
207            }
208            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
209                "Xiaomi MiMo-V2-Flash via Novita on HuggingFace router."
210            }
211            ModelId::HuggingFaceGlm5Novita => {
212                "Z.ai GLM-5 flagship model via Novita inference provider on HuggingFace router."
213            }
214            ModelId::HuggingFaceGlm51ZaiOrg => {
215                "Z.ai GLM-5.1 model via zai-org inference provider on HuggingFace router."
216            }
217            ModelId::HuggingFaceQwen3CoderNextNovita => {
218                "Qwen3-Coder-Next via Novita inference provider on HuggingFace router. Coding-optimized model with reasoning capabilities."
219            }
220            ModelId::HuggingFaceQwen35397BA17BTogether => {
221                "Qwen3.5-397B-A17B via Together inference provider on HuggingFace router. Vision-language model with linear attention and sparse MoE, 1M context window."
222            }
223            ModelId::HuggingFaceKimiK26Novita => {
224                "Kimi K2.6 via Novita inference provider on HuggingFace router."
225            }
226            ModelId::HuggingFaceStep35Flash => {
227                "Step 3.5 Flash flagship model via HuggingFace router (featherless-ai provider). Supports streaming and fast inference."
228            }
229            ModelId::OpenRouterMinimaxM25 => "MiniMax-M2.5 flagship model via OpenRouter",
230            ModelId::OpenRouterQwen3CoderNext => {
231                "Next-generation Qwen3 coding model optimized for agentic workflows via OpenRouter"
232            }
233            ModelId::OpenRouterMoonshotaiKimiK26 => {
234                "Kimi K2.6 multimodal agentic model for long-horizon coding and design via OpenRouter"
235            }
236            ModelId::OpenRouterOpenAIGpt55 => "OpenAI GPT-5.5 model accessed through OpenRouter",
237            _ => unreachable!(),
238        }
239    }
240}