// vtcode_config/models/model_id/description.rs

use super::ModelId;
3impl ModelId {
4 pub fn description(&self) -> &'static str {
6 if let Some(meta) = self.openrouter_metadata() {
7 return meta.description;
8 }
9 match self {
10 ModelId::Gemini31ProPreview => {
12 "Latest Gemini 3.1 Pro flagship model with improved thinking, efficiency, and factual consistency"
13 }
14 ModelId::Gemini31ProPreviewCustomTools => {
15 "Gemini 3.1 Pro variant optimized for agentic workflows using custom tools and bash"
16 }
17 ModelId::Gemini3ProPreview => {
18 "Preview of next-generation Gemini 3 Pro model with advanced reasoning and capabilities"
19 }
20 ModelId::Gemini3FlashPreview => {
21 "Our most intelligent model built for speed, combining frontier intelligence with superior search and grounding"
22 }
23 ModelId::GPT5 => "Latest most capable OpenAI model with advanced reasoning",
25 ModelId::GPT52 => {
26 "Latest flagship OpenAI model with improved reasoning, xhigh effort, and built-in compaction support"
27 }
28 ModelId::GPT52Codex => {
29 "GPT-5.2 variant optimized for agentic coding tasks with reasoning effort support"
30 }
31 ModelId::GPT5Mini => "Latest efficient OpenAI model, great for most tasks",
32 ModelId::GPT5Nano => "Latest most cost-effective OpenAI model",
33 ModelId::GPT53Codex => {
34 "GPT-5.3 variant optimized for agentic coding tasks with reasoning effort support (low, medium, high, xhigh)"
35 }
36 ModelId::OpenAIGptOss20b => {
37 "OpenAI's open-source 20B parameter GPT-OSS model using harmony tokenization"
38 }
39 ModelId::OpenAIGptOss120b => {
40 "OpenAI's open-source 120B parameter GPT-OSS model using harmony tokenization"
41 }
42 ModelId::ClaudeOpus46 => {
44 "Next-gen Anthropic flagship with extended and adaptive thinking support"
45 }
46 ModelId::ClaudeSonnet46 => {
47 "Balanced flagship model for coding with extended and adaptive thinking support"
48 }
49 ModelId::ClaudeOpus45 => {
50 "Latest flagship Anthropic model with exceptional reasoning capabilities"
51 }
52 ModelId::ClaudeOpus41 => {
53 "Latest flagship Anthropic model with exceptional reasoning capabilities"
54 }
55 ModelId::ClaudeSonnet45 => "Latest balanced Anthropic model for general tasks",
56 ModelId::ClaudeHaiku45 => {
57 "Latest efficient Anthropic model optimized for low-latency agent workflows"
58 }
59 ModelId::ClaudeSonnet4 => {
60 "Previous balanced Anthropic model maintained for compatibility"
61 }
62 ModelId::ClaudeOpus4 => "Previous generation premium flagship model",
63 ModelId::ClaudeSonnet37 => "Latest model in the Claude 3 family with extended thinking",
64 ModelId::ClaudeHaiku35 => "Highly efficient model for high-volume tasks",
65 ModelId::DeepSeekChat => {
67 "DeepSeek V3.2 - Fast, efficient chat model for immediate responses"
68 }
69 ModelId::DeepSeekReasoner => {
70 "DeepSeek V3.2 - Thinking mode with integrated tool-use and reasoning capability"
71 }
72 ModelId::XaiGrok4 => "Flagship Grok 4 model with long context and tool use",
74 ModelId::XaiGrok4Mini => "Efficient Grok 4 Mini tuned for low latency",
75 ModelId::XaiGrok4Code => "Code-specialized Grok 4 deployment with tool support",
76 ModelId::XaiGrok4CodeLatest => {
77 "Latest Grok 4 code model offering enhanced reasoning traces"
78 }
79 ModelId::XaiGrok4Vision => "Multimodal Grok 4 model with image understanding",
80 ModelId::ZaiGlm5 => {
82 "Z.ai flagship GLM-5 foundation model engineered for complex systems design and long-horizon agent workflows"
83 }
84 ModelId::OllamaGptOss20b => {
86 "Local GPT-OSS 20B deployment served via Ollama with no external API dependency"
87 }
88 ModelId::OllamaGptOss20bCloud => {
89 "Cloud-hosted GPT-OSS 20B accessed through Ollama Cloud for efficient reasoning tasks"
90 }
91 ModelId::OllamaGptOss120bCloud => {
92 "Cloud-hosted GPT-OSS 120B accessed through Ollama Cloud for larger reasoning tasks"
93 }
94 ModelId::OllamaQwen317b => {
95 "Qwen3 1.7B served locally through Ollama without external API requirements"
96 }
97 ModelId::OllamaDeepseekV32Cloud => {
98 "DeepSeek V3.2 cloud deployment via Ollama with enhanced reasoning and instruction following"
99 }
100 ModelId::OllamaQwen3Next80bCloud => {
101 "Qwen3 Next generation 80B model via Ollama Cloud with improved reasoning and long context"
102 }
103 ModelId::OllamaMistralLarge3675bCloud => {
104 "Mistral Large 3 675B reasoning model via Ollama Cloud for complex problem-solving"
105 }
106 ModelId::OllamaGlm5Cloud => "Cloud-hosted GLM-5 model served via Ollama Cloud",
107 ModelId::OllamaMinimaxM25Cloud => {
108 "Exceptional multilingual capabilities to elevate code engineering"
109 }
110 ModelId::OllamaGemini3FlashPreviewCloud => {
111 "Gemini 3 Flash offers frontier intelligence built for speed at a fraction of the cost."
112 }
113 ModelId::OllamaGemini3ProPreviewLatestCloud => {
114 "Gemini 3 Pro Preview Latest offers advanced reasoning and long context capabilities."
115 }
116 ModelId::OllamaNemotron3Nano30bCloud => {
117 "NVIDIA Nemotron-3-Nano 30B brings efficient excellence to code"
118 }
119 ModelId::OllamaQwen3Coder480bCloud => {
120 "Cloud-hosted Qwen3 Coder 480B model accessed through Ollama Cloud for coding tasks"
121 }
122 ModelId::OllamaMinimaxM2Cloud => {
123 "Cloud-hosted MiniMax-M2 model accessed through Ollama Cloud for reasoning tasks"
124 }
125 ModelId::LmStudioMetaLlama38BInstruct => {
126 "Meta Llama 3 8B running through LM Studio's local OpenAI-compatible server"
127 }
128 ModelId::LmStudioMetaLlama318BInstruct => {
129 "Meta Llama 3.1 8B running through LM Studio's local OpenAI-compatible server"
130 }
131 ModelId::LmStudioQwen257BInstruct => {
132 "Qwen2.5 7B hosted in LM Studio for local experimentation and coding tasks"
133 }
134 ModelId::LmStudioGemma22BIt => {
135 "Gemma 2 2B IT deployed via LM Studio for lightweight on-device assistance"
136 }
137 ModelId::LmStudioGemma29BIt => {
138 "Gemma 2 9B IT served locally via LM Studio when you need additional capacity"
139 }
140 ModelId::LmStudioPhi31Mini4kInstruct => {
141 "Phi-3.1 Mini 4K hosted in LM Studio for compact reasoning and experimentation"
142 }
143 ModelId::MinimaxM25 => {
144 "Latest MiniMax-M2.5 model with further improvements in reasoning and coding"
145 }
146 ModelId::MinimaxM2 => {
147 "MiniMax-M2 via Anthropic-compatible API with reasoning and tool use"
148 }
149 ModelId::MoonshotMinimaxM25 => "MiniMax-M2.5 served via Moonshot API",
150 ModelId::MoonshotQwen3CoderNext => "Qwen3 Coder Next model served via Moonshot API",
151 ModelId::HuggingFaceDeepseekV32 => {
152 "DeepSeek-V3.2 via Hugging Face router for advanced reasoning"
153 }
154 ModelId::HuggingFaceOpenAIGptOss20b => "OpenAI GPT-OSS 20B via Hugging Face router",
155 ModelId::HuggingFaceOpenAIGptOss120b => "OpenAI GPT-OSS 120B via Hugging Face router",
156 ModelId::HuggingFaceMinimaxM25Novita => {
157 "MiniMax-M2.5 model via Novita inference provider on HuggingFace router. Enhanced reasoning capabilities."
158 }
159 ModelId::HuggingFaceDeepseekV32Novita => {
160 "DeepSeek-V3.2 via Novita inference provider on HuggingFace router."
161 }
162 ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
163 "Xiaomi MiMo-V2-Flash via Novita on HuggingFace router."
164 }
165 ModelId::HuggingFaceGlm5Novita => {
166 "Z.ai GLM-5 flagship model via Novita inference provider on HuggingFace router."
167 }
168 ModelId::HuggingFaceQwen3CoderNextNovita => {
169 "Qwen3-Coder-Next via Novita inference provider on HuggingFace router. Coding-optimized model with reasoning capabilities."
170 }
171 ModelId::OpenRouterMinimaxM25 => "MiniMax-M2.5 flagship model via OpenRouter",
172 ModelId::OpenRouterQwen3CoderNext => {
173 "Next-generation Qwen3 coding model optimized for agentic workflows via OpenRouter"
174 }
175 _ => unreachable!(),
176 }
177 }
178}