
vtcode_config/models/model_id/description.rs

use super::ModelId;

impl ModelId {
    /// Get a description of the model's characteristics
    pub fn description(&self) -> &'static str {
        if let Some(meta) = self.openrouter_metadata() {
            return meta.description;
        }
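        // Models without OpenRouter metadata fall back to the static
        // descriptions below.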
        match self {
            // Gemini models
            ModelId::Gemini31ProPreview => {
                "Latest Gemini 3.1 Pro flagship model with improved thinking, efficiency, and factual consistency"
            }
            ModelId::Gemini31ProPreviewCustomTools => {
                "Gemini 3.1 Pro variant optimized for agentic workflows using custom tools and bash"
            }
            ModelId::Gemini3ProPreview => {
                "Preview of next-generation Gemini 3 Pro model with advanced reasoning and capabilities"
            }
            ModelId::Gemini3FlashPreview => {
                "Most intelligent Gemini model built for speed, combining frontier intelligence with superior search and grounding"
            }
            // OpenAI models
            ModelId::GPT5 => "Latest and most capable OpenAI model with advanced reasoning",
            ModelId::GPT52 => {
                "Latest flagship OpenAI model with improved reasoning, support for xhigh reasoning effort, and built-in compaction"
            }
            ModelId::GPT52Codex => {
                "GPT-5.2 variant optimized for agentic coding tasks with reasoning effort support"
            }
            ModelId::GPT5Codex => {
                "Code-focused GPT-5 variant optimized for tool calling and structured outputs"
            }
            ModelId::GPT5Mini => "Latest efficient OpenAI model, great for most tasks",
            ModelId::GPT5Nano => "Latest and most cost-effective OpenAI model",
            ModelId::GPT51 => {
                "Enhanced flagship OpenAI model with improved reasoning and capabilities"
            }
            ModelId::GPT51Codex => {
                "Code-focused GPT-5.1 variant optimized for tool calling and structured outputs"
            }
            ModelId::GPT51CodexMax => {
                "Maximum-context, code-focused GPT-5.1 variant optimized for large codebases"
            }
            ModelId::GPT51Mini => "Enhanced efficient OpenAI model with improved capabilities",
            ModelId::CodexMiniLatest => "Latest Codex model optimized for code generation",
            ModelId::OpenAIGptOss20b => {
                "OpenAI's open-source 20B parameter GPT-OSS model using harmony tokenization"
            }
            ModelId::OpenAIGptOss120b => {
                "OpenAI's open-source 120B parameter GPT-OSS model using harmony tokenization"
            }
            // Anthropic models
            ModelId::ClaudeOpus46 => {
                "Next-gen Anthropic flagship with extended and adaptive thinking support"
            }
            ModelId::ClaudeSonnet46 => {
                "Balanced flagship model for coding with extended and adaptive thinking support"
            }
            ModelId::ClaudeOpus45 => {
                "Latest flagship Anthropic model with exceptional reasoning capabilities"
            }
            ModelId::ClaudeOpus41 => {
                "Previous flagship Anthropic model with exceptional reasoning capabilities"
            }
            ModelId::ClaudeSonnet45 => "Latest balanced Anthropic model for general tasks",
            ModelId::ClaudeHaiku45 => {
                "Latest efficient Anthropic model optimized for low-latency agent workflows"
            }
            ModelId::ClaudeSonnet4 => {
                "Previous balanced Anthropic model maintained for compatibility"
            }
            ModelId::ClaudeOpus4 => "Previous generation premium flagship model",
            ModelId::ClaudeSonnet37 => "Latest model in the Claude 3 family with extended thinking",
            ModelId::ClaudeHaiku35 => "Highly efficient model for high-volume tasks",
            // DeepSeek models
            ModelId::DeepSeekChat => {
                "DeepSeek V3.2 - Fast, efficient chat model for immediate responses"
            }
            ModelId::DeepSeekReasoner => {
                "DeepSeek V3.2 - Thinking mode with integrated tool-use and reasoning capability"
            }
            // xAI models
            ModelId::XaiGrok4 => "Flagship Grok 4 model with long context and tool use",
            ModelId::XaiGrok4Mini => "Efficient Grok 4 Mini tuned for low latency",
            ModelId::XaiGrok4Code => "Code-specialized Grok 4 deployment with tool support",
            ModelId::XaiGrok4CodeLatest => {
                "Latest Grok 4 code model offering enhanced reasoning traces"
            }
            ModelId::XaiGrok4Vision => "Multimodal Grok 4 model with image understanding",
            // Z.AI models
            ModelId::ZaiGlm5 => {
                "Z.ai flagship GLM-5 foundation model engineered for complex systems design and long-horizon agent workflows"
            }
            // Ollama models
            ModelId::OllamaGptOss20b => {
                "Local GPT-OSS 20B deployment served via Ollama with no external API dependency"
            }
            ModelId::OllamaGptOss20bCloud => {
                "Cloud-hosted GPT-OSS 20B accessed through Ollama Cloud for efficient reasoning tasks"
            }
            ModelId::OllamaGptOss120bCloud => {
                "Cloud-hosted GPT-OSS 120B accessed through Ollama Cloud for larger reasoning tasks"
            }
            ModelId::OllamaQwen317b => {
                "Qwen3 1.7B served locally through Ollama without external API requirements"
            }
            ModelId::OllamaDeepseekV32Cloud => {
                "DeepSeek V3.2 cloud deployment via Ollama with enhanced reasoning and instruction following"
            }
            ModelId::OllamaQwen3Next80bCloud => {
                "Qwen3 Next generation 80B model via Ollama Cloud with improved reasoning and long context"
            }
            ModelId::OllamaMistralLarge3675bCloud => {
                "Mistral Large 3 675B reasoning model via Ollama Cloud for complex problem-solving"
            }
            ModelId::OllamaGlm5Cloud => "Cloud-hosted GLM-5 model served via Ollama Cloud",
            ModelId::OllamaMinimaxM25Cloud => {
                "Cloud-hosted MiniMax-M2.5 via Ollama Cloud with strong multilingual and code-engineering capabilities"
            }
            ModelId::OllamaGemini3FlashPreviewCloud => {
                "Cloud-hosted Gemini 3 Flash via Ollama Cloud, built for speed at a fraction of the cost"
            }
            ModelId::OllamaGemini3ProPreviewLatestCloud => {
                "Cloud-hosted Gemini 3 Pro Preview (latest) via Ollama Cloud with advanced reasoning and long context"
            }
            ModelId::OllamaNemotron3Nano30bCloud => {
                "NVIDIA Nemotron-3-Nano 30B via Ollama Cloud, an efficient model for coding tasks"
            }
            ModelId::OllamaQwen3Coder480bCloud => {
                "Cloud-hosted Qwen3 Coder 480B model accessed through Ollama Cloud for coding tasks"
            }
            ModelId::OllamaMinimaxM2Cloud => {
                "Cloud-hosted MiniMax-M2 model accessed through Ollama Cloud for reasoning tasks"
            }
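            // LM Studio models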
            ModelId::LmStudioMetaLlama38BInstruct => {
                "Meta Llama 3 8B running through LM Studio's local OpenAI-compatible server"
            }
            ModelId::LmStudioMetaLlama318BInstruct => {
                "Meta Llama 3.1 8B running through LM Studio's local OpenAI-compatible server"
            }
            ModelId::LmStudioQwen257BInstruct => {
                "Qwen2.5 7B hosted in LM Studio for local experimentation and coding tasks"
            }
            ModelId::LmStudioGemma22BIt => {
                "Gemma 2 2B IT deployed via LM Studio for lightweight on-device assistance"
            }
            ModelId::LmStudioGemma29BIt => {
                "Gemma 2 9B IT served locally via LM Studio when you need additional capacity"
            }
            ModelId::LmStudioPhi31Mini4kInstruct => {
                "Phi-3.1 Mini 4K hosted in LM Studio for compact reasoning and experimentation"
            }
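            // MiniMax and Moonshot models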
            ModelId::MinimaxM25 => {
                "Latest MiniMax-M2.5 model with further improvements in reasoning and coding"
            }
            ModelId::MinimaxM2 => {
                "MiniMax-M2 via Anthropic-compatible API with reasoning and tool use"
            }
            ModelId::MoonshotMinimaxM25 => "MiniMax-M2.5 served via Moonshot API",
            ModelId::MoonshotQwen3CoderNext => "Qwen3 Coder Next model served via Moonshot API",
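            // Hugging Face router models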
            ModelId::HuggingFaceDeepseekV32 => {
                "DeepSeek-V3.2 via Hugging Face router for advanced reasoning"
            }
            ModelId::HuggingFaceOpenAIGptOss20b => "OpenAI GPT-OSS 20B via Hugging Face router",
            ModelId::HuggingFaceOpenAIGptOss120b => "OpenAI GPT-OSS 120B via Hugging Face router",
            ModelId::HuggingFaceMinimaxM25Novita => {
                "MiniMax-M2.5 via Novita inference provider on the Hugging Face router with enhanced reasoning capabilities"
            }
            ModelId::HuggingFaceDeepseekV32Novita => {
                "DeepSeek-V3.2 via Novita inference provider on the Hugging Face router"
            }
            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
                "Xiaomi MiMo-V2-Flash via Novita on the Hugging Face router"
            }
            ModelId::HuggingFaceGlm5Novita => {
                "Z.ai GLM-5 flagship model via Novita inference provider on the Hugging Face router"
            }
            ModelId::HuggingFaceQwen3CoderNextNovita => {
                "Coding-optimized Qwen3-Coder-Next with reasoning capabilities via Novita inference provider on the Hugging Face router"
            }
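            // OpenRouter models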
            ModelId::OpenRouterMinimaxM25 => "MiniMax-M2.5 flagship model via OpenRouter",
            ModelId::OpenRouterQwen3CoderNext => {
                "Next-generation Qwen3 coding model optimized for agentic workflows via OpenRouter"
            }
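            // Any remaining variants are expected to carry OpenRouter
            // metadata and be handled by the early return above, so this
            // arm should never be hit.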
            _ => unreachable!(),
        }
    }
}
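
For orientation, here is a minimal usage sketch. The import path vtcode_config::models::ModelId is an assumption based on the module layout shown above, and ModelId::GPT5 is used purely as an illustrative variant:

// Hypothetical caller; the import path is assumed from the
// vtcode_config/models/model_id/ layout above.
use vtcode_config::models::ModelId;

fn main() {
    // Pick any variant; GPT5 is used here only for illustration.
    let model = ModelId::GPT5;
    // description() returns a &'static str, so the text can be stored
    // or logged without any allocation.
    println!("{}", model.description());
}

Because OpenRouter-backed variants resolve their text through openrouter_metadata() before the match runs, callers get a single entry point regardless of where a given description is stored.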