
vtcode_config/models/model_id/description.rs

use super::ModelId;

impl ModelId {
    /// Get a description of the model's characteristics
    pub fn description(&self) -> &'static str {
        if let Some(meta) = self.openrouter_metadata() {
            return meta.description;
        }
        match self {
            // Gemini models
            ModelId::Gemini31ProPreview => {
                "Latest Gemini 3.1 Pro flagship model with improved thinking, efficiency, and factual consistency"
            }
            ModelId::Gemini31ProPreviewCustomTools => {
                "Gemini 3.1 Pro variant optimized for agentic workflows using custom tools and bash"
            }
            ModelId::Gemini31FlashLitePreview => {
                "Most cost-efficient Gemini 3.1 model, offering the fastest performance for high-frequency, lightweight tasks"
            }
            ModelId::Gemini3FlashPreview => {
                "Google's most intelligent model built for speed, combining frontier intelligence with superior search and grounding"
            }
            // OpenAI models
            ModelId::GPT5 => "Latest and most capable OpenAI model with advanced reasoning",
            ModelId::GPT52 => {
                "Latest flagship OpenAI model with improved reasoning, xhigh effort, and built-in compaction support"
            }
            ModelId::GPT52Codex => {
                "GPT-5.2 Codex variant optimized for agentic coding tasks with xhigh reasoning support"
            }
            ModelId::GPT54 => {
                "Mainline frontier GPT model for general-purpose work, coding, long context, and multi-step agents"
            }
            ModelId::GPT54Pro => {
                "Higher-compute GPT-5.4 variant for tougher problems with deeper reasoning"
            }
            ModelId::GPT54Nano => {
                "Lightweight GPT-5.4 variant optimized for speed and cost-efficiency"
            }
            ModelId::GPT54Mini => {
                "Compact GPT-5.4 variant for cost-effective tasks with reduced reasoning overhead"
            }
            ModelId::GPT53Codex => {
                "GPT-5.3 variant optimized for agentic coding tasks with reasoning effort support (low, medium, high, xhigh)"
            }
            ModelId::GPT51Codex => {
                "GPT-5.1 variant optimized for agentic coding tasks and software engineering workflows"
            }
            ModelId::GPT51CodexMax => {
                "Higher-compute GPT-5.1 Codex variant optimized for longer-running engineering tasks"
            }
            ModelId::GPT5Mini => "Latest efficient OpenAI model, great for most tasks",
            ModelId::GPT5Nano => "Latest and most cost-effective OpenAI model",
            ModelId::GPT5Codex => {
                "GPT-5 variant optimized for agentic coding tasks and software engineering workflows"
            }
            ModelId::OpenAIGptOss20b => {
                "OpenAI's open-source 20B parameter GPT-OSS model using harmony tokenization"
            }
            ModelId::OpenAIGptOss120b => {
                "OpenAI's open-source 120B parameter GPT-OSS model using harmony tokenization"
            }
            // Anthropic models
            ModelId::ClaudeOpus46 => {
                "Next-gen Anthropic flagship with extended and adaptive thinking support"
            }
            ModelId::ClaudeSonnet46 => {
                "Balanced flagship model for coding with extended and adaptive thinking support"
            }
            ModelId::ClaudeHaiku45 => {
                "Latest efficient Anthropic model optimized for low-latency agent workflows"
            }
            // DeepSeek models
            ModelId::DeepSeekChat => {
                "DeepSeek V3.2 - Fast, efficient chat model for immediate responses"
            }
            ModelId::DeepSeekReasoner => {
                "DeepSeek V3.2 - Thinking mode with integrated tool-use and reasoning capability"
            }
            // Z.AI models
            ModelId::ZaiGlm5 => {
                "Z.ai flagship GLM-5 foundation model engineered for complex systems design and long-horizon agent workflows"
            }
            // Ollama models
            ModelId::OllamaGptOss20b => {
                "Local GPT-OSS 20B deployment served via Ollama with no external API dependency"
            }
            ModelId::OllamaGptOss20bCloud => {
                "Cloud-hosted GPT-OSS 20B accessed through Ollama Cloud for efficient reasoning tasks"
            }
            ModelId::OllamaGptOss120bCloud => {
                "Cloud-hosted GPT-OSS 120B accessed through Ollama Cloud for larger reasoning tasks"
            }
            ModelId::OllamaQwen317b => {
                "Qwen3 1.7B served locally through Ollama without external API requirements"
            }
            ModelId::OllamaQwen3CoderNext => {
                "Qwen3-Coder-Next served via Ollama Cloud with 256K context, strong coding/tool-use performance, and non-thinking mode responses"
            }
            ModelId::OllamaDeepseekV32Cloud => {
                "DeepSeek V3.2 cloud deployment via Ollama with enhanced reasoning and instruction following"
            }
            ModelId::OllamaQwen3Next80bCloud => {
                "Qwen3 Next generation 80B model via Ollama Cloud with improved reasoning and long context"
            }
            ModelId::OllamaGlm5Cloud => "Cloud-hosted GLM-5 model served via Ollama Cloud",
            ModelId::OllamaMinimaxM25Cloud => {
                "Cloud-hosted MiniMax-M2.5 via Ollama Cloud with exceptional multilingual capabilities for code engineering"
            }
            ModelId::OllamaGemini3FlashPreviewCloud => {
                "Cloud-hosted Gemini 3 Flash Preview via Ollama Cloud, offering frontier intelligence built for speed at a fraction of the cost"
            }
            ModelId::OllamaNemotron3SuperCloud => {
                "NVIDIA Nemotron-3-Super (120B) open model via Ollama Cloud, optimized for collaborative agents and high-volume workloads"
            }
            ModelId::OllamaMinimaxM2Cloud => {
                "Cloud-hosted MiniMax-M2 model accessed through Ollama Cloud for reasoning tasks"
            }
            // MiniMax models
            ModelId::MinimaxM27 => {
                "MiniMax-M2.7 with 204.8K context and strong reasoning and coding performance"
            }
            ModelId::MinimaxM25 => {
                "Latest MiniMax-M2.5 model with further improvements in reasoning and coding"
            }
            // Moonshot models
            ModelId::MoonshotKimiK25 => "Kimi K2.5 - Moonshot.ai's flagship reasoning model",
            // Hugging Face models
            ModelId::HuggingFaceDeepseekV32 => {
                "DeepSeek-V3.2 via Hugging Face router for advanced reasoning"
            }
            ModelId::HuggingFaceOpenAIGptOss20b => "OpenAI GPT-OSS 20B via Hugging Face router",
            ModelId::HuggingFaceOpenAIGptOss120b => "OpenAI GPT-OSS 120B via Hugging Face router",
            ModelId::HuggingFaceMinimaxM25Novita => {
                "MiniMax-M2.5 with enhanced reasoning capabilities via Novita inference provider on the Hugging Face router"
            }
            ModelId::HuggingFaceDeepseekV32Novita => {
                "DeepSeek-V3.2 via Novita inference provider on the Hugging Face router"
            }
            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
                "Xiaomi MiMo-V2-Flash via Novita inference provider on the Hugging Face router"
            }
            ModelId::HuggingFaceGlm5Novita => {
                "Z.ai GLM-5 flagship model via Novita inference provider on the Hugging Face router"
            }
            ModelId::HuggingFaceQwen3CoderNextNovita => {
                "Coding-optimized Qwen3-Coder-Next with reasoning capabilities via Novita inference provider on the Hugging Face router"
            }
            ModelId::HuggingFaceQwen35397BA17BTogether => {
                "Qwen3.5-397B-A17B vision-language model with linear attention, sparse MoE, and a 1M context window via Together inference provider on the Hugging Face router"
            }
            ModelId::HuggingFaceStep35Flash => {
                "Step 3.5 Flash flagship model with streaming and fast inference via the Hugging Face router (featherless-ai provider)"
            }
            // OpenRouter models
            ModelId::OpenRouterMinimaxM25 => "MiniMax-M2.5 flagship model via OpenRouter",
            ModelId::OpenRouterQwen3CoderNext => {
                "Next-generation Qwen3 coding model optimized for agentic workflows via OpenRouter"
            }
            // Every remaining variant is expected to carry OpenRouter metadata
            // and is handled by the early return above.
            _ => unreachable!(),
        }
    }
}
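
For reference, a minimal usage sketch follows. The variant names and the fallback behavior come from the code above; the `vtcode_config::models::ModelId` re-export path is an assumption and may differ in the actual crate.

// Sketch only: the import path below is assumed, not confirmed by this file.
use vtcode_config::models::ModelId;

fn main() {
    // Variants with an explicit match arm return their static strings;
    // variants carrying OpenRouter metadata short-circuit via the early
    // return from openrouter_metadata() before the match is reached.
    println!("{}", ModelId::GPT5Mini.description());
    // Prints: Latest efficient OpenAI model, great for most tasks
    println!("{}", ModelId::ClaudeHaiku45.description());
}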