//! vtcode_config/models/model_id/description.rs
1use super::ModelId;
2
3impl ModelId {
4    /// Get a description of the model's characteristics
5    pub fn description(&self) -> &'static str {
6        if let Some(meta) = self.openrouter_metadata() {
7            return meta.description;
8        }
9        match self {
10            // Gemini models
11            ModelId::Gemini31ProPreview => {
12                "Latest Gemini 3.1 Pro flagship model with improved thinking, efficiency, and factual consistency"
13            }
14            ModelId::Gemini31ProPreviewCustomTools => {
15                "Gemini 3.1 Pro variant optimized for agentic workflows using custom tools and bash"
16            }
17            ModelId::Gemini31FlashLitePreview => {
18                "Most cost-efficient Gemini 3.1 model, offering fastest performance for high-frequency, lightweight tasks"
19            }
20            ModelId::Gemini3FlashPreview => {
21                "Our most intelligent model built for speed, combining frontier intelligence with superior search and grounding"
22            }
23            // OpenAI models
24            ModelId::GPT5 => "Latest most capable OpenAI model with advanced reasoning",
25            ModelId::GPT52 => {
26                "Latest flagship OpenAI model with improved reasoning, xhigh effort, and built-in compaction support"
27            }
28            ModelId::GPT52Codex => {
29                "GPT-5.2 Codex variant optimized for agentic coding tasks with xhigh reasoning support"
30            }
31            ModelId::GPT54 => {
32                "Mainline frontier GPT model for general-purpose work, coding, long context, and multi-step agents"
33            }
34            ModelId::GPT54Pro => {
35                "Higher-compute GPT-5.4 variant for tougher problems with deeper reasoning"
36            }
37            ModelId::GPT53Codex => {
38                "GPT-5.3 variant optimized for agentic coding tasks with reasoning effort support (low, medium, high, xhigh)"
39            }
40            ModelId::GPT51Codex => {
41                "GPT-5.1 variant optimized for agentic coding tasks and software engineering workflows"
42            }
43            ModelId::GPT51CodexMax => {
44                "Higher-compute GPT-5.1 Codex variant optimized for longer-running engineering tasks"
45            }
46            ModelId::GPT5Mini => "Latest efficient OpenAI model, great for most tasks",
47            ModelId::GPT5Nano => "Latest most cost-effective OpenAI model",
48            ModelId::GPT5Codex => {
49                "GPT-5 variant optimized for agentic coding tasks and software engineering workflows"
50            }
51            ModelId::OpenAIGptOss20b => {
52                "OpenAI's open-source 20B parameter GPT-OSS model using harmony tokenization"
53            }
54            ModelId::OpenAIGptOss120b => {
55                "OpenAI's open-source 120B parameter GPT-OSS model using harmony tokenization"
56            }
57            // Anthropic models
58            ModelId::ClaudeOpus46 => {
59                "Next-gen Anthropic flagship with extended and adaptive thinking support"
60            }
61            ModelId::ClaudeSonnet46 => {
62                "Balanced flagship model for coding with extended and adaptive thinking support"
63            }
64            ModelId::ClaudeHaiku45 => {
65                "Latest efficient Anthropic model optimized for low-latency agent workflows"
66            }
67            // DeepSeek models
68            ModelId::DeepSeekChat => {
69                "DeepSeek V3.2 - Fast, efficient chat model for immediate responses"
70            }
71            ModelId::DeepSeekReasoner => {
72                "DeepSeek V3.2 - Thinking mode with integrated tool-use and reasoning capability"
73            }
74            // Z.AI models
75            ModelId::ZaiGlm5 => {
76                "Z.ai flagship GLM-5 foundation model engineered for complex systems design and long-horizon agent workflows"
77            }
78            // Ollama models
79            ModelId::OllamaGptOss20b => {
80                "Local GPT-OSS 20B deployment served via Ollama with no external API dependency"
81            }
82            ModelId::OllamaGptOss20bCloud => {
83                "Cloud-hosted GPT-OSS 20B accessed through Ollama Cloud for efficient reasoning tasks"
84            }
85            ModelId::OllamaGptOss120bCloud => {
86                "Cloud-hosted GPT-OSS 120B accessed through Ollama Cloud for larger reasoning tasks"
87            }
88            ModelId::OllamaQwen317b => {
89                "Qwen3 1.7B served locally through Ollama without external API requirements"
90            }
91            ModelId::OllamaQwen3CoderNext => {
92                "Qwen3-Coder-Next served via Ollama Cloud with 256K context, strong coding/tool-use performance, and non-thinking mode responses"
93            }
94            ModelId::OllamaDeepseekV32Cloud => {
95                "DeepSeek V3.2 cloud deployment via Ollama with enhanced reasoning and instruction following"
96            }
97            ModelId::OllamaQwen3Next80bCloud => {
98                "Qwen3 Next generation 80B model via Ollama Cloud with improved reasoning and long context"
99            }
100            ModelId::OllamaGlm5Cloud => "Cloud-hosted GLM-5 model served via Ollama Cloud",
101            ModelId::OllamaMinimaxM25Cloud => {
102                "Exceptional multilingual capabilities to elevate code engineering"
103            }
104            ModelId::OllamaGemini3FlashPreviewCloud => {
105                "Gemini 3 Flash offers frontier intelligence built for speed at a fraction of the cost."
106            }
107            ModelId::OllamaNemotron3SuperCloud => {
108                "NVIDIA Nemotron™ is a family of open models delivering leading efficiency and accuracy for building specialized AI agents. Nemotron-3-Super (120B) is optimized for collaborative agents and high-volume workloads."
109            }
110            ModelId::OllamaMinimaxM2Cloud => {
111                "Cloud-hosted MiniMax-M2 model accessed through Ollama Cloud for reasoning tasks"
112            }
113            ModelId::MinimaxM25 => {
114                "Latest MiniMax-M2.5 model with further improvements in reasoning and coding"
115            }
116            ModelId::MoonshotKimiK25 => "Kimi K2.5 - Moonshot.ai's flagship reasoning model",
117            ModelId::HuggingFaceDeepseekV32 => {
118                "DeepSeek-V3.2 via Hugging Face router for advanced reasoning"
119            }
120            ModelId::HuggingFaceOpenAIGptOss20b => "OpenAI GPT-OSS 20B via Hugging Face router",
121            ModelId::HuggingFaceOpenAIGptOss120b => "OpenAI GPT-OSS 120B via Hugging Face router",
122            ModelId::HuggingFaceMinimaxM25Novita => {
123                "MiniMax-M2.5 model via Novita inference provider on HuggingFace router. Enhanced reasoning capabilities."
124            }
125            ModelId::HuggingFaceDeepseekV32Novita => {
126                "DeepSeek-V3.2 via Novita inference provider on HuggingFace router."
127            }
128            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
129                "Xiaomi MiMo-V2-Flash via Novita on HuggingFace router."
130            }
131            ModelId::HuggingFaceGlm5Novita => {
132                "Z.ai GLM-5 flagship model via Novita inference provider on HuggingFace router."
133            }
134            ModelId::HuggingFaceQwen3CoderNextNovita => {
135                "Qwen3-Coder-Next via Novita inference provider on HuggingFace router. Coding-optimized model with reasoning capabilities."
136            }
137            ModelId::HuggingFaceQwen35397BA17BTogether => {
138                "Qwen3.5-397B-A17B via Together inference provider on HuggingFace router. Vision-language model with linear attention and sparse MoE, 1M context window."
139            }
140            ModelId::HuggingFaceStep35Flash => {
141                "Step 3.5 Flash flagship model via HuggingFace router (featherless-ai provider). Supports streaming and fast inference."
142            }
143            ModelId::OpenRouterMinimaxM25 => "MiniMax-M2.5 flagship model via OpenRouter",
144            ModelId::OpenRouterQwen3CoderNext => {
145                "Next-generation Qwen3 coding model optimized for agentic workflows via OpenRouter"
146            }
147            _ => unreachable!(),
148        }
149    }
150}