// vtcode_config/models/model_id/description.rs
use super::ModelId;

3impl ModelId {
4 pub fn description(&self) -> &'static str {
6 if let Some(meta) = self.openrouter_metadata() {
7 return meta.description;
8 }
9 match self {
10 ModelId::Gemini31ProPreview => {
12 "Latest Gemini 3.1 Pro flagship model with improved thinking, efficiency, and factual consistency"
13 }
14 ModelId::Gemini31ProPreviewCustomTools => {
15 "Gemini 3.1 Pro variant optimized for agentic workflows using custom tools and bash"
16 }
17 ModelId::Gemini31FlashLitePreview => {
18 "Most cost-efficient Gemini 3.1 model, offering fastest performance for high-frequency, lightweight tasks"
19 }
20 ModelId::Gemini3FlashPreview => {
21 "Our most intelligent model built for speed, combining frontier intelligence with superior search and grounding"
22 }
23 ModelId::GPT => {
25 "Rolling alias for the latest GPT flagship model, recommended default for broad general-purpose work"
26 }
27 ModelId::GPT5 => "Latest most capable OpenAI model with advanced reasoning",
28 ModelId::GPT52 => {
29 "Latest flagship OpenAI model with improved reasoning, xhigh effort, and built-in compaction support"
30 }
31 ModelId::GPT54 => {
32 "Mainline frontier GPT model for general-purpose work, coding, long context, and multi-step agents"
33 }
34 ModelId::GPT54Pro => {
35 "Higher-compute GPT-5.4 variant for tougher problems with deeper reasoning"
36 }
37 ModelId::GPT5Mini => "Latest efficient OpenAI model, great for most tasks",
38 ModelId::GPT5Nano => "Latest most cost-effective OpenAI model",
39 ModelId::GPT53Codex => {
40 "GPT-5.3 variant optimized for agentic coding tasks with reasoning effort support (low, medium, high, xhigh)"
41 }
42 ModelId::OpenAIGptOss20b => {
43 "OpenAI's open-source 20B parameter GPT-OSS model using harmony tokenization"
44 }
45 ModelId::OpenAIGptOss120b => {
46 "OpenAI's open-source 120B parameter GPT-OSS model using harmony tokenization"
47 }
48 ModelId::ClaudeOpus46 => {
50 "Next-gen Anthropic flagship with extended and adaptive thinking support"
51 }
52 ModelId::ClaudeSonnet46 => {
53 "Balanced flagship model for coding with extended and adaptive thinking support"
54 }
55 ModelId::ClaudeHaiku45 => {
56 "Latest efficient Anthropic model optimized for low-latency agent workflows"
57 }
58 ModelId::DeepSeekChat => {
60 "DeepSeek V3.2 - Fast, efficient chat model for immediate responses"
61 }
62 ModelId::DeepSeekReasoner => {
63 "DeepSeek V3.2 - Thinking mode with integrated tool-use and reasoning capability"
64 }
65 ModelId::ZaiGlm5 => {
67 "Z.ai flagship GLM-5 foundation model engineered for complex systems design and long-horizon agent workflows"
68 }
69 ModelId::OllamaGptOss20b => {
71 "Local GPT-OSS 20B deployment served via Ollama with no external API dependency"
72 }
73 ModelId::OllamaGptOss20bCloud => {
74 "Cloud-hosted GPT-OSS 20B accessed through Ollama Cloud for efficient reasoning tasks"
75 }
76 ModelId::OllamaGptOss120bCloud => {
77 "Cloud-hosted GPT-OSS 120B accessed through Ollama Cloud for larger reasoning tasks"
78 }
79 ModelId::OllamaQwen317b => {
80 "Qwen3 1.7B served locally through Ollama without external API requirements"
81 }
82 ModelId::OllamaQwen3CoderNext => {
83 "Qwen3-Coder-Next served via Ollama Cloud with 256K context, strong coding/tool-use performance, and non-thinking mode responses"
84 }
85 ModelId::OllamaDeepseekV32Cloud => {
86 "DeepSeek V3.2 cloud deployment via Ollama with enhanced reasoning and instruction following"
87 }
88 ModelId::OllamaQwen3Next80bCloud => {
89 "Qwen3 Next generation 80B model via Ollama Cloud with improved reasoning and long context"
90 }
91 ModelId::OllamaGlm5Cloud => "Cloud-hosted GLM-5 model served via Ollama Cloud",
92 ModelId::OllamaMinimaxM25Cloud => {
93 "Exceptional multilingual capabilities to elevate code engineering"
94 }
95 ModelId::OllamaGemini3FlashPreviewCloud => {
96 "Gemini 3 Flash offers frontier intelligence built for speed at a fraction of the cost."
97 }
98 ModelId::OllamaMinimaxM2Cloud => {
99 "Cloud-hosted MiniMax-M2 model accessed through Ollama Cloud for reasoning tasks"
100 }
101 ModelId::MinimaxM25 => {
102 "Latest MiniMax-M2.5 model with further improvements in reasoning and coding"
103 }
104 ModelId::MoonshotKimiK25 => "Kimi K2.5 - Moonshot.ai's flagship reasoning model",
105 ModelId::HuggingFaceDeepseekV32 => {
106 "DeepSeek-V3.2 via Hugging Face router for advanced reasoning"
107 }
108 ModelId::HuggingFaceOpenAIGptOss20b => "OpenAI GPT-OSS 20B via Hugging Face router",
109 ModelId::HuggingFaceOpenAIGptOss120b => "OpenAI GPT-OSS 120B via Hugging Face router",
110 ModelId::HuggingFaceMinimaxM25Novita => {
111 "MiniMax-M2.5 model via Novita inference provider on HuggingFace router. Enhanced reasoning capabilities."
112 }
113 ModelId::HuggingFaceDeepseekV32Novita => {
114 "DeepSeek-V3.2 via Novita inference provider on HuggingFace router."
115 }
116 ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
117 "Xiaomi MiMo-V2-Flash via Novita on HuggingFace router."
118 }
119 ModelId::HuggingFaceGlm5Novita => {
120 "Z.ai GLM-5 flagship model via Novita inference provider on HuggingFace router."
121 }
122 ModelId::HuggingFaceQwen3CoderNextNovita => {
123 "Qwen3-Coder-Next via Novita inference provider on HuggingFace router. Coding-optimized model with reasoning capabilities."
124 }
125 ModelId::HuggingFaceQwen35397BA17BTogether => {
126 "Qwen3.5-397B-A17B via Together inference provider on HuggingFace router. Vision-language model with linear attention and sparse MoE, 1M context window."
127 }
128 ModelId::HuggingFaceStep35Flash => {
129 "Step 3.5 Flash flagship model via HuggingFace router (featherless-ai provider). Supports streaming and fast inference."
130 }
131 ModelId::OpenRouterMinimaxM25 => "MiniMax-M2.5 flagship model via OpenRouter",
132 ModelId::OpenRouterQwen3CoderNext => {
133 "Next-generation Qwen3 coding model optimized for agentic workflows via OpenRouter"
134 }
135 _ => unreachable!(),
136 }
137 }
138}