vtcode_config/models/model_id/description.rs

use super::ModelId;

impl ModelId {
    /// Get a description of the model's characteristics
    pub fn description(&self) -> &'static str {
        if let Some(meta) = self.openrouter_metadata() {
            return meta.description;
        }
        match self {
            // Gemini models
            ModelId::Gemini31ProPreview => {
                "Latest Gemini 3.1 Pro flagship model with improved thinking, efficiency, and factual consistency"
            }
            ModelId::Gemini31ProPreviewCustomTools => {
                "Gemini 3.1 Pro variant optimized for agentic workflows using custom tools and bash"
            }
            ModelId::Gemini31FlashLitePreview => {
                "Most cost-efficient Gemini 3.1 model, offering fastest performance for high-frequency, lightweight tasks"
            }
            ModelId::Gemini3FlashPreview => {
                "Our most intelligent model built for speed, combining frontier intelligence with superior search and grounding"
            }
            // OpenAI models
            ModelId::GPT5 => "Latest most capable OpenAI model with advanced reasoning",
            ModelId::GPT52 => {
                "Latest flagship OpenAI model with improved reasoning, xhigh effort, and built-in compaction support"
            }
            ModelId::GPT5Mini => "Latest efficient OpenAI model, great for most tasks",
            ModelId::GPT5Nano => "Latest most cost-effective OpenAI model",
            ModelId::GPT53Codex => {
                "GPT-5.3 variant optimized for agentic coding tasks with reasoning effort support (low, medium, high, xhigh)"
            }
            ModelId::OpenAIGptOss20b => {
                "OpenAI's open-source 20B parameter GPT-OSS model using harmony tokenization"
            }
            ModelId::OpenAIGptOss120b => {
                "OpenAI's open-source 120B parameter GPT-OSS model using harmony tokenization"
            }
            // Anthropic models
            ModelId::ClaudeOpus46 => {
                "Next-gen Anthropic flagship with extended and adaptive thinking support"
            }
            ModelId::ClaudeSonnet46 => {
                "Balanced flagship model for coding with extended and adaptive thinking support"
            }
            ModelId::ClaudeHaiku45 => {
                "Latest efficient Anthropic model optimized for low-latency agent workflows"
            }
            // DeepSeek models
            ModelId::DeepSeekChat => {
                "DeepSeek V3.2 - Fast, efficient chat model for immediate responses"
            }
            ModelId::DeepSeekReasoner => {
                "DeepSeek V3.2 - Thinking mode with integrated tool-use and reasoning capability"
            }
            // Z.AI models
            ModelId::ZaiGlm5 => {
                "Z.ai flagship GLM-5 foundation model engineered for complex systems design and long-horizon agent workflows"
            }
            // Ollama models
            ModelId::OllamaGptOss20b => {
                "Local GPT-OSS 20B deployment served via Ollama with no external API dependency"
            }
            ModelId::OllamaGptOss20bCloud => {
                "Cloud-hosted GPT-OSS 20B accessed through Ollama Cloud for efficient reasoning tasks"
            }
            ModelId::OllamaGptOss120bCloud => {
                "Cloud-hosted GPT-OSS 120B accessed through Ollama Cloud for larger reasoning tasks"
            }
            ModelId::OllamaQwen317b => {
                "Qwen3 1.7B served locally through Ollama without external API requirements"
            }
            ModelId::OllamaQwen3CoderNext => {
                "Qwen3-Coder-Next served via Ollama Cloud with 256K context, strong coding/tool-use performance, and non-thinking mode responses"
            }
            ModelId::OllamaDeepseekV32Cloud => {
                "DeepSeek V3.2 cloud deployment via Ollama with enhanced reasoning and instruction following"
            }
            ModelId::OllamaQwen3Next80bCloud => {
                "Qwen3 Next generation 80B model via Ollama Cloud with improved reasoning and long context"
            }
            ModelId::OllamaGlm5Cloud => "Cloud-hosted GLM-5 model served via Ollama Cloud",
            ModelId::OllamaMinimaxM25Cloud => {
                "Exceptional multilingual capabilities to elevate code engineering"
            }
            ModelId::OllamaGemini3FlashPreviewCloud => {
                "Gemini 3 Flash offers frontier intelligence built for speed at a fraction of the cost."
            }
            ModelId::OllamaMinimaxM2Cloud => {
                "Cloud-hosted MiniMax-M2 model accessed through Ollama Cloud for reasoning tasks"
            }
            ModelId::MinimaxM25 => {
                "Latest MiniMax-M2.5 model with further improvements in reasoning and coding"
            }
            ModelId::MoonshotKimiK25 => "Kimi K2.5 - Moonshot.ai's flagship reasoning model",
            ModelId::HuggingFaceDeepseekV32 => {
                "DeepSeek-V3.2 via Hugging Face router for advanced reasoning"
            }
            ModelId::HuggingFaceOpenAIGptOss20b => "OpenAI GPT-OSS 20B via Hugging Face router",
            ModelId::HuggingFaceOpenAIGptOss120b => "OpenAI GPT-OSS 120B via Hugging Face router",
            ModelId::HuggingFaceMinimaxM25Novita => {
                "MiniMax-M2.5 model via Novita inference provider on HuggingFace router. Enhanced reasoning capabilities."
            }
            ModelId::HuggingFaceDeepseekV32Novita => {
                "DeepSeek-V3.2 via Novita inference provider on HuggingFace router."
            }
            ModelId::HuggingFaceXiaomiMimoV2FlashNovita => {
                "Xiaomi MiMo-V2-Flash via Novita on HuggingFace router."
            }
            ModelId::HuggingFaceGlm5Novita => {
                "Z.ai GLM-5 flagship model via Novita inference provider on HuggingFace router."
            }
            ModelId::HuggingFaceQwen3CoderNextNovita => {
                "Qwen3-Coder-Next via Novita inference provider on HuggingFace router. Coding-optimized model with reasoning capabilities."
            }
            ModelId::HuggingFaceQwen35397BA17BTogether => {
                "Qwen3.5-397B-A17B via Together inference provider on HuggingFace router. Vision-language model with linear attention and sparse MoE, 1M context window."
            }
            ModelId::HuggingFaceStep35Flash => {
                "Step 3.5 Flash flagship model via HuggingFace router (featherless-ai provider). Supports streaming and fast inference."
            }
            ModelId::OpenRouterMinimaxM25 => "MiniMax-M2.5 flagship model via OpenRouter",
            ModelId::OpenRouterQwen3CoderNext => {
                "Next-generation Qwen3 coding model optimized for agentic workflows via OpenRouter"
            }
            _ => unreachable!(),
        }
    }
}
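
A minimal usage sketch follows. The `use` path is an assumption about how `ModelId` is re-exported (only `super::ModelId` is visible in this file); `description()` returning `&'static str`, the `ModelId::GPT5` variant, and the `openrouter_metadata()` fallback come from the code above.

// Hypothetical caller; the import path is assumed, not confirmed by this file.
use vtcode_config::models::model_id::ModelId;

fn main() {
    // description() prefers OpenRouter-supplied metadata when present,
    // otherwise it falls back to the hard-coded strings in the match.
    let model = ModelId::GPT5;
    println!("{}", model.description());
}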