//! Hugging Face model identifier constants.
//!
//! Source path: vtcode_config/constants/models/huggingface.rs
/// Model used when the configuration does not name one explicitly.
pub const DEFAULT_MODEL: &str = OPENAI_GPT_OSS_120B;

/// Every model identifier accepted for the Hugging Face provider.
/// Keep this list in sync with the per-model constants declared below;
/// `DEFAULT_MODEL` and all entries of `REASONING_MODELS` must appear here.
pub const SUPPORTED_MODELS: &[&str] = &[
    // Recommended conversational LLMs from HF docs
    GOOGLE_GEMMA_2_2B_IT,
    QWEN3_CODER_480B_A35B_INSTRUCT,
    OPENAI_GPT_OSS_120B,
    QWEN3_4B_THINKING_2507,
    QWEN25_7B_INSTRUCT_1M,
    QWEN25_CODER_32B_INSTRUCT,
    DEEPSEEK_R1,
    // Additional supported models
    DEEPSEEK_V32,
    OPENAI_GPT_OSS_20B,
    // Novita inference provider models
    MINIMAX_M2_5_NOVITA,
    DEEPSEEK_V32_NOVITA,
    XIAOMI_MIMO_V2_FLASH_NOVITA,
    QWEN3_CODER_NEXT_NOVITA,
    ZAI_GLM_5_NOVITA,
    // Together inference provider models
    QWEN3_5_397B_A17B_TOGETHER,
];
23
// Recommended conversational LLMs.
// Identifiers follow the Hugging Face "org/model" form; a ":provider" suffix
// (as on OPENAI_GPT_OSS_120B) appears to pin the inference provider used for
// routing — confirm against the provider-routing logic that consumes these.
pub const GOOGLE_GEMMA_2_2B_IT: &str = "google/gemma-2-2b-it";
pub const QWEN3_CODER_480B_A35B_INSTRUCT: &str = "Qwen/Qwen3-Coder-480B-A35B-Instruct";
pub const OPENAI_GPT_OSS_120B: &str = "openai/gpt-oss-120b:huggingface";
pub const QWEN3_4B_THINKING_2507: &str = "Qwen/Qwen3-4B-Thinking-2507";
pub const QWEN25_7B_INSTRUCT_1M: &str = "Qwen/Qwen2.5-7B-Instruct-1M";
pub const QWEN25_CODER_32B_INSTRUCT: &str = "Qwen/Qwen2.5-Coder-32B-Instruct";
pub const DEEPSEEK_R1: &str = "deepseek-ai/DeepSeek-R1";
32
// Additional supported models (routed via the ":huggingface" provider suffix).
pub const DEEPSEEK_V32: &str = "deepseek-ai/DeepSeek-V3.2:huggingface";
pub const OPENAI_GPT_OSS_20B: &str = "openai/gpt-oss-20b:huggingface";

// Novita inference provider models (":novita" suffix).
pub const MINIMAX_M2_5_NOVITA: &str = "MiniMaxAI/MiniMax-M2.5:novita";
pub const DEEPSEEK_V32_NOVITA: &str = "deepseek-ai/DeepSeek-V3.2:novita";
pub const XIAOMI_MIMO_V2_FLASH_NOVITA: &str = "XiaomiMiMo/MiMo-V2-Flash:novita";
pub const QWEN3_CODER_NEXT_NOVITA: &str = "Qwen/Qwen3-Coder-Next:novita";
pub const ZAI_GLM_5_NOVITA: &str = "zai-org/GLM-5:novita";

// Together inference provider models (":together" suffix).
pub const QWEN3_5_397B_A17B_TOGETHER: &str = "Qwen/Qwen3.5-397B-A17B:together";
43
/// Subset of `SUPPORTED_MODELS` treated as reasoning-capable.
/// NOTE(review): this is NOT all recommended conversational LLMs —
/// GOOGLE_GEMMA_2_2B_IT, QWEN25_7B_INSTRUCT_1M, and QWEN25_CODER_32B_INSTRUCT
/// are deliberately absent; ZAI_GLM_5_NOVITA is also excluded. Confirm the
/// omissions are intentional before extending this list.
pub const REASONING_MODELS: &[&str] = &[
    // Recommended conversational LLMs with reasoning support
    QWEN3_CODER_480B_A35B_INSTRUCT,
    OPENAI_GPT_OSS_120B,
    QWEN3_4B_THINKING_2507,
    DEEPSEEK_R1,
    // Additional reasoning models
    DEEPSEEK_V32,
    OPENAI_GPT_OSS_20B,
    DEEPSEEK_V32_NOVITA,
    MINIMAX_M2_5_NOVITA,
    XIAOMI_MIMO_V2_FLASH_NOVITA,
    QWEN3_CODER_NEXT_NOVITA,
    QWEN3_5_397B_A17B_TOGETHER,
];