//! Model identifier constants for the Ollama provider.
//!
//! Original location: `vtcode_config/constants/models/ollama.rs`.
//! This revision repairs a garbled paste in which per-line numbers were
//! fused into the source text, making the file non-compiling.

// ---------------------------------------------------------------------------
// Individual model identifiers
// ---------------------------------------------------------------------------

/// Default local model; also aliased below as `GPT_OSS_20B`.
pub const DEFAULT_LOCAL_MODEL: &str = "gpt-oss:20b";
/// Default cloud model; also aliased below as `GPT_OSS_120B_CLOUD`.
pub const DEFAULT_CLOUD_MODEL: &str = "gpt-oss:120b-cloud";
/// Provider-wide default: prefer the local model.
pub const DEFAULT_MODEL: &str = DEFAULT_LOCAL_MODEL;

/// Alias for the default local model.
pub const GPT_OSS_20B: &str = DEFAULT_LOCAL_MODEL;
pub const GPT_OSS_20B_CLOUD: &str = "gpt-oss:20b-cloud";
/// Alias for the default cloud model.
pub const GPT_OSS_120B_CLOUD: &str = DEFAULT_CLOUD_MODEL;
pub const QWEN3_1_7B: &str = "qwen3:1.7b";
pub const QWEN3_CODER_NEXT: &str = "qwen3-coder-next:cloud";
pub const DEEPSEEK_V32_CLOUD: &str = "deepseek-v3.2:cloud";
pub const QWEN3_NEXT_80B_CLOUD: &str = "qwen3-next:80b-cloud";
pub const GLM_5_CLOUD: &str = "glm-5:cloud";
// NOTE(review): constant name says `_CLOUD` but the tag is `:latest`, not
// `:cloud` — confirm the intended tag against the provider's model registry.
pub const GEMINI_3_1_PRO_PREVIEW_LATEST_CLOUD: &str = "gemini-3.1-pro-preview:latest";
pub const GEMINI_3_FLASH_PREVIEW_CLOUD: &str = "gemini-3-flash-preview:cloud";
pub const MINIMAX_M2_CLOUD: &str = "minimax-m2:cloud";
pub const MINIMAX_M25_CLOUD: &str = "minimax-m2.5:cloud";

// ---------------------------------------------------------------------------
// Model groupings
// ---------------------------------------------------------------------------

/// Every model identifier selectable for this provider.
pub const SUPPORTED_MODELS: &[&str] = &[
    DEFAULT_LOCAL_MODEL,
    QWEN3_1_7B,
    QWEN3_CODER_NEXT,
    DEFAULT_CLOUD_MODEL,
    GPT_OSS_20B_CLOUD,
    DEEPSEEK_V32_CLOUD,
    QWEN3_NEXT_80B_CLOUD,
    GLM_5_CLOUD,
    GEMINI_3_1_PRO_PREVIEW_LATEST_CLOUD,
    GEMINI_3_FLASH_PREVIEW_CLOUD,
    MINIMAX_M2_CLOUD,
    MINIMAX_M25_CLOUD,
];

/// Models treated as reasoning-capable by the rest of the config layer
/// (presumably gating reasoning-trace handling — verify against callers).
///
/// NOTE(review): the garbled source skipped an original line here (19 → 21);
/// verify upstream that no entry was lost from this list.
pub const REASONING_MODELS: &[&str] = &[
    GPT_OSS_20B,
    GPT_OSS_20B_CLOUD,
    GPT_OSS_120B_CLOUD,
    QWEN3_1_7B,
    DEEPSEEK_V32_CLOUD,
    QWEN3_NEXT_80B_CLOUD,
    GLM_5_CLOUD,
    GEMINI_3_1_PRO_PREVIEW_LATEST_CLOUD,
    GEMINI_3_FLASH_PREVIEW_CLOUD,
    MINIMAX_M2_CLOUD,
    MINIMAX_M25_CLOUD,
];

/// Subset of [`REASONING_MODELS`] that accept a configurable reasoning
/// effort level (presumed from the name — confirm against callers).
///
/// NOTE(review): the garbled source skipped an original line here (34 → 36);
/// verify upstream that no entry was lost from this list.
pub const REASONING_LEVEL_MODELS: &[&str] = &[
    GPT_OSS_20B,
    GPT_OSS_20B_CLOUD,
    GPT_OSS_120B_CLOUD,
    GLM_5_CLOUD,
    MINIMAX_M2_CLOUD,
    MINIMAX_M25_CLOUD,
    GEMINI_3_FLASH_PREVIEW_CLOUD,
];