vtcode_core/config/constants.rs

/// Prompt path constants to avoid hardcoding throughout the codebase
pub mod prompts {
    pub const DEFAULT_SYSTEM_PROMPT_PATH: &str = "prompts/system.md";
    pub const CODER_SYSTEM_PROMPT_PATH: &str = "prompts/coder_system.md";
}

/// Model ID constants, kept in sync with docs/models.json
pub mod models {
    // Google/Gemini models
    pub mod google {
        pub const DEFAULT_MODEL: &str = "gemini-2.5-flash-preview-05-20";
        pub const SUPPORTED_MODELS: &[&str] = &[
            "gemini-2.5-flash-preview-05-20",
            "gemini-2.5-pro",
            "gemini-2.5-flash",
            "gemini-2.5-flash-lite",
        ];

        // Convenience constants for commonly used models
        pub const GEMINI_2_5_FLASH_PREVIEW: &str = "gemini-2.5-flash-preview-05-20";
        pub const GEMINI_2_5_PRO: &str = "gemini-2.5-pro";
        pub const GEMINI_2_5_FLASH: &str = "gemini-2.5-flash";
        pub const GEMINI_2_5_FLASH_LITE: &str = "gemini-2.5-flash-lite";
    }

    // OpenAI models (from docs/models.json)
    pub mod openai {
        pub const DEFAULT_MODEL: &str = "gpt-5";
        pub const SUPPORTED_MODELS: &[&str] =
            &["gpt-5", "gpt-5-mini", "gpt-5-nano", "codex-mini-latest"];

        /// Models that support the OpenAI reasoning API extensions
        pub const REASONING_MODELS: &[&str] = &[GPT_5, GPT_5_MINI, GPT_5_NANO];

        // Convenience constants for commonly used models
        pub const GPT_5: &str = "gpt-5";
        pub const GPT_5_MINI: &str = "gpt-5-mini";
        pub const GPT_5_NANO: &str = "gpt-5-nano";
        pub const CODEX_MINI_LATEST: &str = "codex-mini-latest";
        pub const CODEX_MINI: &str = "codex-mini";
    }

    // OpenRouter models (extensible via vtcode.toml)
    pub mod openrouter {
        pub const DEFAULT_MODEL: &str = "x-ai/grok-code-fast-1";
        pub const SUPPORTED_MODELS: &[&str] = &[
            "x-ai/grok-code-fast-1",
            "x-ai/grok-4-fast:free",
            "qwen/qwen3-coder",
            "deepseek/deepseek-chat-v3.1",
            "openai/gpt-5",
            "anthropic/claude-sonnet-4",
        ];

        /// Models that expose reasoning traces via OpenRouter APIs
        pub const REASONING_MODELS: &[&str] = &[
            X_AI_GROK_4_FAST_FREE,
            OPENAI_GPT_5,
            ANTHROPIC_CLAUDE_SONNET_4,
        ];

        pub const X_AI_GROK_CODE_FAST_1: &str = "x-ai/grok-code-fast-1";
        pub const X_AI_GROK_4_FAST_FREE: &str = "x-ai/grok-4-fast:free";
        pub const QWEN3_CODER: &str = "qwen/qwen3-coder";
        pub const DEEPSEEK_DEEPSEEK_CHAT_V3_1: &str = "deepseek/deepseek-chat-v3.1";
        pub const OPENAI_GPT_5: &str = "openai/gpt-5";
        pub const ANTHROPIC_CLAUDE_SONNET_4: &str = "anthropic/claude-sonnet-4";
    }

    // Anthropic models (from docs/models.json), updated for tool-use best practices
    pub mod anthropic {
        // Standard model for straightforward tools; Sonnet 4 is preferred for most use cases
        pub const DEFAULT_MODEL: &str = "claude-sonnet-4-20250514";
        pub const SUPPORTED_MODELS: &[&str] = &[
            "claude-opus-4-1-20250805", // Latest: Opus 4.1 (2025-08-05)
            "claude-sonnet-4-20250514", // Latest: Sonnet 4 (2025-05-14)
        ];

        // Convenience constants for commonly used models
        pub const CLAUDE_OPUS_4_1_20250805: &str = "claude-opus-4-1-20250805";
        pub const CLAUDE_SONNET_4_20250514: &str = "claude-sonnet-4-20250514";
    }

    // xAI models
    pub mod xai {
        pub const DEFAULT_MODEL: &str = "grok-2-latest";
        pub const SUPPORTED_MODELS: &[&str] = &[
            "grok-2-latest",
            "grok-2",
            "grok-2-mini",
            "grok-2-reasoning",
            "grok-2-vision",
        ];

        pub const GROK_2_LATEST: &str = "grok-2-latest";
        pub const GROK_2: &str = "grok-2";
        pub const GROK_2_MINI: &str = "grok-2-mini";
        pub const GROK_2_REASONING: &str = "grok-2-reasoning";
        pub const GROK_2_VISION: &str = "grok-2-vision";
    }

    // Backwards compatibility: keep the old flat constants working
    pub const GEMINI_2_5_FLASH_PREVIEW: &str = google::GEMINI_2_5_FLASH_PREVIEW;
    pub const GEMINI_2_5_FLASH: &str = google::GEMINI_2_5_FLASH;
    pub const GEMINI_2_5_PRO: &str = google::GEMINI_2_5_PRO;
    pub const GEMINI_2_5_FLASH_LITE: &str = google::GEMINI_2_5_FLASH_LITE;
    pub const GPT_5: &str = openai::GPT_5;
    pub const GPT_5_MINI: &str = openai::GPT_5_MINI;
    pub const GPT_5_NANO: &str = openai::GPT_5_NANO;
    pub const CODEX_MINI: &str = openai::CODEX_MINI;
    pub const CODEX_MINI_LATEST: &str = openai::CODEX_MINI_LATEST;
    pub const CLAUDE_OPUS_4_1_20250805: &str = anthropic::CLAUDE_OPUS_4_1_20250805;
    pub const CLAUDE_SONNET_4_20250514: &str = anthropic::CLAUDE_SONNET_4_20250514;
    pub const OPENROUTER_X_AI_GROK_CODE_FAST_1: &str = openrouter::X_AI_GROK_CODE_FAST_1;
    pub const OPENROUTER_X_AI_GROK_4_FAST_FREE: &str = openrouter::X_AI_GROK_4_FAST_FREE;
    pub const OPENROUTER_QWEN3_CODER: &str = openrouter::QWEN3_CODER;
    pub const OPENROUTER_DEEPSEEK_CHAT_V3_1: &str = openrouter::DEEPSEEK_DEEPSEEK_CHAT_V3_1;
    pub const OPENROUTER_OPENAI_GPT_5: &str = openrouter::OPENAI_GPT_5;
    pub const OPENROUTER_ANTHROPIC_CLAUDE_SONNET_4: &str = openrouter::ANTHROPIC_CLAUDE_SONNET_4;
    pub const XAI_GROK_2_LATEST: &str = xai::GROK_2_LATEST;
    pub const XAI_GROK_2: &str = xai::GROK_2;
    pub const XAI_GROK_2_MINI: &str = xai::GROK_2_MINI;
    pub const XAI_GROK_2_REASONING: &str = xai::GROK_2_REASONING;
    pub const XAI_GROK_2_VISION: &str = xai::GROK_2_VISION;
}

/// Model validation and helper functions
pub mod model_helpers {
    use super::models;

    /// Get supported models for a provider
    pub fn supported_for(provider: &str) -> Option<&'static [&'static str]> {
        match provider {
            "google" | "gemini" => Some(models::google::SUPPORTED_MODELS),
            "openai" => Some(models::openai::SUPPORTED_MODELS),
            "anthropic" => Some(models::anthropic::SUPPORTED_MODELS),
            "openrouter" => Some(models::openrouter::SUPPORTED_MODELS),
            "xai" => Some(models::xai::SUPPORTED_MODELS),
            _ => None,
        }
    }

    /// Get default model for a provider
    pub fn default_for(provider: &str) -> Option<&'static str> {
        match provider {
            "google" | "gemini" => Some(models::google::DEFAULT_MODEL),
            "openai" => Some(models::openai::DEFAULT_MODEL),
            "anthropic" => Some(models::anthropic::DEFAULT_MODEL),
            "openrouter" => Some(models::openrouter::DEFAULT_MODEL),
            "xai" => Some(models::xai::DEFAULT_MODEL),
            _ => None,
        }
    }

    /// Check whether a model is supported by a provider
    pub fn is_valid(provider: &str, model: &str) -> bool {
        supported_for(provider)
            .map(|list| list.iter().any(|m| *m == model))
            .unwrap_or(false)
    }
}
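// Illustrative usage sketch: a minimal #[cfg(test)] module exercising the
// helpers above against the constants defined in this file. The module and
// test names are illustrative only.
#[cfg(test)]
mod model_helpers_usage {
    use super::{model_helpers, models};

    #[test]
    fn resolves_defaults_and_validates_models() {
        // Known providers resolve to their DEFAULT_MODEL constants; "google"
        // and "gemini" are accepted as aliases for the same provider.
        assert_eq!(
            model_helpers::default_for("openai"),
            Some(models::openai::DEFAULT_MODEL)
        );
        assert!(model_helpers::is_valid(
            "gemini",
            models::google::GEMINI_2_5_PRO
        ));

        // Unknown providers degrade gracefully: None / false, no panic.
        assert_eq!(model_helpers::supported_for("unknown"), None);
        assert!(!model_helpers::is_valid("unknown", "any-model"));
    }
}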

/// Default configuration values
pub mod defaults {
    use super::models;

    pub const DEFAULT_MODEL: &str = models::google::GEMINI_2_5_FLASH_PREVIEW;
    pub const DEFAULT_CLI_MODEL: &str = models::google::GEMINI_2_5_FLASH_PREVIEW;
    pub const DEFAULT_PROVIDER: &str = "gemini";
    pub const DEFAULT_API_KEY_ENV: &str = "GEMINI_API_KEY";
    pub const DEFAULT_THEME: &str = "ciapre-dark";
    pub const DEFAULT_MAX_TOOL_LOOPS: usize = 100;
    pub const ANTHROPIC_DEFAULT_MAX_TOKENS: u32 = 4_096;
}

/// Reasoning effort configuration constants
pub mod reasoning {
    pub const LOW: &str = "low";
    pub const MEDIUM: &str = "medium";
    pub const HIGH: &str = "high";
    pub const ALLOWED_LEVELS: &[&str] = &[LOW, MEDIUM, HIGH];
}
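// Illustrative sketch: validating a user-supplied reasoning effort string
// against ALLOWED_LEVELS. The `is_allowed` helper is hypothetical and exists
// only for this example; it is not part of the module's API.
#[cfg(test)]
mod reasoning_levels_sketch {
    use super::reasoning;

    /// Hypothetical validation helper built on ALLOWED_LEVELS.
    fn is_allowed(level: &str) -> bool {
        reasoning::ALLOWED_LEVELS.contains(&level)
    }

    #[test]
    fn accepts_only_known_levels() {
        assert!(is_allowed(reasoning::LOW));
        assert!(is_allowed(reasoning::MEDIUM));
        assert!(is_allowed(reasoning::HIGH));
        assert!(!is_allowed("extreme"));
    }
}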

/// Message role constants to avoid hardcoding strings
pub mod message_roles {
    pub const SYSTEM: &str = "system";
    pub const USER: &str = "user";
    pub const ASSISTANT: &str = "assistant";
    pub const TOOL: &str = "tool";
}

/// URL constants for API endpoints
pub mod urls {
    pub const GEMINI_API_BASE: &str = "https://generativelanguage.googleapis.com/v1beta";
    pub const OPENAI_API_BASE: &str = "https://api.openai.com/v1";
    pub const ANTHROPIC_API_BASE: &str = "https://api.anthropic.com/v1";
    pub const ANTHROPIC_API_VERSION: &str = "2023-06-01";
    pub const OPENROUTER_API_BASE: &str = "https://openrouter.ai/api/v1";
    pub const XAI_API_BASE: &str = "https://api.x.ai/v1";
}

/// Tool name constants to avoid hardcoding strings throughout the codebase
pub mod tools {
    pub const GREP_SEARCH: &str = "grep_search";
    pub const LIST_FILES: &str = "list_files";
    pub const RUN_TERMINAL_CMD: &str = "run_terminal_cmd";
    pub const READ_FILE: &str = "read_file";
    pub const WRITE_FILE: &str = "write_file";
    pub const EDIT_FILE: &str = "edit_file";
    pub const DELETE_FILE: &str = "delete_file";
    pub const CREATE_FILE: &str = "create_file";
    pub const AST_GREP_SEARCH: &str = "ast_grep_search";
    pub const SIMPLE_SEARCH: &str = "simple_search";
    pub const BASH: &str = "bash";
    pub const APPLY_PATCH: &str = "apply_patch";
    pub const SRGN: &str = "srgn";
    pub const CURL: &str = "curl";
    pub const UPDATE_PLAN: &str = "update_plan";

    // Explorer-specific tools
    pub const FILE_METADATA: &str = "file_metadata";
    pub const PROJECT_OVERVIEW: &str = "project_overview";
    pub const TREE_SITTER_ANALYZE: &str = "tree_sitter_analyze";

    // Special wildcard for full access
    pub const WILDCARD_ALL: &str = "*";
}

/// Project documentation defaults
pub mod project_doc {
    pub const DEFAULT_MAX_BYTES: usize = 16 * 1024;
}

/// Context window management defaults
pub mod context {
    /// Approximate character count per token when estimating context size
    pub const CHAR_PER_TOKEN_APPROX: usize = 3;

    /// Default maximum context window (in approximate tokens)
    pub const DEFAULT_MAX_TOKENS: usize = 90_000;

    /// Trim target as a percentage of the maximum token budget
    pub const DEFAULT_TRIM_TO_PERCENT: u8 = 80;

    /// Minimum allowed trim percentage (prevents overly aggressive trimming)
    pub const MIN_TRIM_RATIO_PERCENT: u8 = 60;

    /// Maximum allowed trim percentage (prevents minimal trimming)
    pub const MAX_TRIM_RATIO_PERCENT: u8 = 90;

    /// Default number of recent turns to preserve verbatim
    pub const DEFAULT_PRESERVE_RECENT_TURNS: usize = 12;

    /// Minimum number of recent turns that must remain after trimming
    pub const MIN_PRESERVE_RECENT_TURNS: usize = 6;

    /// Maximum number of recent turns to keep when aggressively reducing context
    pub const AGGRESSIVE_PRESERVE_RECENT_TURNS: usize = 8;

    /// Maximum number of retry attempts when the provider signals context overflow
    pub const CONTEXT_ERROR_RETRY_LIMIT: usize = 2;
}
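// Illustrative sketch of how the context budget constants combine. The
// `estimate_tokens` and `trim_target` helpers are hypothetical, named only
// for this example; the crate's real trimming logic is not shown here.
#[cfg(test)]
mod context_budget_sketch {
    use super::context;

    /// Rough token estimate from a character count (~3 chars per token).
    fn estimate_tokens(chars: usize) -> usize {
        chars / context::CHAR_PER_TOKEN_APPROX
    }

    /// Token count to trim down to for a given budget and percentage.
    fn trim_target(max_tokens: usize, percent: u8) -> usize {
        max_tokens * usize::from(percent) / 100
    }

    #[test]
    fn default_budget_math() {
        // A 90_000-token budget trimmed to 80% leaves roughly 72_000 tokens.
        assert_eq!(
            trim_target(context::DEFAULT_MAX_TOKENS, context::DEFAULT_TRIM_TO_PERCENT),
            72_000
        );
        // A 300_000-character transcript is estimated at ~100_000 tokens,
        // which exceeds DEFAULT_MAX_TOKENS and would trigger trimming.
        assert!(estimate_tokens(300_000) > context::DEFAULT_MAX_TOKENS);
    }
}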

/// Chunking constants for large file handling
pub mod chunking {
    /// Maximum lines before triggering chunking for read_file
    pub const MAX_LINES_THRESHOLD: usize = 2_000;

    /// Number of lines to read from start of file when chunking
    pub const CHUNK_START_LINES: usize = 800;

    /// Number of lines to read from end of file when chunking
    pub const CHUNK_END_LINES: usize = 800;

    /// Maximum lines for terminal command output before truncation
    pub const MAX_TERMINAL_OUTPUT_LINES: usize = 3_000;

    /// Number of lines to show from start of terminal output when truncating
    pub const TERMINAL_OUTPUT_START_LINES: usize = 1_000;

    /// Number of lines to show from end of terminal output when truncating
    pub const TERMINAL_OUTPUT_END_LINES: usize = 1_000;

    /// Maximum content size for write_file before chunking (in bytes)
    pub const MAX_WRITE_CONTENT_SIZE: usize = 500_000; // 500KB

    /// Chunk size for write operations (in bytes)
    pub const WRITE_CHUNK_SIZE: usize = 50_000; // 50KB chunks
}
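// Illustrative sketch of how the chunking thresholds above might drive a
// read/write decision. `needs_read_chunking` and `write_chunk_count` are
// hypothetical helpers named for this example only.
#[cfg(test)]
mod chunking_sketch {
    use super::chunking;

    /// A file longer than the threshold would be returned as head and tail
    /// chunks (CHUNK_START_LINES + CHUNK_END_LINES) rather than in full.
    fn needs_read_chunking(line_count: usize) -> bool {
        line_count > chunking::MAX_LINES_THRESHOLD
    }

    /// Number of write chunks required for a payload, rounding up.
    fn write_chunk_count(content_len: usize) -> usize {
        (content_len + chunking::WRITE_CHUNK_SIZE - 1) / chunking::WRITE_CHUNK_SIZE
    }

    #[test]
    fn thresholds_behave_as_documented() {
        assert!(!needs_read_chunking(chunking::MAX_LINES_THRESHOLD));
        assert!(needs_read_chunking(chunking::MAX_LINES_THRESHOLD + 1));
        // A payload at MAX_WRITE_CONTENT_SIZE (500 KB) splits into ten 50 KB chunks.
        assert_eq!(write_chunk_count(chunking::MAX_WRITE_CONTENT_SIZE), 10);
    }
}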