//! Provider-specific configuration types (`vtcode_config/core/provider.rs`).
use serde::{Deserialize, Serialize};

/// Native OpenAI service tier selection.
///
/// Serialized in lowercase (e.g. `"priority"`) per the `rename_all` attribute,
/// matching the wire format of the OpenAI `service_tier` request parameter.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum OpenAIServiceTier {
    /// The `"priority"` service tier (currently the only supported value).
    Priority,
}

11impl OpenAIServiceTier {
12    pub const fn as_str(self) -> &'static str {
13        match self {
14            Self::Priority => "priority",
15        }
16    }
17}
18
/// OpenAI-specific provider configuration.
///
/// Every field has a serde default, so an empty config table deserializes to
/// the same values as `OpenAIConfig::default()` (everything off/unset).
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
pub struct OpenAIConfig {
    /// Enable Responses API WebSocket transport for non-streaming requests.
    /// This is an opt-in path designed for long-running, tool-heavy workflows.
    #[serde(default)]
    pub websocket_mode: bool,

    /// Optional Responses API `store` flag.
    /// Set to `false` to avoid server-side storage when using Responses-compatible models.
    /// `None` omits the flag from serialized output entirely.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub responses_store: Option<bool>,

    /// Optional Responses API `include` selectors.
    /// Example: `["reasoning.encrypted_content"]` for encrypted reasoning continuity.
    /// An empty list is omitted when serializing.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub responses_include: Vec<String>,

    /// Optional native OpenAI `service_tier` request parameter.
    /// Leave unset to inherit the Project-level default service tier.
    /// Options: "priority"
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<OpenAIServiceTier>,
}

/// Anthropic-specific provider configuration.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AnthropicConfig {
    /// DEPRECATED: Model name validation has been removed. The Anthropic API validates
    /// model names directly, avoiding maintenance burden and allowing flexibility.
    /// This field is kept for backward compatibility but has no effect.
    #[deprecated(
        since = "0.75.0",
        note = "Model validation removed. API validates model names directly."
    )]
    #[serde(default)]
    pub skip_model_validation: bool,

    /// Enable extended thinking feature for Anthropic models (defaults to `true`).
    /// When enabled, Claude uses internal reasoning before responding, providing
    /// enhanced reasoning capabilities for complex tasks.
    /// Only supported by Claude 4, Claude 4.5, and Claude 3.7 Sonnet models.
    /// Claude 4.6 uses adaptive thinking instead of extended thinking.
    /// Note: Extended thinking is now auto-enabled by default (31,999 tokens).
    /// Set MAX_THINKING_TOKENS=63999 environment variable for 2x budget on 64K models.
    /// See: https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
    #[serde(default = "default_extended_thinking_enabled")]
    pub extended_thinking_enabled: bool,

    /// Beta header value for the interleaved thinking feature
    /// (defaults to "interleaved-thinking-2025-05-14").
    #[serde(default = "default_interleaved_thinking_beta")]
    pub interleaved_thinking_beta: String,

    /// Budget tokens for extended thinking (minimum: 1024, default: 31999)
    /// On 64K output models (Opus 4.5, Sonnet 4.5, Haiku 4.5): default 31,999, max 63,999
    /// On 32K output models (Opus 4): max 31,999
    /// Use MAX_THINKING_TOKENS environment variable to override.
    #[serde(default = "default_interleaved_thinking_budget_tokens")]
    pub interleaved_thinking_budget_tokens: u32,

    /// Type value for enabling interleaved thinking (defaults to "enabled").
    #[serde(default = "default_interleaved_thinking_type")]
    pub interleaved_thinking_type_enabled: String,

    /// Tool search configuration for dynamic tool discovery (advanced-tool-use beta).
    #[serde(default)]
    pub tool_search: ToolSearchConfig,

    /// Effort level for token usage (high, medium, low; defaults to "low").
    /// Controls how many tokens Claude uses when responding, trading off between
    /// response thoroughness and token efficiency.
    /// Supported by Claude Opus 4.5/4.6 (4.5 requires effort beta header)
    #[serde(default = "default_effort")]
    pub effort: String,

    /// Enable token counting via the count_tokens endpoint (defaults to `false`).
    /// When enabled, the agent can estimate input token counts before making API calls.
    /// Useful for proactive management of rate limits and costs.
    #[serde(default = "default_count_tokens_enabled")]
    pub count_tokens_enabled: bool,
}

103#[allow(deprecated)]
104impl Default for AnthropicConfig {
105    fn default() -> Self {
106        Self {
107            skip_model_validation: false,
108            extended_thinking_enabled: default_extended_thinking_enabled(),
109            interleaved_thinking_beta: default_interleaved_thinking_beta(),
110            interleaved_thinking_budget_tokens: default_interleaved_thinking_budget_tokens(),
111            interleaved_thinking_type_enabled: default_interleaved_thinking_type(),
112            tool_search: ToolSearchConfig::default(),
113            effort: default_effort(),
114            count_tokens_enabled: default_count_tokens_enabled(),
115        }
116    }
117}
118
/// Serde default for `AnthropicConfig::count_tokens_enabled`: opt-in, so `false`.
#[inline]
fn default_count_tokens_enabled() -> bool {
    false
}

/// Configuration for Anthropic's tool search feature (advanced-tool-use beta).
/// Enables dynamic tool discovery for large tool catalogs (up to 10k tools).
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ToolSearchConfig {
    /// Enable tool search feature (requires advanced-tool-use-2025-11-20 beta).
    /// Defaults to `false`.
    #[serde(default)]
    pub enabled: bool,

    /// Search algorithm: "regex" (Python regex patterns) or "bm25" (natural language).
    /// Defaults to "regex".
    #[serde(default = "default_tool_search_algorithm")]
    pub algorithm: String,

    /// Automatically defer loading of all tools except core tools (defaults to `true`).
    #[serde(default = "default_defer_by_default")]
    pub defer_by_default: bool,

    /// Maximum number of tool search results to return (defaults to 5).
    #[serde(default = "default_max_results")]
    pub max_results: u32,

    /// Tool names that should never be deferred (always available).
    #[serde(default)]
    pub always_available_tools: Vec<String>,
}

150impl Default for ToolSearchConfig {
151    fn default() -> Self {
152        Self {
153            enabled: false,
154            algorithm: default_tool_search_algorithm(),
155            defer_by_default: default_defer_by_default(),
156            max_results: default_max_results(),
157            always_available_tools: vec![],
158        }
159    }
160}
161
/// Serde default for `ToolSearchConfig::algorithm`: "regex".
#[inline]
fn default_tool_search_algorithm() -> String {
    String::from("regex")
}

/// Serde default for `ToolSearchConfig::defer_by_default`: `true`.
#[inline]
fn default_defer_by_default() -> bool {
    true
}

/// Serde default for `ToolSearchConfig::max_results`: 5 results.
#[inline]
fn default_max_results() -> u32 {
    5
}

/// Serde default for `AnthropicConfig::extended_thinking_enabled`: auto-enabled.
#[inline]
fn default_extended_thinking_enabled() -> bool {
    true
}

/// Serde default for `AnthropicConfig::interleaved_thinking_beta`:
/// the dated interleaved-thinking beta header value.
#[inline]
fn default_interleaved_thinking_beta() -> String {
    String::from("interleaved-thinking-2025-05-14")
}

/// Serde default for `AnthropicConfig::interleaved_thinking_budget_tokens`:
/// 31,999 tokens (the documented default budget for extended thinking).
#[inline]
fn default_interleaved_thinking_budget_tokens() -> u32 {
    31999
}

/// Serde default for `AnthropicConfig::interleaved_thinking_type_enabled`: "enabled".
#[inline]
fn default_interleaved_thinking_type() -> String {
    String::from("enabled")
}

/// Serde default for `AnthropicConfig::effort`: "low" (token-efficient responses).
#[inline]
fn default_effort() -> String {
    String::from("low")
}

#[cfg(test)]
mod tests {
    use super::{OpenAIConfig, OpenAIServiceTier};

    /// `Default` leaves every OpenAI option disabled or unset.
    #[test]
    fn openai_config_defaults_to_websocket_mode_disabled() {
        let config = OpenAIConfig::default();
        assert!(!config.websocket_mode);
        assert_eq!(config.responses_store, None);
        assert!(config.responses_include.is_empty());
        assert_eq!(config.service_tier, None);
    }

    /// Setting only `websocket_mode` leaves the other options at their defaults.
    #[test]
    fn openai_config_parses_websocket_mode_opt_in() {
        let config: OpenAIConfig =
            toml::from_str("websocket_mode = true").expect("config should parse");
        assert!(config.websocket_mode);
        assert_eq!(config.responses_store, None);
        assert!(config.responses_include.is_empty());
        assert_eq!(config.service_tier, None);
    }

    /// Responses API `store` and `include` options round-trip through TOML.
    #[test]
    fn openai_config_parses_responses_options() {
        let toml_source = r#"
responses_store = false
responses_include = ["reasoning.encrypted_content", "output_text.annotations"]
"#;
        let config: OpenAIConfig = toml::from_str(toml_source).expect("config should parse");
        assert_eq!(config.responses_store, Some(false));
        assert_eq!(
            config.responses_include,
            ["reasoning.encrypted_content", "output_text.annotations"]
        );
        assert_eq!(config.service_tier, None);
    }

    /// The lowercase wire string deserializes into the enum variant.
    #[test]
    fn openai_config_parses_service_tier() {
        let config: OpenAIConfig =
            toml::from_str(r#"service_tier = "priority""#).expect("config should parse");
        assert_eq!(config.service_tier, Some(OpenAIServiceTier::Priority));
    }
}