//! Provider-specific configuration types (`vtcode_config/core/provider.rs`).
1use serde::{Deserialize, Serialize};
2
/// Native OpenAI service tier selection.
///
/// Serialized in lowercase, so TOML configs use `service_tier = "priority"`.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum OpenAIServiceTier {
    /// Priority processing tier (the only tier currently supported here).
    Priority,
}
10
11impl OpenAIServiceTier {
12    pub const fn as_str(self) -> &'static str {
13        match self {
14            Self::Priority => "priority",
15        }
16    }
17}
18
/// How VT Code should provision OpenAI hosted shell environments.
///
/// Serialized in snake_case: `"container_auto"` or `"container_reference"`.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Default)]
#[serde(rename_all = "snake_case")]
pub enum OpenAIHostedShellEnvironment {
    /// Let OpenAI provision a container automatically (the default).
    #[default]
    ContainerAuto,
    /// Reuse an existing container identified by `container_id`.
    ContainerReference,
}
28
29impl OpenAIHostedShellEnvironment {
30    pub const fn as_str(self) -> &'static str {
31        match self {
32            Self::ContainerAuto => "container_auto",
33            Self::ContainerReference => "container_reference",
34        }
35    }
36}
37
38impl OpenAIHostedShellEnvironment {
39    pub const fn uses_container_reference(self) -> bool {
40        matches!(self, Self::ContainerReference)
41    }
42}
43
/// Reserved keyword values for hosted skill version selection.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum OpenAIHostedSkillVersionKeyword {
    /// Track the most recent published version; serialized as `"latest"`.
    #[default]
    Latest,
}
52
/// Hosted skill version selector for OpenAI Responses hosted shell mounts.
///
/// `#[serde(untagged)]` tries variants in declaration order, so the reserved
/// `"latest"` keyword is matched before the free-form `String` fallback —
/// keep `Latest` first when editing this enum.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(untagged)]
pub enum OpenAIHostedSkillVersion {
    /// The reserved `"latest"` keyword.
    Latest(OpenAIHostedSkillVersionKeyword),
    /// A numeric version pin, e.g. `version = 2`.
    Number(u64),
    /// Any other version label; must be non-blank (see `validation_error`).
    String(String),
}
62
63impl Default for OpenAIHostedSkillVersion {
64    fn default() -> Self {
65        Self::Latest(OpenAIHostedSkillVersionKeyword::Latest)
66    }
67}
68
69impl OpenAIHostedSkillVersion {
70    pub fn validation_error(&self, field_path: &str) -> Option<String> {
71        match self {
72            Self::String(value) if value.trim().is_empty() => {
73                Some(format!("`{field_path}` must not be empty when set."))
74            }
75            _ => None,
76        }
77    }
78}
79
/// Hosted skill reference mounted into an OpenAI hosted shell environment.
///
/// Internally tagged by `type` in snake_case: `"skill_reference"` or `"inline"`.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OpenAIHostedSkill {
    /// Reference to a pre-registered hosted skill.
    SkillReference {
        // Identifier of the registered skill; validated as non-blank.
        skill_id: String,
        // Version selector; defaults to the `"latest"` keyword when omitted.
        #[serde(default)]
        version: OpenAIHostedSkillVersion,
    },
    /// Inline base64 zip bundle.
    Inline {
        // Base64-encoded zip bundle payload; validated as non-blank.
        bundle_b64: String,
        // Optional SHA-256 digest string; omitted from serialized output when `None`.
        #[serde(skip_serializing_if = "Option::is_none")]
        sha256: Option<String>,
    },
}
98
99impl OpenAIHostedSkill {
100    pub fn validation_error(&self, index: usize) -> Option<String> {
101        match self {
102            Self::SkillReference { skill_id, version } => {
103                let skill_id_path =
104                    format!("provider.openai.hosted_shell.skills[{index}].skill_id");
105                if skill_id.trim().is_empty() {
106                    return Some(format!(
107                        "`{skill_id_path}` must not be empty when `type = \"skill_reference\"`."
108                    ));
109                }
110
111                let version_path = format!("provider.openai.hosted_shell.skills[{index}].version");
112                version.validation_error(&version_path)
113            }
114            Self::Inline { bundle_b64, .. } => {
115                let bundle_path =
116                    format!("provider.openai.hosted_shell.skills[{index}].bundle_b64");
117                if bundle_b64.trim().is_empty() {
118                    return Some(format!(
119                        "`{bundle_path}` must not be empty when `type = \"inline\"`."
120                    ));
121                }
122                None
123            }
124        }
125    }
126}
127
/// OpenAI hosted shell configuration.
///
/// Runtime validity checks (container reference target, skill mounts) live on
/// the inherent impl; deserialization itself accepts any combination.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Default)]
pub struct OpenAIHostedShellConfig {
    /// Enable OpenAI hosted shell instead of VT Code's local shell tool.
    #[serde(default)]
    pub enabled: bool,

    /// Environment provisioning mode for hosted shell.
    #[serde(default)]
    pub environment: OpenAIHostedShellEnvironment,

    /// Existing OpenAI container ID to reuse when `environment = "container_reference"`.
    /// Read through `container_id_ref()`, which trims and blanks-out whitespace.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub container_id: Option<String>,

    /// File IDs to mount when using `container_auto`.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub file_ids: Vec<String>,

    /// Hosted skills to mount when using `container_auto`.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub skills: Vec<OpenAIHostedSkill>,
}
152
153impl OpenAIHostedShellConfig {
154    pub fn container_id_ref(&self) -> Option<&str> {
155        self.container_id
156            .as_deref()
157            .map(str::trim)
158            .filter(|value| !value.is_empty())
159    }
160
161    pub const fn uses_container_reference(&self) -> bool {
162        self.environment.uses_container_reference()
163    }
164
165    pub fn first_invalid_skill_message(&self) -> Option<String> {
166        if self.uses_container_reference() {
167            return None;
168        }
169
170        self.skills
171            .iter()
172            .enumerate()
173            .find_map(|(index, skill)| skill.validation_error(index))
174    }
175
176    pub fn has_valid_skill_mounts(&self) -> bool {
177        self.first_invalid_skill_message().is_none()
178    }
179
180    pub fn has_valid_reference_target(&self) -> bool {
181        !self.uses_container_reference() || self.container_id_ref().is_some()
182    }
183
184    pub fn is_valid_for_runtime(&self) -> bool {
185        self.has_valid_reference_target() && self.has_valid_skill_mounts()
186    }
187}
188
/// OpenAI-specific provider configuration
///
/// Every field has a serde default, so an empty `[provider.openai]` table
/// deserializes to the same value as `OpenAIConfig::default()`.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
pub struct OpenAIConfig {
    /// Enable Responses API WebSocket transport for non-streaming requests.
    /// This is an opt-in path designed for long-running, tool-heavy workflows.
    #[serde(default)]
    pub websocket_mode: bool,

    /// Optional Responses API `store` flag.
    /// Set to `false` to avoid server-side storage when using Responses-compatible models.
    /// `None` means the flag is omitted from requests entirely.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub responses_store: Option<bool>,

    /// Optional Responses API `include` selectors.
    /// Example: `["reasoning.encrypted_content"]` for encrypted reasoning continuity.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub responses_include: Vec<String>,

    /// Optional native OpenAI `service_tier` request parameter.
    /// Leave unset to inherit the Project-level default service tier.
    /// Options: "priority"
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<OpenAIServiceTier>,

    /// Optional hosted shell configuration for OpenAI native Responses models.
    #[serde(default)]
    pub hosted_shell: OpenAIHostedShellConfig,
}
218
/// Anthropic-specific provider configuration
///
/// The hand-written `Default` impl below mirrors each field's serde default
/// function; keep the two in sync when adding fields.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AnthropicConfig {
    /// DEPRECATED: Model name validation has been removed. The Anthropic API validates
    /// model names directly, avoiding maintenance burden and allowing flexibility.
    /// This field is kept for backward compatibility but has no effect.
    #[deprecated(
        since = "0.75.0",
        note = "Model validation removed. API validates model names directly."
    )]
    #[serde(default)]
    pub skip_model_validation: bool,

    /// Enable extended thinking feature for Anthropic models
    /// When enabled, Claude uses internal reasoning before responding, providing
    /// enhanced reasoning capabilities for complex tasks.
    /// Only supported by Claude 4, Claude 4.5, and Claude 3.7 Sonnet models.
    /// Claude 4.6 uses adaptive thinking instead of extended thinking.
    /// Note: Extended thinking is now auto-enabled by default (31,999 tokens).
    /// Set MAX_THINKING_TOKENS=63999 environment variable for 2x budget on 64K models.
    /// See: https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
    #[serde(default = "default_extended_thinking_enabled")]
    pub extended_thinking_enabled: bool,

    /// Beta header for interleaved thinking feature
    #[serde(default = "default_interleaved_thinking_beta")]
    pub interleaved_thinking_beta: String,

    /// Budget tokens for extended thinking (minimum: 1024, default: 31999)
    /// On 64K output models (Opus 4.5, Sonnet 4.5, Haiku 4.5): default 31,999, max 63,999
    /// On 32K output models (Opus 4): max 31,999
    /// Use MAX_THINKING_TOKENS environment variable to override.
    #[serde(default = "default_interleaved_thinking_budget_tokens")]
    pub interleaved_thinking_budget_tokens: u32,

    /// Type value for enabling interleaved thinking
    #[serde(default = "default_interleaved_thinking_type")]
    pub interleaved_thinking_type_enabled: String,

    /// Tool search configuration for dynamic tool discovery (advanced-tool-use beta)
    #[serde(default)]
    pub tool_search: ToolSearchConfig,

    /// Effort level for token usage (high, medium, low)
    /// Controls how many tokens Claude uses when responding, trading off between
    /// response thoroughness and token efficiency.
    /// Supported by Claude Opus 4.5/4.6 (4.5 requires effort beta header)
    #[serde(default = "default_effort")]
    pub effort: String,

    /// Enable token counting via the count_tokens endpoint
    /// When enabled, the agent can estimate input token counts before making API calls
    /// Useful for proactive management of rate limits and costs
    #[serde(default = "default_count_tokens_enabled")]
    pub count_tokens_enabled: bool,
}
276
277#[allow(deprecated)]
278impl Default for AnthropicConfig {
279    fn default() -> Self {
280        Self {
281            skip_model_validation: false,
282            extended_thinking_enabled: default_extended_thinking_enabled(),
283            interleaved_thinking_beta: default_interleaved_thinking_beta(),
284            interleaved_thinking_budget_tokens: default_interleaved_thinking_budget_tokens(),
285            interleaved_thinking_type_enabled: default_interleaved_thinking_type(),
286            tool_search: ToolSearchConfig::default(),
287            effort: default_effort(),
288            count_tokens_enabled: default_count_tokens_enabled(),
289        }
290    }
291}
292
/// Serde default for `AnthropicConfig::count_tokens_enabled`: opt-in, off by default.
#[inline]
fn default_count_tokens_enabled() -> bool {
    false
}
297
/// Configuration for Anthropic's tool search feature (advanced-tool-use beta)
/// Enables dynamic tool discovery for large tool catalogs (up to 10k tools)
///
/// The hand-written `Default` impl below mirrors each field's serde default
/// function; keep the two in sync when adding fields.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ToolSearchConfig {
    /// Enable tool search feature (requires advanced-tool-use-2025-11-20 beta)
    #[serde(default)]
    pub enabled: bool,

    /// Search algorithm: "regex" (Python regex patterns) or "bm25" (natural language)
    #[serde(default = "default_tool_search_algorithm")]
    pub algorithm: String,

    /// Automatically defer loading of all tools except core tools
    #[serde(default = "default_defer_by_default")]
    pub defer_by_default: bool,

    /// Maximum number of tool search results to return
    #[serde(default = "default_max_results")]
    pub max_results: u32,

    /// Tool names that should never be deferred (always available)
    #[serde(default)]
    pub always_available_tools: Vec<String>,
}
323
324impl Default for ToolSearchConfig {
325    fn default() -> Self {
326        Self {
327            enabled: false,
328            algorithm: default_tool_search_algorithm(),
329            defer_by_default: default_defer_by_default(),
330            max_results: default_max_results(),
331            always_available_tools: vec![],
332        }
333    }
334}
335
/// Serde default for `ToolSearchConfig::algorithm`; `"bm25"` is the alternative.
#[inline]
fn default_tool_search_algorithm() -> String {
    String::from("regex")
}
340
/// Serde default for `ToolSearchConfig::defer_by_default`: defer all non-core tools.
#[inline]
fn default_defer_by_default() -> bool {
    true
}
345
/// Serde default for `ToolSearchConfig::max_results`.
#[inline]
fn default_max_results() -> u32 {
    5
}
350
/// Serde default for `AnthropicConfig::extended_thinking_enabled`: auto-enabled.
#[inline]
fn default_extended_thinking_enabled() -> bool {
    true
}
355
/// Serde default for `AnthropicConfig::interleaved_thinking_beta` (the beta
/// header value enabling interleaved thinking).
#[inline]
fn default_interleaved_thinking_beta() -> String {
    String::from("interleaved-thinking-2025-05-14")
}
360
/// Serde default for `AnthropicConfig::interleaved_thinking_budget_tokens`
/// (per the field docs: default 31,999, the 32K-output-model maximum).
#[inline]
fn default_interleaved_thinking_budget_tokens() -> u32 {
    31_999
}
365
/// Serde default for `AnthropicConfig::interleaved_thinking_type_enabled`.
#[inline]
fn default_interleaved_thinking_type() -> String {
    String::from("enabled")
}
370
/// Serde default for `AnthropicConfig::effort` (one of "high", "medium", "low").
#[inline]
fn default_effort() -> String {
    String::from("low")
}
375
#[cfg(test)]
mod tests {
    use super::{
        OpenAIConfig, OpenAIHostedShellConfig, OpenAIHostedShellEnvironment, OpenAIHostedSkill,
        OpenAIHostedSkillVersion, OpenAIServiceTier,
    };

    // `OpenAIConfig::default()` must match an entirely empty TOML table.
    #[test]
    fn openai_config_defaults_to_websocket_mode_disabled() {
        let config = OpenAIConfig::default();
        assert!(!config.websocket_mode);
        assert_eq!(config.responses_store, None);
        assert!(config.responses_include.is_empty());
        assert_eq!(config.service_tier, None);
        assert_eq!(config.hosted_shell, OpenAIHostedShellConfig::default());
    }

    // Opting in to one field must leave every other field at its default.
    #[test]
    fn openai_config_parses_websocket_mode_opt_in() {
        let parsed: OpenAIConfig =
            toml::from_str("websocket_mode = true").expect("config should parse");
        assert!(parsed.websocket_mode);
        assert_eq!(parsed.responses_store, None);
        assert!(parsed.responses_include.is_empty());
        assert_eq!(parsed.service_tier, None);
        assert_eq!(parsed.hosted_shell, OpenAIHostedShellConfig::default());
    }

    #[test]
    fn openai_config_parses_responses_options() {
        let parsed: OpenAIConfig = toml::from_str(
            r#"
responses_store = false
responses_include = ["reasoning.encrypted_content", "output_text.annotations"]
"#,
        )
        .expect("config should parse");
        assert_eq!(parsed.responses_store, Some(false));
        assert_eq!(
            parsed.responses_include,
            vec![
                "reasoning.encrypted_content".to_string(),
                "output_text.annotations".to_string()
            ]
        );
        assert_eq!(parsed.service_tier, None);
        assert_eq!(parsed.hosted_shell, OpenAIHostedShellConfig::default());
    }

    // The lowercase serde rename: "priority" -> OpenAIServiceTier::Priority.
    #[test]
    fn openai_config_parses_service_tier() {
        let parsed: OpenAIConfig =
            toml::from_str(r#"service_tier = "priority""#).expect("config should parse");
        assert_eq!(parsed.service_tier, Some(OpenAIServiceTier::Priority));
    }

    // Internally-tagged skill mounts: `type = "skill_reference"` with the
    // version omitted should fall back to the `"latest"` keyword default.
    #[test]
    fn openai_config_parses_hosted_shell() {
        let parsed: OpenAIConfig = toml::from_str(
            r#"
[hosted_shell]
enabled = true
environment = "container_auto"
file_ids = ["file_123"]

[[hosted_shell.skills]]
type = "skill_reference"
skill_id = "skill_123"
"#,
        )
        .expect("config should parse");

        assert!(parsed.hosted_shell.enabled);
        assert_eq!(
            parsed.hosted_shell.environment,
            OpenAIHostedShellEnvironment::ContainerAuto
        );
        assert_eq!(parsed.hosted_shell.file_ids, vec!["file_123".to_string()]);
        assert_eq!(
            parsed.hosted_shell.skills,
            vec![OpenAIHostedSkill::SkillReference {
                skill_id: "skill_123".to_string(),
                version: OpenAIHostedSkillVersion::default(),
            }]
        );
    }

    // Untagged version selector: an integer pin parses as `Number`, and the
    // inline variant round-trips its optional sha256.
    #[test]
    fn openai_config_parses_hosted_shell_pinned_version_and_inline_bundle() {
        let parsed: OpenAIConfig = toml::from_str(
            r#"
[hosted_shell]
enabled = true

[[hosted_shell.skills]]
type = "skill_reference"
skill_id = "skill_123"
version = 2

[[hosted_shell.skills]]
type = "inline"
bundle_b64 = "UEsFBgAAAAAAAA=="
sha256 = "deadbeef"
"#,
        )
        .expect("config should parse");

        assert_eq!(
            parsed.hosted_shell.skills,
            vec![
                OpenAIHostedSkill::SkillReference {
                    skill_id: "skill_123".to_string(),
                    version: OpenAIHostedSkillVersion::Number(2),
                },
                OpenAIHostedSkill::Inline {
                    bundle_b64: "UEsFBgAAAAAAAA==".to_string(),
                    sha256: Some("deadbeef".to_string()),
                },
            ]
        );
    }

    // A whitespace-only container ID must be treated the same as no ID at all.
    #[test]
    fn hosted_shell_container_reference_requires_non_empty_container_id() {
        let config = OpenAIHostedShellConfig {
            enabled: true,
            environment: OpenAIHostedShellEnvironment::ContainerReference,
            container_id: Some("   ".to_string()),
            file_ids: Vec::new(),
            skills: Vec::new(),
        };

        assert!(!config.has_valid_reference_target());
        assert!(config.container_id_ref().is_none());
    }

    // A blank skill_id should surface the full config path in the message.
    #[test]
    fn hosted_shell_reports_invalid_skill_reference_mounts() {
        let config = OpenAIHostedShellConfig {
            enabled: true,
            environment: OpenAIHostedShellEnvironment::ContainerAuto,
            container_id: None,
            file_ids: Vec::new(),
            skills: vec![OpenAIHostedSkill::SkillReference {
                skill_id: "   ".to_string(),
                version: OpenAIHostedSkillVersion::default(),
            }],
        };

        let message = config
            .first_invalid_skill_message()
            .expect("invalid mount should be reported");

        assert!(message.contains("provider.openai.hosted_shell.skills[0].skill_id"));
        assert!(!config.has_valid_skill_mounts());
        assert!(!config.is_valid_for_runtime());
    }

    // Skill validation is skipped entirely in container_reference mode, even
    // when the configured mounts would otherwise be invalid.
    #[test]
    fn hosted_shell_ignores_skill_validation_for_container_reference() {
        let config = OpenAIHostedShellConfig {
            enabled: true,
            environment: OpenAIHostedShellEnvironment::ContainerReference,
            container_id: Some("cntr_123".to_string()),
            file_ids: Vec::new(),
            skills: vec![OpenAIHostedSkill::Inline {
                bundle_b64: "   ".to_string(),
                sha256: None,
            }],
        };

        assert!(config.first_invalid_skill_message().is_none());
        assert!(config.has_valid_skill_mounts());
        assert!(config.is_valid_for_runtime());
    }
}