//! vtcode_config/core/provider.rs — provider-specific configuration types
//! (OpenAI hosted shell / tool search, Anthropic extended thinking).
1use serde::{Deserialize, Serialize};
2
3/// Native OpenAI service tier selection.
4#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
5#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
6#[serde(rename_all = "lowercase")]
7pub enum OpenAIServiceTier {
8    Priority,
9}
10
11impl OpenAIServiceTier {
12    pub const fn as_str(self) -> &'static str {
13        match self {
14            Self::Priority => "priority",
15        }
16    }
17}
18
19/// How VT Code should provision OpenAI hosted shell environments.
20#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
21#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Default)]
22#[serde(rename_all = "snake_case")]
23pub enum OpenAIHostedShellEnvironment {
24    #[default]
25    ContainerAuto,
26    ContainerReference,
27}
28
29impl OpenAIHostedShellEnvironment {
30    pub const fn as_str(self) -> &'static str {
31        match self {
32            Self::ContainerAuto => "container_auto",
33            Self::ContainerReference => "container_reference",
34        }
35    }
36}
37
38impl OpenAIHostedShellEnvironment {
39    pub const fn uses_container_reference(self) -> bool {
40        matches!(self, Self::ContainerReference)
41    }
42}
43
44/// Reserved keyword values for hosted skill version selection.
45#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
46#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Default)]
47#[serde(rename_all = "lowercase")]
48pub enum OpenAIHostedSkillVersionKeyword {
49    #[default]
50    Latest,
51}
52
53/// Hosted skill version selector for OpenAI Responses hosted shell mounts.
54#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
55#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
56#[serde(untagged)]
57pub enum OpenAIHostedSkillVersion {
58    Latest(OpenAIHostedSkillVersionKeyword),
59    Number(u64),
60    String(String),
61}
62
63impl Default for OpenAIHostedSkillVersion {
64    fn default() -> Self {
65        Self::Latest(OpenAIHostedSkillVersionKeyword::Latest)
66    }
67}
68
69impl OpenAIHostedSkillVersion {
70    pub fn validation_error(&self, field_path: &str) -> Option<String> {
71        match self {
72            Self::String(value) if value.trim().is_empty() => {
73                Some(format!("`{field_path}` must not be empty when set."))
74            }
75            _ => None,
76        }
77    }
78}
79
80/// Hosted skill reference mounted into an OpenAI hosted shell environment.
81#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
82#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
83#[serde(tag = "type", rename_all = "snake_case")]
84pub enum OpenAIHostedSkill {
85    /// Reference to a pre-registered hosted skill.
86    SkillReference {
87        skill_id: String,
88        #[serde(default)]
89        version: OpenAIHostedSkillVersion,
90    },
91    /// Inline base64 zip bundle.
92    Inline {
93        bundle_b64: String,
94        #[serde(skip_serializing_if = "Option::is_none")]
95        sha256: Option<String>,
96    },
97}
98
99impl OpenAIHostedSkill {
100    pub fn validation_error(&self, index: usize) -> Option<String> {
101        match self {
102            Self::SkillReference { skill_id, version } => {
103                let skill_id_path =
104                    format!("provider.openai.hosted_shell.skills[{index}].skill_id");
105                if skill_id.trim().is_empty() {
106                    return Some(format!(
107                        "`{skill_id_path}` must not be empty when `type = \"skill_reference\"`."
108                    ));
109                }
110
111                let version_path = format!("provider.openai.hosted_shell.skills[{index}].version");
112                version.validation_error(&version_path)
113            }
114            Self::Inline { bundle_b64, .. } => {
115                let bundle_path =
116                    format!("provider.openai.hosted_shell.skills[{index}].bundle_b64");
117                if bundle_b64.trim().is_empty() {
118                    return Some(format!(
119                        "`{bundle_path}` must not be empty when `type = \"inline\"`."
120                    ));
121                }
122                None
123            }
124        }
125    }
126}
127
128/// OpenAI hosted shell configuration.
129#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
130#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Default)]
131pub struct OpenAIHostedShellConfig {
132    /// Enable OpenAI hosted shell instead of VT Code's local shell tool.
133    #[serde(default)]
134    pub enabled: bool,
135
136    /// Environment provisioning mode for hosted shell.
137    #[serde(default)]
138    pub environment: OpenAIHostedShellEnvironment,
139
140    /// Existing OpenAI container ID to reuse when `environment = "container_reference"`.
141    #[serde(default, skip_serializing_if = "Option::is_none")]
142    pub container_id: Option<String>,
143
144    /// File IDs to mount when using `container_auto`.
145    #[serde(default, skip_serializing_if = "Vec::is_empty")]
146    pub file_ids: Vec<String>,
147
148    /// Hosted skills to mount when using `container_auto`.
149    #[serde(default, skip_serializing_if = "Vec::is_empty")]
150    pub skills: Vec<OpenAIHostedSkill>,
151}
152
153impl OpenAIHostedShellConfig {
154    pub fn container_id_ref(&self) -> Option<&str> {
155        self.container_id
156            .as_deref()
157            .map(str::trim)
158            .filter(|value| !value.is_empty())
159    }
160
161    pub const fn uses_container_reference(&self) -> bool {
162        self.environment.uses_container_reference()
163    }
164
165    pub fn first_invalid_skill_message(&self) -> Option<String> {
166        if self.uses_container_reference() {
167            return None;
168        }
169
170        self.skills
171            .iter()
172            .enumerate()
173            .find_map(|(index, skill)| skill.validation_error(index))
174    }
175
176    pub fn has_valid_skill_mounts(&self) -> bool {
177        self.first_invalid_skill_message().is_none()
178    }
179
180    pub fn has_valid_reference_target(&self) -> bool {
181        !self.uses_container_reference() || self.container_id_ref().is_some()
182    }
183
184    pub fn is_valid_for_runtime(&self) -> bool {
185        self.has_valid_reference_target() && self.has_valid_skill_mounts()
186    }
187}
188
189/// OpenAI hosted tool search configuration.
190#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
191#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
192pub struct OpenAIToolSearchConfig {
193    /// Enable hosted tool search for OpenAI Responses-compatible models.
194    #[serde(default = "default_tool_search_enabled")]
195    pub enabled: bool,
196
197    /// Automatically defer loading of all tools except the core always-on set.
198    #[serde(default = "default_defer_by_default")]
199    pub defer_by_default: bool,
200
201    /// Tool names that should never be deferred (always available).
202    #[serde(default)]
203    pub always_available_tools: Vec<String>,
204}
205
206impl Default for OpenAIToolSearchConfig {
207    fn default() -> Self {
208        Self {
209            enabled: default_tool_search_enabled(),
210            defer_by_default: default_defer_by_default(),
211            always_available_tools: Vec::new(),
212        }
213    }
214}
215
216/// OpenAI-specific provider configuration
217#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
218#[derive(Debug, Clone, Deserialize, Serialize, Default)]
219pub struct OpenAIConfig {
220    /// Enable Responses API WebSocket transport for non-streaming requests.
221    /// This is an opt-in path designed for long-running, tool-heavy workflows.
222    #[serde(default)]
223    pub websocket_mode: bool,
224
225    /// Optional Responses API `store` flag.
226    /// Set to `false` to avoid server-side storage when using Responses-compatible models.
227    #[serde(default, skip_serializing_if = "Option::is_none")]
228    pub responses_store: Option<bool>,
229
230    /// Optional Responses API `include` selectors.
231    /// Example: `["reasoning.encrypted_content"]` for encrypted reasoning continuity.
232    #[serde(default, skip_serializing_if = "Vec::is_empty")]
233    pub responses_include: Vec<String>,
234
235    /// Optional native OpenAI `service_tier` request parameter.
236    /// Leave unset to inherit the Project-level default service tier.
237    /// Options: "priority"
238    #[serde(default, skip_serializing_if = "Option::is_none")]
239    pub service_tier: Option<OpenAIServiceTier>,
240
241    /// Optional hosted shell configuration for OpenAI native Responses models.
242    #[serde(default)]
243    pub hosted_shell: OpenAIHostedShellConfig,
244
245    /// Hosted tool search configuration for OpenAI Responses-compatible models.
246    #[serde(default)]
247    pub tool_search: OpenAIToolSearchConfig,
248}
249
250/// Anthropic-specific provider configuration
251#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
252#[derive(Debug, Clone, Deserialize, Serialize)]
253pub struct AnthropicConfig {
254    /// DEPRECATED: Model name validation has been removed. The Anthropic API validates
255    /// model names directly, avoiding maintenance burden and allowing flexibility.
256    /// This field is kept for backward compatibility but has no effect.
257    #[deprecated(
258        since = "0.75.0",
259        note = "Model validation removed. API validates model names directly."
260    )]
261    #[serde(default)]
262    pub skip_model_validation: bool,
263
264    /// Enable extended thinking feature for Anthropic models
265    /// When enabled, Claude uses internal reasoning before responding, providing
266    /// enhanced reasoning capabilities for complex tasks.
267    /// Only supported by Claude 4, Claude 4.5, and Claude 3.7 Sonnet models.
268    /// Claude 4.6 uses adaptive thinking instead of extended thinking.
269    /// Note: Extended thinking is now auto-enabled by default (31,999 tokens).
270    /// Set MAX_THINKING_TOKENS=63999 environment variable for 2x budget on 64K models.
271    /// See: https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking
272    #[serde(default = "default_extended_thinking_enabled")]
273    pub extended_thinking_enabled: bool,
274
275    /// Beta header for interleaved thinking feature
276    #[serde(default = "default_interleaved_thinking_beta")]
277    pub interleaved_thinking_beta: String,
278
279    /// Budget tokens for extended thinking (minimum: 1024, default: 31999)
280    /// On 64K output models (Opus 4.5, Sonnet 4.5, Haiku 4.5): default 31,999, max 63,999
281    /// On 32K output models (Opus 4): max 31,999
282    /// Use MAX_THINKING_TOKENS environment variable to override.
283    #[serde(default = "default_interleaved_thinking_budget_tokens")]
284    pub interleaved_thinking_budget_tokens: u32,
285
286    /// Type value for enabling interleaved thinking
287    #[serde(default = "default_interleaved_thinking_type")]
288    pub interleaved_thinking_type_enabled: String,
289
290    /// Tool search configuration for dynamic tool discovery (advanced-tool-use beta)
291    #[serde(default)]
292    pub tool_search: ToolSearchConfig,
293
294    /// Effort level for token usage (high, medium, low)
295    /// Controls how many tokens Claude uses when responding, trading off between
296    /// response thoroughness and token efficiency.
297    /// Supported by Claude Opus 4.5/4.6 (4.5 requires effort beta header)
298    #[serde(default = "default_effort")]
299    pub effort: String,
300
301    /// Enable token counting via the count_tokens endpoint
302    /// When enabled, the agent can estimate input token counts before making API calls
303    /// Useful for proactive management of rate limits and costs
304    #[serde(default = "default_count_tokens_enabled")]
305    pub count_tokens_enabled: bool,
306}
307
308#[allow(deprecated)]
309impl Default for AnthropicConfig {
310    fn default() -> Self {
311        Self {
312            skip_model_validation: false,
313            extended_thinking_enabled: default_extended_thinking_enabled(),
314            interleaved_thinking_beta: default_interleaved_thinking_beta(),
315            interleaved_thinking_budget_tokens: default_interleaved_thinking_budget_tokens(),
316            interleaved_thinking_type_enabled: default_interleaved_thinking_type(),
317            tool_search: ToolSearchConfig::default(),
318            effort: default_effort(),
319            count_tokens_enabled: default_count_tokens_enabled(),
320        }
321    }
322}
323
/// Token counting is opt-in; disabled unless the config enables it.
#[inline]
fn default_count_tokens_enabled() -> bool {
    false
}
328
329/// Configuration for Anthropic's tool search feature (advanced-tool-use beta)
330/// Enables dynamic tool discovery for large tool catalogs (up to 10k tools)
331#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
332#[derive(Debug, Clone, Deserialize, Serialize)]
333pub struct ToolSearchConfig {
334    /// Enable tool search feature (requires advanced-tool-use-2025-11-20 beta)
335    #[serde(default = "default_tool_search_enabled")]
336    pub enabled: bool,
337
338    /// Search algorithm: "regex" (Python regex patterns) or "bm25" (natural language)
339    #[serde(default = "default_tool_search_algorithm")]
340    pub algorithm: String,
341
342    /// Automatically defer loading of all tools except core tools
343    #[serde(default = "default_defer_by_default")]
344    pub defer_by_default: bool,
345
346    /// Maximum number of tool search results to return
347    #[serde(default = "default_max_results")]
348    pub max_results: u32,
349
350    /// Tool names that should never be deferred (always available)
351    #[serde(default)]
352    pub always_available_tools: Vec<String>,
353}
354
355impl Default for ToolSearchConfig {
356    fn default() -> Self {
357        Self {
358            enabled: default_tool_search_enabled(),
359            algorithm: default_tool_search_algorithm(),
360            defer_by_default: default_defer_by_default(),
361            max_results: default_max_results(),
362            always_available_tools: vec![],
363        }
364    }
365}
366
// Serde default providers shared by the config structs above. Each mirrors
// the documented default on the corresponding field.

/// Tool search is enabled by default.
#[inline]
fn default_tool_search_enabled() -> bool {
    true
}

/// Default search algorithm: Python-style regex patterns.
#[inline]
fn default_tool_search_algorithm() -> String {
    String::from("regex")
}

/// Non-core tools are deferred by default.
#[inline]
fn default_defer_by_default() -> bool {
    true
}

/// Default cap on tool search results.
#[inline]
fn default_max_results() -> u32 {
    5
}

/// Extended thinking is auto-enabled by default.
#[inline]
fn default_extended_thinking_enabled() -> bool {
    true
}

/// Default beta header value for interleaved thinking.
#[inline]
fn default_interleaved_thinking_beta() -> String {
    String::from("interleaved-thinking-2025-05-14")
}

/// Default extended-thinking token budget (31,999 tokens).
#[inline]
fn default_interleaved_thinking_budget_tokens() -> u32 {
    31999
}

/// Default type value that switches interleaved thinking on.
#[inline]
fn default_interleaved_thinking_type() -> String {
    String::from("enabled")
}

/// Default effort level for token usage.
#[inline]
fn default_effort() -> String {
    String::from("low")
}
411
#[cfg(test)]
mod tests {
    use super::{
        AnthropicConfig, OpenAIConfig, OpenAIHostedShellConfig, OpenAIHostedShellEnvironment,
        OpenAIHostedSkill, OpenAIHostedSkillVersion, OpenAIServiceTier,
    };

    // All optional OpenAI features are off / unset by default.
    #[test]
    fn openai_config_defaults_to_websocket_mode_disabled() {
        let config = OpenAIConfig::default();
        assert!(!config.websocket_mode);
        assert_eq!(config.responses_store, None);
        assert!(config.responses_include.is_empty());
        assert_eq!(config.service_tier, None);
        assert_eq!(config.hosted_shell, OpenAIHostedShellConfig::default());
        assert!(config.tool_search.enabled);
        assert!(config.tool_search.defer_by_default);
        assert!(config.tool_search.always_available_tools.is_empty());
    }

    // Setting only `websocket_mode` leaves every other field at its default.
    #[test]
    fn openai_config_parses_websocket_mode_opt_in() {
        let parsed: OpenAIConfig =
            toml::from_str("websocket_mode = true").expect("config should parse");
        assert!(parsed.websocket_mode);
        assert_eq!(parsed.responses_store, None);
        assert!(parsed.responses_include.is_empty());
        assert_eq!(parsed.service_tier, None);
        assert_eq!(parsed.hosted_shell, OpenAIHostedShellConfig::default());
        assert_eq!(parsed.tool_search, super::OpenAIToolSearchConfig::default());
    }

    // Responses API `store` flag and `include` selectors round-trip from TOML.
    #[test]
    fn openai_config_parses_responses_options() {
        let parsed: OpenAIConfig = toml::from_str(
            r#"
responses_store = false
responses_include = ["reasoning.encrypted_content", "output_text.annotations"]
"#,
        )
        .expect("config should parse");
        assert_eq!(parsed.responses_store, Some(false));
        assert_eq!(
            parsed.responses_include,
            vec![
                "reasoning.encrypted_content".to_string(),
                "output_text.annotations".to_string()
            ]
        );
        assert_eq!(parsed.service_tier, None);
        assert_eq!(parsed.hosted_shell, OpenAIHostedShellConfig::default());
    }

    // Lowercase `"priority"` maps to the Priority variant (serde rename_all).
    #[test]
    fn openai_config_parses_service_tier() {
        let parsed: OpenAIConfig =
            toml::from_str(r#"service_tier = "priority""#).expect("config should parse");
        assert_eq!(parsed.service_tier, Some(OpenAIServiceTier::Priority));
    }

    // Hosted shell table with a skill_reference mount; `version` falls back
    // to its default (`latest`) when omitted.
    #[test]
    fn openai_config_parses_hosted_shell() {
        let parsed: OpenAIConfig = toml::from_str(
            r#"
[hosted_shell]
enabled = true
environment = "container_auto"
file_ids = ["file_123"]

[[hosted_shell.skills]]
type = "skill_reference"
skill_id = "skill_123"
"#,
        )
        .expect("config should parse");

        assert!(parsed.hosted_shell.enabled);
        assert_eq!(
            parsed.hosted_shell.environment,
            OpenAIHostedShellEnvironment::ContainerAuto
        );
        assert_eq!(parsed.hosted_shell.file_ids, vec!["file_123".to_string()]);
        assert_eq!(
            parsed.hosted_shell.skills,
            vec![OpenAIHostedSkill::SkillReference {
                skill_id: "skill_123".to_string(),
                version: OpenAIHostedSkillVersion::default(),
            }]
        );
    }

    // The untagged version selector accepts a bare integer, and inline
    // bundles carry an optional sha256 digest.
    #[test]
    fn openai_config_parses_hosted_shell_pinned_version_and_inline_bundle() {
        let parsed: OpenAIConfig = toml::from_str(
            r#"
[hosted_shell]
enabled = true

[[hosted_shell.skills]]
type = "skill_reference"
skill_id = "skill_123"
version = 2

[[hosted_shell.skills]]
type = "inline"
bundle_b64 = "UEsFBgAAAAAAAA=="
sha256 = "deadbeef"
"#,
        )
        .expect("config should parse");

        assert_eq!(
            parsed.hosted_shell.skills,
            vec![
                OpenAIHostedSkill::SkillReference {
                    skill_id: "skill_123".to_string(),
                    version: OpenAIHostedSkillVersion::Number(2),
                },
                OpenAIHostedSkill::Inline {
                    bundle_b64: "UEsFBgAAAAAAAA==".to_string(),
                    sha256: Some("deadbeef".to_string()),
                },
            ]
        );
    }

    // Explicit tool_search table overrides the serde defaults.
    #[test]
    fn openai_config_parses_tool_search() {
        let parsed: OpenAIConfig = toml::from_str(
            r#"
[tool_search]
enabled = false
defer_by_default = false
always_available_tools = ["unified_search", "custom_tool"]
"#,
        )
        .expect("config should parse");

        assert!(!parsed.tool_search.enabled);
        assert!(!parsed.tool_search.defer_by_default);
        assert_eq!(
            parsed.tool_search.always_available_tools,
            vec!["unified_search".to_string(), "custom_tool".to_string()]
        );
    }

    // Anthropic tool search defaults: enabled, deferring, regex algorithm.
    #[test]
    fn anthropic_tool_search_defaults_to_enabled() {
        let config = AnthropicConfig::default();

        assert!(config.tool_search.enabled);
        assert!(config.tool_search.defer_by_default);
        assert_eq!(config.tool_search.algorithm, "regex");
        assert!(config.tool_search.always_available_tools.is_empty());
    }

    // A whitespace-only container_id counts as missing in reference mode.
    #[test]
    fn hosted_shell_container_reference_requires_non_empty_container_id() {
        let config = OpenAIHostedShellConfig {
            enabled: true,
            environment: OpenAIHostedShellEnvironment::ContainerReference,
            container_id: Some("   ".to_string()),
            file_ids: Vec::new(),
            skills: Vec::new(),
        };

        assert!(!config.has_valid_reference_target());
        assert!(config.container_id_ref().is_none());
    }

    // Blank skill_id in container_auto mode is reported with its field path.
    #[test]
    fn hosted_shell_reports_invalid_skill_reference_mounts() {
        let config = OpenAIHostedShellConfig {
            enabled: true,
            environment: OpenAIHostedShellEnvironment::ContainerAuto,
            container_id: None,
            file_ids: Vec::new(),
            skills: vec![OpenAIHostedSkill::SkillReference {
                skill_id: "   ".to_string(),
                version: OpenAIHostedSkillVersion::default(),
            }],
        };

        let message = config
            .first_invalid_skill_message()
            .expect("invalid mount should be reported");

        assert!(message.contains("provider.openai.hosted_shell.skills[0].skill_id"));
        assert!(!config.has_valid_skill_mounts());
        assert!(!config.is_valid_for_runtime());
    }

    // In container_reference mode, skill mounts are not validated at all —
    // even an obviously invalid inline bundle is accepted.
    #[test]
    fn hosted_shell_ignores_skill_validation_for_container_reference() {
        let config = OpenAIHostedShellConfig {
            enabled: true,
            environment: OpenAIHostedShellEnvironment::ContainerReference,
            container_id: Some("cntr_123".to_string()),
            file_ids: Vec::new(),
            skills: vec![OpenAIHostedSkill::Inline {
                bundle_b64: "   ".to_string(),
                sha256: None,
            }],
        };

        assert!(config.first_invalid_skill_message().is_none());
        assert!(config.has_valid_skill_mounts());
        assert!(config.is_valid_for_runtime());
    }
}