// vtcode_config/core/provider.rs
use serde::{Deserialize, Serialize};
2
/// Service tier selector for OpenAI requests.
///
/// Serialized in lowercase (e.g. `"priority"`) per the `rename_all`
/// attribute below.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum OpenAIServiceTier {
    /// Priority processing tier; serializes as `"priority"`.
    Priority,
}
10
11impl OpenAIServiceTier {
12 pub const fn as_str(self) -> &'static str {
13 match self {
14 Self::Priority => "priority",
15 }
16 }
17}
18
/// Provider-specific settings for OpenAI.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
pub struct OpenAIConfig {
    /// Opt in to websocket transport. Defaults to `false`.
    #[serde(default)]
    pub websocket_mode: bool,

    /// Whether responses should be stored (Responses API `store` flag —
    /// presumably; confirm against the request builder). `None` is
    /// omitted when serializing.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub responses_store: Option<bool>,

    /// Extra `include` entries for responses requests
    /// (e.g. `"reasoning.encrypted_content"`). An empty list is
    /// omitted when serializing.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub responses_include: Vec<String>,

    /// Optional service tier; `None` is omitted when serializing.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<OpenAIServiceTier>,
}
44
/// Provider-specific settings for Anthropic.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AnthropicConfig {
    /// Deprecated: kept only so existing configs still deserialize;
    /// the value is no longer consulted.
    #[deprecated(
        since = "0.75.0",
        note = "Model validation removed. API validates model names directly."
    )]
    #[serde(default)]
    pub skip_model_validation: bool,

    /// Enable extended thinking. Defaults to `true`.
    #[serde(default = "default_extended_thinking_enabled")]
    pub extended_thinking_enabled: bool,

    /// Beta identifier for interleaved thinking.
    /// Defaults to `"interleaved-thinking-2025-05-14"`.
    #[serde(default = "default_interleaved_thinking_beta")]
    pub interleaved_thinking_beta: String,

    /// Token budget for interleaved thinking. Defaults to `31999`.
    #[serde(default = "default_interleaved_thinking_budget_tokens")]
    pub interleaved_thinking_budget_tokens: u32,

    /// Thinking type flag; defaults to `"enabled"`.
    #[serde(default = "default_interleaved_thinking_type")]
    pub interleaved_thinking_type_enabled: String,

    /// Tool search behavior configuration.
    #[serde(default)]
    pub tool_search: ToolSearchConfig,

    /// Effort level; defaults to `"low"`.
    #[serde(default = "default_effort")]
    pub effort: String,

    /// Enable token counting. Defaults to `false` (opt-in).
    #[serde(default = "default_count_tokens_enabled")]
    pub count_tokens_enabled: bool,
}
102
103#[allow(deprecated)]
104impl Default for AnthropicConfig {
105 fn default() -> Self {
106 Self {
107 skip_model_validation: false,
108 extended_thinking_enabled: default_extended_thinking_enabled(),
109 interleaved_thinking_beta: default_interleaved_thinking_beta(),
110 interleaved_thinking_budget_tokens: default_interleaved_thinking_budget_tokens(),
111 interleaved_thinking_type_enabled: default_interleaved_thinking_type(),
112 tool_search: ToolSearchConfig::default(),
113 effort: default_effort(),
114 count_tokens_enabled: default_count_tokens_enabled(),
115 }
116 }
117}
118
/// serde default for `AnthropicConfig::count_tokens_enabled`: opt-in, so `false`.
#[inline]
fn default_count_tokens_enabled() -> bool {
    false
}
123
/// Configuration for tool search behavior.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ToolSearchConfig {
    /// Enable tool search. Defaults to `false`.
    #[serde(default)]
    pub enabled: bool,

    /// Search algorithm name. Defaults to `"regex"`.
    #[serde(default = "default_tool_search_algorithm")]
    pub algorithm: String,

    /// Whether tools are deferred by default. Defaults to `true`.
    #[serde(default = "default_defer_by_default")]
    pub defer_by_default: bool,

    /// Maximum number of search results. Defaults to `5`.
    #[serde(default = "default_max_results")]
    pub max_results: u32,

    /// Tools that are always available regardless of search results.
    /// Defaults to empty.
    #[serde(default)]
    pub always_available_tools: Vec<String>,
}
149
150impl Default for ToolSearchConfig {
151 fn default() -> Self {
152 Self {
153 enabled: false,
154 algorithm: default_tool_search_algorithm(),
155 defer_by_default: default_defer_by_default(),
156 max_results: default_max_results(),
157 always_available_tools: vec![],
158 }
159 }
160}
161
/// serde default for `ToolSearchConfig::algorithm`.
#[inline]
fn default_tool_search_algorithm() -> String {
    String::from("regex")
}
166
/// serde default for `ToolSearchConfig::defer_by_default`.
#[inline]
fn default_defer_by_default() -> bool {
    true
}
171
/// serde default for `ToolSearchConfig::max_results`.
#[inline]
fn default_max_results() -> u32 {
    5
}
176
/// serde default for `AnthropicConfig::extended_thinking_enabled`.
#[inline]
fn default_extended_thinking_enabled() -> bool {
    true
}
181
/// serde default for `AnthropicConfig::interleaved_thinking_beta`.
#[inline]
fn default_interleaved_thinking_beta() -> String {
    String::from("interleaved-thinking-2025-05-14")
}
186
/// serde default for `AnthropicConfig::interleaved_thinking_budget_tokens`.
/// NOTE(review): 31999 looks like "just under 32k" — confirm the intended cap.
#[inline]
fn default_interleaved_thinking_budget_tokens() -> u32 {
    31999
}
191
/// serde default for `AnthropicConfig::interleaved_thinking_type_enabled`.
#[inline]
fn default_interleaved_thinking_type() -> String {
    "enabled".to_owned()
}
196
/// serde default for `AnthropicConfig::effort`.
#[inline]
fn default_effort() -> String {
    String::from("low")
}
201
#[cfg(test)]
mod tests {
    use super::{OpenAIConfig, OpenAIServiceTier};

    // All defaults off/empty: every field is either `#[serde(default)]`
    // or an `Option`/`Vec` that defaults to empty.
    #[test]
    fn openai_config_defaults_to_websocket_mode_disabled() {
        let config = OpenAIConfig::default();
        assert!(!config.websocket_mode);
        assert_eq!(config.responses_store, None);
        assert!(config.responses_include.is_empty());
        assert_eq!(config.service_tier, None);
    }

    // Setting only `websocket_mode` must leave all other fields at defaults.
    #[test]
    fn openai_config_parses_websocket_mode_opt_in() {
        let parsed: OpenAIConfig =
            toml::from_str("websocket_mode = true").expect("config should parse");
        assert!(parsed.websocket_mode);
        assert_eq!(parsed.responses_store, None);
        assert!(parsed.responses_include.is_empty());
        assert_eq!(parsed.service_tier, None);
    }

    // `responses_store` and `responses_include` round-trip from TOML.
    #[test]
    fn openai_config_parses_responses_options() {
        let parsed: OpenAIConfig = toml::from_str(
            r#"
responses_store = false
responses_include = ["reasoning.encrypted_content", "output_text.annotations"]
"#,
        )
        .expect("config should parse");
        assert_eq!(parsed.responses_store, Some(false));
        assert_eq!(
            parsed.responses_include,
            vec![
                "reasoning.encrypted_content".to_string(),
                "output_text.annotations".to_string()
            ]
        );
        assert_eq!(parsed.service_tier, None);
    }

    // Lowercase wire value maps onto the enum variant via `rename_all`.
    #[test]
    fn openai_config_parses_service_tier() {
        let parsed: OpenAIConfig =
            toml::from_str(r#"service_tier = "priority""#).expect("config should parse");
        assert_eq!(parsed.service_tier, Some(OpenAIServiceTier::Priority));
    }
}