vtcode_core/llm/providers/openai.rs

use crate::config::constants::{models, urls};
use crate::config::core::{OpenAIPromptCacheSettings, PromptCachingConfig};
use crate::config::models::Provider;
use crate::config::types::ReasoningEffortLevel;
use crate::llm::client::LLMClient;
use crate::llm::error_display;
use crate::llm::provider::{
    FinishReason, LLMError, LLMProvider, LLMRequest, LLMResponse, Message, MessageRole, ToolCall,
    ToolChoice, ToolDefinition,
};
use crate::llm::rig_adapter::reasoning_parameters_for;
use crate::llm::types as llm_types;
use async_trait::async_trait;
use reqwest::Client as HttpClient;
use serde_json::{Value, json};
use std::collections::HashSet;

const MAX_COMPLETION_TOKENS_FIELD: &str = "max_completion_tokens";

use super::{extract_reasoning_trace, gpt5_codex_developer_prompt};

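/// OpenAI chat-completions and Responses API provider backed by `reqwest`.
///
/// Holds the API key, base URL, selected model, and prompt-cache settings that
/// control whether cached-token metrics are surfaced on usage data.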
pub struct OpenAIProvider {
    api_key: String,
    http_client: HttpClient,
    base_url: String,
    model: String,
    prompt_cache_enabled: bool,
    prompt_cache_settings: OpenAIPromptCacheSettings,
}

impl OpenAIProvider {
    fn serialize_tools(tools: &[ToolDefinition]) -> Option<Value> {
        if tools.is_empty() {
            return None;
        }

        let serialized_tools = tools.iter().map(|tool| json!(tool)).collect::<Vec<Value>>();

        Some(Value::Array(serialized_tools))
    }

    fn is_gpt5_codex_model(model: &str) -> bool {
        model == models::openai::GPT_5_CODEX
    }

    fn is_reasoning_model(model: &str) -> bool {
        models::openai::REASONING_MODELS
            .iter()
            .any(|candidate| *candidate == model)
    }

    fn uses_responses_api(model: &str) -> bool {
        Self::is_gpt5_codex_model(model) || Self::is_reasoning_model(model)
    }

    pub fn new(api_key: String) -> Self {
        Self::with_model_internal(api_key, models::openai::DEFAULT_MODEL.to_string(), None)
    }

    pub fn with_model(api_key: String, model: String) -> Self {
        Self::with_model_internal(api_key, model, None)
    }

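    /// Builds a provider from optional configuration values.
    ///
    /// Missing values fall back to an empty API key, the default OpenAI model,
    /// and the standard OpenAI base URL; prompt caching stays disabled unless
    /// the supplied config enables it for the OpenAI provider.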
    pub fn from_config(
        api_key: Option<String>,
        model: Option<String>,
        base_url: Option<String>,
        prompt_cache: Option<PromptCachingConfig>,
    ) -> Self {
        let api_key_value = api_key.unwrap_or_default();
        let mut provider = if let Some(model_value) = model {
            Self::with_model_internal(api_key_value, model_value, prompt_cache)
        } else {
            Self::with_model_internal(
                api_key_value,
                models::openai::DEFAULT_MODEL.to_string(),
                prompt_cache,
            )
        };
        if let Some(base) = base_url {
            provider.base_url = base;
        }
        provider
    }

    fn with_model_internal(
        api_key: String,
        model: String,
        prompt_cache: Option<PromptCachingConfig>,
    ) -> Self {
        let (prompt_cache_enabled, prompt_cache_settings) =
            Self::extract_prompt_cache_settings(prompt_cache);

        Self {
            api_key,
            http_client: HttpClient::new(),
            base_url: urls::OPENAI_API_BASE.to_string(),
            model,
            prompt_cache_enabled,
            prompt_cache_settings,
        }
    }

    fn extract_prompt_cache_settings(
        prompt_cache: Option<PromptCachingConfig>,
    ) -> (bool, OpenAIPromptCacheSettings) {
        if let Some(cfg) = prompt_cache {
            let provider_settings = cfg.providers.openai;
            let enabled = cfg.enabled && provider_settings.enabled;
            (enabled, provider_settings)
        } else {
            (false, OpenAIPromptCacheSettings::default())
        }
    }

    fn supports_temperature_parameter(model: &str) -> bool {
        // GPT-5 variants and GPT-5 Codex models don't support the temperature parameter;
        // all other OpenAI models generally support it.
        !Self::is_gpt5_codex_model(model)
            && model != models::openai::GPT_5
            && model != models::openai::GPT_5_MINI
            && model != models::openai::GPT_5_NANO
    }

    fn default_request(&self, prompt: &str) -> LLMRequest {
        LLMRequest {
            messages: vec![Message::user(prompt.to_string())],
            system_prompt: None,
            tools: None,
            model: self.model.clone(),
            max_tokens: None,
            temperature: None,
            stream: false,
            tool_choice: None,
            parallel_tool_calls: None,
            parallel_tool_config: None,
            reasoning_effort: None,
        }
    }

    fn parse_client_prompt(&self, prompt: &str) -> LLMRequest {
        let trimmed = prompt.trim_start();
        if trimmed.starts_with('{') {
            if let Ok(value) = serde_json::from_str::<Value>(trimmed) {
                if let Some(request) = self.parse_chat_request(&value) {
                    return request;
                }
            }
        }

        self.default_request(prompt)
    }

    fn parse_chat_request(&self, value: &Value) -> Option<LLMRequest> {
        let messages_value = value.get("messages")?.as_array()?;
        let mut system_prompt = None;
        let mut messages = Vec::new();

        for entry in messages_value {
            let role = entry
                .get("role")
                .and_then(|r| r.as_str())
                .unwrap_or(crate::config::constants::message_roles::USER);
            let content = entry.get("content");
            let text_content = content.map(Self::extract_content_text).unwrap_or_default();

            match role {
                "system" => {
                    if system_prompt.is_none() && !text_content.is_empty() {
                        system_prompt = Some(text_content);
                    }
                }
                "assistant" => {
                    let tool_calls = entry
                        .get("tool_calls")
                        .and_then(|tc| tc.as_array())
                        .map(|calls| {
                            calls
                                .iter()
                                .filter_map(|call| {
                                    let id = call.get("id").and_then(|v| v.as_str())?;
                                    let function = call.get("function")?;
                                    let name = function.get("name").and_then(|v| v.as_str())?;
                                    let arguments = function.get("arguments");
                                    let serialized = arguments.map_or("{}".to_string(), |value| {
                                        if value.is_string() {
                                            value.as_str().unwrap_or("").to_string()
                                        } else {
                                            value.to_string()
                                        }
                                    });
                                    Some(ToolCall::function(
                                        id.to_string(),
                                        name.to_string(),
                                        serialized,
                                    ))
                                })
                                .collect::<Vec<_>>()
                        })
                        .filter(|calls| !calls.is_empty());

                    let message = if let Some(calls) = tool_calls {
                        Message {
                            role: MessageRole::Assistant,
                            content: text_content,
                            tool_calls: Some(calls),
                            tool_call_id: None,
                        }
                    } else {
                        Message::assistant(text_content)
                    };
                    messages.push(message);
                }
                "tool" => {
                    let tool_call_id = entry
                        .get("tool_call_id")
                        .and_then(|id| id.as_str())
                        .map(|s| s.to_string());
                    let content_value = entry
                        .get("content")
                        .map(|value| {
                            if text_content.is_empty() {
                                value.to_string()
                            } else {
                                text_content.clone()
                            }
                        })
                        .unwrap_or_else(|| text_content.clone());
                    messages.push(Message {
                        role: MessageRole::Tool,
                        content: content_value,
                        tool_calls: None,
                        tool_call_id,
                    });
                }
                _ => {
                    messages.push(Message::user(text_content));
                }
            }
        }

        if messages.is_empty() {
            return None;
        }

        let tools = value.get("tools").and_then(|tools_value| {
            let tools_array = tools_value.as_array()?;
            let converted: Vec<_> = tools_array
                .iter()
                .filter_map(|tool| {
                    let function = tool.get("function")?;
                    let name = function.get("name").and_then(|n| n.as_str())?;
                    let description = function
                        .get("description")
                        .and_then(|d| d.as_str())
                        .unwrap_or("")
                        .to_string();
                    let parameters = function
                        .get("parameters")
                        .cloned()
                        .unwrap_or_else(|| json!({}));
                    Some(ToolDefinition::function(
                        name.to_string(),
                        description,
                        parameters,
                    ))
                })
                .collect();

            if converted.is_empty() {
                None
            } else {
                Some(converted)
            }
        });
        let temperature = value
            .get("temperature")
            .and_then(|v| v.as_f64())
            .map(|v| v as f32);
        let max_tokens = value
            .get(MAX_COMPLETION_TOKENS_FIELD)
            .or_else(|| value.get("max_tokens"))
            .and_then(|v| v.as_u64())
            .map(|v| v as u32);
        let stream = value
            .get("stream")
            .and_then(|v| v.as_bool())
            .unwrap_or(false);
        let tool_choice = value.get("tool_choice").and_then(Self::parse_tool_choice);
        let parallel_tool_calls = value.get("parallel_tool_calls").and_then(|v| v.as_bool());
        let reasoning_effort = value
            .get("reasoning_effort")
            .and_then(|v| v.as_str())
            .and_then(ReasoningEffortLevel::from_str)
            .or_else(|| {
                value
                    .get("reasoning")
                    .and_then(|r| r.get("effort"))
                    .and_then(|effort| effort.as_str())
                    .and_then(ReasoningEffortLevel::from_str)
            });

        let model = value
            .get("model")
            .and_then(|m| m.as_str())
            .unwrap_or(&self.model)
            .to_string();

        Some(LLMRequest {
            messages,
            system_prompt,
            tools,
            model,
            max_tokens,
            temperature,
            stream,
            tool_choice,
            parallel_tool_calls,
            parallel_tool_config: None,
            reasoning_effort,
        })
    }

    fn extract_content_text(content: &Value) -> String {
        match content {
            Value::String(text) => text.to_string(),
            Value::Array(parts) => parts
                .iter()
                .filter_map(|part| {
                    if let Some(text) = part.get("text").and_then(|t| t.as_str()) {
                        Some(text.to_string())
                    } else if let Some(Value::String(text)) = part.get("content") {
                        Some(text.clone())
                    } else {
                        None
                    }
                })
                .collect::<Vec<_>>()
                .join(""),
            _ => String::new(),
        }
    }

    fn parse_tool_choice(choice: &Value) -> Option<ToolChoice> {
        match choice {
            Value::String(value) => match value.as_str() {
                "auto" => Some(ToolChoice::auto()),
                "none" => Some(ToolChoice::none()),
                "required" => Some(ToolChoice::any()),
                _ => None,
            },
            Value::Object(map) => {
                let choice_type = map.get("type").and_then(|t| t.as_str())?;
                match choice_type {
                    "function" => map
                        .get("function")
                        .and_then(|f| f.get("name"))
                        .and_then(|n| n.as_str())
                        .map(|name| ToolChoice::function(name.to_string())),
                    "auto" => Some(ToolChoice::auto()),
                    "none" => Some(ToolChoice::none()),
                    "any" | "required" => Some(ToolChoice::any()),
                    _ => None,
                }
            }
            _ => None,
        }
    }

    fn convert_to_openai_format(&self, request: &LLMRequest) -> Result<Value, LLMError> {
        let mut messages = Vec::new();
        let mut active_tool_call_ids: HashSet<String> = HashSet::new();

        if let Some(system_prompt) = &request.system_prompt {
            messages.push(json!({
                "role": crate::config::constants::message_roles::SYSTEM,
                "content": system_prompt
            }));
        }

        for msg in &request.messages {
            let role = msg.role.as_openai_str();
            let mut message = json!({
                "role": role,
                "content": msg.content
            });
            let mut skip_message = false;

            if msg.role == MessageRole::Assistant {
                if let Some(tool_calls) = &msg.tool_calls {
                    if !tool_calls.is_empty() {
                        let tool_calls_json: Vec<Value> = tool_calls
                            .iter()
                            .map(|tc| {
                                active_tool_call_ids.insert(tc.id.clone());
                                json!({
                                    "id": tc.id,
                                    "type": "function",
                                    "function": {
                                        "name": tc.function.name,
                                        "arguments": tc.function.arguments
                                    }
                                })
                            })
                            .collect();
                        message["tool_calls"] = Value::Array(tool_calls_json);
                    }
                }
            }

            if msg.role == MessageRole::Tool {
                match &msg.tool_call_id {
                    Some(tool_call_id) if active_tool_call_ids.contains(tool_call_id) => {
                        message["tool_call_id"] = Value::String(tool_call_id.clone());
                        active_tool_call_ids.remove(tool_call_id);
                    }
                    Some(_) | None => {
                        skip_message = true;
                    }
                }
            }

            if !skip_message {
                messages.push(message);
            }
        }

        if messages.is_empty() {
            let formatted_error = error_display::format_llm_error("OpenAI", "No messages provided");
            return Err(LLMError::InvalidRequest(formatted_error));
        }

        let mut openai_request = json!({
            "model": request.model,
            "messages": messages,
            "stream": request.stream
        });

        if Self::supports_temperature_parameter(&request.model) {
            if let Some(temperature) = request.temperature {
                openai_request["temperature"] = json!(temperature);
            }
        }

        if let Some(max_tokens) = request.max_tokens {
            openai_request[MAX_COMPLETION_TOKENS_FIELD] = json!(max_tokens);
        }

        if let Some(tools) = &request.tools {
            if let Some(serialized) = Self::serialize_tools(tools) {
                openai_request["tools"] = serialized;
            }
        }

        if let Some(tool_choice) = &request.tool_choice {
            openai_request["tool_choice"] = tool_choice.to_provider_format("openai");
        }

        if let Some(parallel) = request.parallel_tool_calls {
            openai_request["parallel_tool_calls"] = Value::Bool(parallel);
        }

        if let Some(effort) = request.reasoning_effort {
            if self.supports_reasoning_effort(&request.model) {
                if let Some(payload) = reasoning_parameters_for(Provider::OpenAI, effort) {
                    openai_request["reasoning"] = payload;
                } else {
                    openai_request["reasoning"] = json!({ "effort": effort.as_str() });
                }
            }
        }

        Ok(openai_request)
    }

    fn convert_to_openai_responses_format(&self, request: &LLMRequest) -> Result<Value, LLMError> {
        let input = if Self::is_gpt5_codex_model(&request.model) {
            build_codex_responses_input_openai(request)?
        } else {
            build_standard_responses_input_openai(request)?
        };

        if input.is_empty() {
            let formatted_error =
                error_display::format_llm_error("OpenAI", "No messages provided for Responses API");
            return Err(LLMError::InvalidRequest(formatted_error));
        }

        let mut openai_request = json!({
            "model": request.model,
            "input": input,
            "stream": request.stream
        });

        if Self::supports_temperature_parameter(&request.model) {
            if let Some(temperature) = request.temperature {
                openai_request["temperature"] = json!(temperature);
            }
        }

        if let Some(max_tokens) = request.max_tokens {
            openai_request["max_output_tokens"] = json!(max_tokens);
        }

        if let Some(tools) = &request.tools {
            if let Some(serialized) = Self::serialize_tools(tools) {
                openai_request["tools"] = serialized;
            }
        }

        if let Some(tool_choice) = &request.tool_choice {
            openai_request["tool_choice"] = tool_choice.to_provider_format("openai");
        }

        if let Some(parallel) = request.parallel_tool_calls {
            openai_request["parallel_tool_calls"] = Value::Bool(parallel);
        }

        if let Some(effort) = request.reasoning_effort {
            if self.supports_reasoning_effort(&request.model) {
                if let Some(payload) = reasoning_parameters_for(Provider::OpenAI, effort) {
                    openai_request["reasoning"] = payload;
                } else {
                    openai_request["reasoning"] = json!({ "effort": effort.as_str() });
                }
            }
        }

        // Reasoning models need a reasoning block; default to medium effort only
        // when the request did not already set one above.
        if Self::is_reasoning_model(&request.model) && openai_request.get("reasoning").is_none() {
            openai_request["reasoning"] = json!({ "effort": "medium" });
        }

        Ok(openai_request)
    }

    fn parse_openai_response(&self, response_json: Value) -> Result<LLMResponse, LLMError> {
        let choices = response_json
            .get("choices")
            .and_then(|c| c.as_array())
            .ok_or_else(|| {
                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    "Invalid response format: missing choices",
                );
                LLMError::Provider(formatted_error)
            })?;

        if choices.is_empty() {
            let formatted_error =
                error_display::format_llm_error("OpenAI", "No choices in response");
            return Err(LLMError::Provider(formatted_error));
        }

        let choice = &choices[0];
        let message = choice.get("message").ok_or_else(|| {
            let formatted_error = error_display::format_llm_error(
                "OpenAI",
                "Invalid response format: missing message",
            );
            LLMError::Provider(formatted_error)
        })?;

        let content = match message.get("content") {
            Some(Value::String(text)) => Some(text.to_string()),
            Some(Value::Array(parts)) => {
                let text = parts
                    .iter()
                    .filter_map(|part| part.get("text").and_then(|t| t.as_str()))
                    .collect::<Vec<_>>()
                    .join("");
                if text.is_empty() { None } else { Some(text) }
            }
            _ => None,
        };

        let tool_calls = message
            .get("tool_calls")
            .and_then(|tc| tc.as_array())
            .map(|calls| {
                calls
                    .iter()
                    .filter_map(|call| {
                        let id = call.get("id").and_then(|v| v.as_str())?;
                        let function = call.get("function")?;
                        let name = function.get("name").and_then(|v| v.as_str())?;
                        let arguments = function.get("arguments");
                        let serialized = arguments.map_or("{}".to_string(), |value| {
                            if value.is_string() {
                                value.as_str().unwrap_or("").to_string()
                            } else {
                                value.to_string()
                            }
                        });
                        Some(ToolCall::function(
                            id.to_string(),
                            name.to_string(),
                            serialized,
                        ))
                    })
                    .collect::<Vec<_>>()
            })
            .filter(|calls| !calls.is_empty());

        let reasoning = message
            .get("reasoning")
            .and_then(extract_reasoning_trace)
            .or_else(|| choice.get("reasoning").and_then(extract_reasoning_trace));

        let finish_reason = choice
            .get("finish_reason")
            .and_then(|fr| fr.as_str())
            .map(|fr| match fr {
                "stop" => FinishReason::Stop,
                "length" => FinishReason::Length,
                "tool_calls" => FinishReason::ToolCalls,
                "content_filter" => FinishReason::ContentFilter,
                other => FinishReason::Error(other.to_string()),
            })
            .unwrap_or(FinishReason::Stop);

        Ok(LLMResponse {
            content,
            tool_calls,
            usage: response_json.get("usage").map(|usage_value| {
                let cached_prompt_tokens =
                    if self.prompt_cache_enabled && self.prompt_cache_settings.surface_metrics {
                        usage_value
                            .get("prompt_tokens_details")
                            .and_then(|details| details.get("cached_tokens"))
                            .and_then(|value| value.as_u64())
                            .map(|value| value as u32)
                    } else {
                        None
                    };

                crate::llm::provider::Usage {
                    prompt_tokens: usage_value
                        .get("prompt_tokens")
                        .and_then(|pt| pt.as_u64())
                        .unwrap_or(0) as u32,
                    completion_tokens: usage_value
                        .get("completion_tokens")
                        .and_then(|ct| ct.as_u64())
                        .unwrap_or(0) as u32,
                    total_tokens: usage_value
                        .get("total_tokens")
                        .and_then(|tt| tt.as_u64())
                        .unwrap_or(0) as u32,
                    cached_prompt_tokens,
                    cache_creation_tokens: None,
                    cache_read_tokens: None,
                }
            }),
            finish_reason,
            reasoning,
        })
    }

    fn parse_openai_responses_response(
        &self,
        response_json: Value,
    ) -> Result<LLMResponse, LLMError> {
        let output = response_json
            .get("output")
            .or_else(|| response_json.get("choices"))
            .and_then(|value| value.as_array())
            .ok_or_else(|| {
                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    "Invalid response format: missing output",
                );
                LLMError::Provider(formatted_error)
            })?;

        if output.is_empty() {
            let formatted_error =
                error_display::format_llm_error("OpenAI", "No output in response");
            return Err(LLMError::Provider(formatted_error));
        }

        let mut content_fragments = Vec::new();
        let mut reasoning_fragments = Vec::new();
        let mut tool_calls_vec = Vec::new();

        for item in output {
            let item_type = item
                .get("type")
                .and_then(|value| value.as_str())
                .unwrap_or("");
            if item_type != "message" {
                continue;
            }

            if let Some(content_array) = item.get("content").and_then(|value| value.as_array()) {
                for entry in content_array {
                    let entry_type = entry
                        .get("type")
                        .and_then(|value| value.as_str())
                        .unwrap_or("");
                    match entry_type {
                        "output_text" | "text" => {
                            if let Some(text) = entry.get("text").and_then(|value| value.as_str()) {
                                if !text.is_empty() {
                                    content_fragments.push(text.to_string());
                                }
                            }
                        }
                        "reasoning" => {
                            if let Some(text) = entry.get("text").and_then(|value| value.as_str()) {
                                if !text.is_empty() {
                                    reasoning_fragments.push(text.to_string());
                                }
                            }
                        }
                        "tool_call" => {
                            let (name_value, arguments_value) = if let Some(function) =
                                entry.get("function").and_then(|value| value.as_object())
                            {
                                let name = function.get("name").and_then(|value| value.as_str());
                                let arguments = function.get("arguments");
                                (name, arguments)
                            } else {
                                let name = entry.get("name").and_then(|value| value.as_str());
                                let arguments = entry.get("arguments");
                                (name, arguments)
                            };

                            if let Some(name) = name_value {
                                let id = entry
                                    .get("id")
                                    .and_then(|value| value.as_str())
                                    .unwrap_or("");
                                let serialized =
                                    arguments_value.map_or("{}".to_string(), |value| {
                                        if value.is_string() {
                                            value.as_str().unwrap_or("").to_string()
                                        } else {
                                            value.to_string()
                                        }
                                    });
                                tool_calls_vec.push(ToolCall::function(
                                    id.to_string(),
                                    name.to_string(),
                                    serialized,
                                ));
                            }
                        }
                        _ => {}
                    }
                }
            }
        }

        let content = if content_fragments.is_empty() {
            None
        } else {
            Some(content_fragments.join(""))
        };

        let reasoning = if reasoning_fragments.is_empty() {
            None
        } else {
            Some(reasoning_fragments.join(""))
        };

        let tool_calls = if tool_calls_vec.is_empty() {
            None
        } else {
            Some(tool_calls_vec)
        };

        let usage = response_json.get("usage").map(|usage_value| {
            let cached_prompt_tokens =
                if self.prompt_cache_enabled && self.prompt_cache_settings.surface_metrics {
                    usage_value
                        .get("prompt_tokens_details")
                        .and_then(|details| details.get("cached_tokens"))
                        .or_else(|| usage_value.get("prompt_cache_hit_tokens"))
                        .and_then(|value| value.as_u64())
                        .map(|value| value as u32)
                } else {
                    None
                };

            crate::llm::provider::Usage {
                prompt_tokens: usage_value
                    .get("input_tokens")
                    .or_else(|| usage_value.get("prompt_tokens"))
                    .and_then(|pt| pt.as_u64())
                    .unwrap_or(0) as u32,
                completion_tokens: usage_value
                    .get("output_tokens")
                    .or_else(|| usage_value.get("completion_tokens"))
                    .and_then(|ct| ct.as_u64())
                    .unwrap_or(0) as u32,
                total_tokens: usage_value
                    .get("total_tokens")
                    .and_then(|tt| tt.as_u64())
                    .unwrap_or(0) as u32,
                cached_prompt_tokens,
                cache_creation_tokens: None,
                cache_read_tokens: None,
            }
        });

        let stop_reason = response_json
            .get("stop_reason")
            .and_then(|value| value.as_str())
            .or_else(|| {
                output
                    .iter()
                    .find_map(|item| item.get("stop_reason").and_then(|value| value.as_str()))
            })
            .unwrap_or("stop");

        let finish_reason = match stop_reason {
            "stop" => FinishReason::Stop,
            "max_output_tokens" | "length" => FinishReason::Length,
            "tool_use" | "tool_calls" => FinishReason::ToolCalls,
            other => FinishReason::Error(other.to_string()),
        };

        Ok(LLMResponse {
            content,
            tool_calls,
            usage,
            finish_reason,
            reasoning,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn sample_tool() -> ToolDefinition {
        ToolDefinition::function(
            "search_workspace".to_string(),
            "Search project files".to_string(),
            json!({
                "type": "object",
                "properties": {
                    "query": {"type": "string"}
                },
                "required": ["query"],
                "additionalProperties": false
            }),
        )
    }

    fn sample_request(model: &str) -> LLMRequest {
        LLMRequest {
            messages: vec![Message::user("Hello".to_string())],
            system_prompt: None,
            tools: Some(vec![sample_tool()]),
            model: model.to_string(),
            max_tokens: None,
            temperature: None,
            stream: false,
            tool_choice: None,
            parallel_tool_calls: None,
            parallel_tool_config: None,
            reasoning_effort: None,
        }
    }

    #[test]
    fn serialize_tools_wraps_function_definition() {
        let tools = vec![sample_tool()];
        let serialized = OpenAIProvider::serialize_tools(&tools).expect("tools should serialize");
        let serialized_tools = serialized
            .as_array()
            .expect("serialized tools should be an array");
        assert_eq!(serialized_tools.len(), 1);

        let tool_value = serialized_tools[0]
            .as_object()
            .expect("tool should be serialized as object");
        assert_eq!(
            tool_value.get("type").and_then(Value::as_str),
            Some("function")
        );
        assert!(tool_value.contains_key("function"));
        assert!(!tool_value.contains_key("name"));

        let function_value = tool_value
            .get("function")
            .and_then(Value::as_object)
            .expect("function payload missing");
        assert_eq!(
            function_value.get("name").and_then(Value::as_str),
            Some("search_workspace")
        );
        assert!(function_value.contains_key("parameters"));
    }

    #[test]
    fn chat_completions_payload_uses_function_wrapper() {
        let provider =
            OpenAIProvider::with_model(String::new(), models::openai::DEFAULT_MODEL.to_string());
        let request = sample_request(models::openai::DEFAULT_MODEL);
        let payload = provider
            .convert_to_openai_format(&request)
            .expect("conversion should succeed");

        let tools = payload
            .get("tools")
            .and_then(Value::as_array)
            .expect("tools should exist on payload");
        let tool_object = tools[0].as_object().expect("tool entry should be object");
        assert!(tool_object.contains_key("function"));
        assert!(!tool_object.contains_key("name"));
    }

    #[test]
    fn responses_payload_uses_function_wrapper() {
        let provider =
            OpenAIProvider::with_model(String::new(), models::openai::GPT_5_CODEX.to_string());
        let request = sample_request(models::openai::GPT_5_CODEX);
        let payload = provider
            .convert_to_openai_responses_format(&request)
            .expect("conversion should succeed");

        let tools = payload
            .get("tools")
            .and_then(Value::as_array)
            .expect("tools should exist on payload");
        let tool_object = tools[0].as_object().expect("tool entry should be object");
        assert!(tool_object.contains_key("function"));
        assert!(!tool_object.contains_key("name"));
    }

    #[test]
    fn chat_completions_uses_max_completion_tokens_field() {
        let provider =
            OpenAIProvider::with_model(String::new(), models::openai::DEFAULT_MODEL.to_string());
        let mut request = sample_request(models::openai::DEFAULT_MODEL);
        request.max_tokens = Some(512);

        let payload = provider
            .convert_to_openai_format(&request)
            .expect("conversion should succeed");

        let max_tokens_value = payload
            .get(MAX_COMPLETION_TOKENS_FIELD)
            .and_then(Value::as_u64)
            .expect("max completion tokens should be set");
        assert_eq!(max_tokens_value, 512);
        assert!(payload.get("max_tokens").is_none());
    }
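
    // Additional sketches of unit coverage for the pure helpers above. They only
    // exercise behavior visible in this file (model gating, tool-choice parsing,
    // and content extraction) and assume the `models::openai` constants referenced
    // here exist exactly as they are used elsewhere in this module.
    #[test]
    fn temperature_is_rejected_for_gpt5_family_models() {
        assert!(!OpenAIProvider::supports_temperature_parameter(
            models::openai::GPT_5
        ));
        assert!(!OpenAIProvider::supports_temperature_parameter(
            models::openai::GPT_5_CODEX
        ));
    }

    #[test]
    fn parse_tool_choice_accepts_known_variants() {
        assert!(OpenAIProvider::parse_tool_choice(&json!("auto")).is_some());
        assert!(OpenAIProvider::parse_tool_choice(&json!("required")).is_some());
        assert!(OpenAIProvider::parse_tool_choice(&json!("unknown")).is_none());
        assert!(
            OpenAIProvider::parse_tool_choice(&json!({
                "type": "function",
                "function": {"name": "search_workspace"}
            }))
            .is_some()
        );
    }

    #[test]
    fn extract_content_text_joins_text_parts() {
        assert_eq!(
            OpenAIProvider::extract_content_text(&json!("hello")),
            "hello"
        );
        assert_eq!(
            OpenAIProvider::extract_content_text(&json!([
                {"type": "text", "text": "foo"},
                {"type": "text", "text": "bar"}
            ])),
            "foobar"
        );
    }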
}

fn build_standard_responses_input_openai(request: &LLMRequest) -> Result<Vec<Value>, LLMError> {
    let mut input = Vec::new();
    let mut active_tool_call_ids: HashSet<String> = HashSet::new();

    if let Some(system_prompt) = &request.system_prompt {
        if !system_prompt.trim().is_empty() {
            input.push(json!({
                "role": "developer",
                "content": [{
                    "type": "input_text",
                    "text": system_prompt.clone()
                }]
            }));
        }
    }

    for msg in &request.messages {
        match msg.role {
            MessageRole::System => {
                if !msg.content.trim().is_empty() {
                    input.push(json!({
                        "role": "developer",
                        "content": [{
                            "type": "input_text",
                            "text": msg.content.clone()
                        }]
                    }));
                }
            }
            MessageRole::User => {
                input.push(json!({
                    "role": "user",
                    "content": [{
                        "type": "input_text",
                        "text": msg.content.clone()
                    }]
                }));
            }
            MessageRole::Assistant => {
                let mut content_parts = Vec::new();
                if !msg.content.is_empty() {
                    content_parts.push(json!({
                        "type": "output_text",
                        "text": msg.content.clone()
                    }));
                }

                if let Some(tool_calls) = &msg.tool_calls {
                    for call in tool_calls {
                        active_tool_call_ids.insert(call.id.clone());
                        content_parts.push(json!({
                            "type": "tool_call",
                            "id": call.id.clone(),
                            "function": {
                                "name": call.function.name.clone(),
                                "arguments": call.function.arguments.clone()
                            }
                        }));
                    }
                }

                if !content_parts.is_empty() {
                    input.push(json!({
                        "role": "assistant",
                        "content": content_parts
                    }));
                }
            }
            MessageRole::Tool => {
                let tool_call_id = msg.tool_call_id.clone().ok_or_else(|| {
                    let formatted_error = error_display::format_llm_error(
                        "OpenAI",
                        "Tool messages must include tool_call_id for Responses API",
                    );
                    LLMError::InvalidRequest(formatted_error)
                })?;

                if !active_tool_call_ids.contains(&tool_call_id) {
                    continue;
                }

                let mut tool_content = Vec::new();
                if !msg.content.trim().is_empty() {
                    tool_content.push(json!({
                        "type": "output_text",
                        "text": msg.content.clone()
                    }));
                }

                let mut tool_result = json!({
                    "type": "tool_result",
                    "tool_call_id": tool_call_id
                });

                active_tool_call_ids.remove(&tool_call_id);

                if !tool_content.is_empty() {
                    if let Value::Object(ref mut map) = tool_result {
                        map.insert("content".to_string(), json!(tool_content));
                    }
                }

                input.push(json!({
                    "role": "tool",
                    "content": [tool_result]
                }));
            }
        }
    }

    Ok(input)
}

fn build_codex_responses_input_openai(request: &LLMRequest) -> Result<Vec<Value>, LLMError> {
    let mut additional_guidance = Vec::new();

    if let Some(system_prompt) = &request.system_prompt {
        let trimmed = system_prompt.trim();
        if !trimmed.is_empty() {
            additional_guidance.push(trimmed.to_string());
        }
    }

    let mut input = Vec::new();
    let mut active_tool_call_ids: HashSet<String> = HashSet::new();

    for msg in &request.messages {
        match msg.role {
            MessageRole::System => {
                let trimmed = msg.content.trim();
                if !trimmed.is_empty() {
                    additional_guidance.push(trimmed.to_string());
                }
            }
            MessageRole::User => {
                input.push(json!({
                    "role": "user",
                    "content": [{
                        "type": "input_text",
                        "text": msg.content.clone()
                    }]
                }));
            }
            MessageRole::Assistant => {
                let mut content_parts = Vec::new();
                if !msg.content.is_empty() {
                    content_parts.push(json!({
                        "type": "output_text",
                        "text": msg.content.clone()
                    }));
                }

                if let Some(tool_calls) = &msg.tool_calls {
                    for call in tool_calls {
                        active_tool_call_ids.insert(call.id.clone());
                        content_parts.push(json!({
                            "type": "tool_call",
                            "id": call.id.clone(),
                            "function": {
                                "name": call.function.name.clone(),
                                "arguments": call.function.arguments.clone()
                            }
                        }));
                    }
                }

                if !content_parts.is_empty() {
                    input.push(json!({
                        "role": "assistant",
                        "content": content_parts
                    }));
                }
            }
            MessageRole::Tool => {
                let tool_call_id = msg.tool_call_id.clone().ok_or_else(|| {
                    let formatted_error = error_display::format_llm_error(
                        "OpenAI",
                        "Tool messages must include tool_call_id for Responses API",
                    );
                    LLMError::InvalidRequest(formatted_error)
                })?;

                if !active_tool_call_ids.contains(&tool_call_id) {
                    continue;
                }

                let mut tool_content = Vec::new();
                if !msg.content.trim().is_empty() {
                    tool_content.push(json!({
                        "type": "output_text",
                        "text": msg.content.clone()
                    }));
                }

                let mut tool_result = json!({
                    "type": "tool_result",
                    "tool_call_id": tool_call_id
                });

                active_tool_call_ids.remove(&tool_call_id);

                if !tool_content.is_empty() {
                    if let Value::Object(ref mut map) = tool_result {
                        map.insert("content".to_string(), json!(tool_content));
                    }
                }

                input.push(json!({
                    "role": "tool",
                    "content": [tool_result]
                }));
            }
        }
    }

    let developer_prompt = gpt5_codex_developer_prompt(&additional_guidance);
    input.insert(
        0,
        json!({
            "role": "developer",
            "content": [{
                "type": "input_text",
                "text": developer_prompt
            }]
        }),
    );

    Ok(input)
}

#[async_trait]
impl LLMProvider for OpenAIProvider {
    fn name(&self) -> &str {
        "openai"
    }

    fn supports_reasoning(&self, _model: &str) -> bool {
        false
    }

    fn supports_reasoning_effort(&self, model: &str) -> bool {
        let requested = if model.trim().is_empty() {
            self.model.as_str()
        } else {
            model
        };
        models::openai::REASONING_MODELS
            .iter()
            .any(|candidate| *candidate == requested)
    }

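    // Reasoning-capable models (including GPT-5 Codex) are routed to the
    // Responses API at `/responses`; every other model goes through the classic
    // `/chat/completions` endpoint. Both branches share the same error handling.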
    async fn generate(&self, request: LLMRequest) -> Result<LLMResponse, LLMError> {
        let mut request = request;
        if request.model.trim().is_empty() {
            request.model = self.model.clone();
        }

        if Self::uses_responses_api(&request.model) {
            let openai_request = self.convert_to_openai_responses_format(&request)?;
            let url = format!("{}/responses", self.base_url);

            let response = self
                .http_client
                .post(&url)
                .bearer_auth(&self.api_key)
                .json(&openai_request)
                .send()
                .await
                .map_err(|e| {
                    let formatted_error =
                        error_display::format_llm_error("OpenAI", &format!("Network error: {}", e));
                    LLMError::Network(formatted_error)
                })?;

            if !response.status().is_success() {
                let status = response.status();
                let error_text = response.text().await.unwrap_or_default();

                if status.as_u16() == 429
                    || error_text.contains("insufficient_quota")
                    || error_text.contains("quota")
                    || error_text.contains("rate limit")
                {
                    return Err(LLMError::RateLimit);
                }

                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    &format!("HTTP {}: {}", status, error_text),
                );
                return Err(LLMError::Provider(formatted_error));
            }

            let openai_response: Value = response.json().await.map_err(|e| {
                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    &format!("Failed to parse response: {}", e),
                );
                LLMError::Provider(formatted_error)
            })?;

            self.parse_openai_responses_response(openai_response)
        } else {
            let openai_request = self.convert_to_openai_format(&request)?;
            let url = format!("{}/chat/completions", self.base_url);

            let response = self
                .http_client
                .post(&url)
                .bearer_auth(&self.api_key)
                .json(&openai_request)
                .send()
                .await
                .map_err(|e| {
                    let formatted_error =
                        error_display::format_llm_error("OpenAI", &format!("Network error: {}", e));
                    LLMError::Network(formatted_error)
                })?;

            if !response.status().is_success() {
                let status = response.status();
                let error_text = response.text().await.unwrap_or_default();

                if status.as_u16() == 429
                    || error_text.contains("insufficient_quota")
                    || error_text.contains("quota")
                    || error_text.contains("rate limit")
                {
                    return Err(LLMError::RateLimit);
                }

                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    &format!("HTTP {}: {}", status, error_text),
                );
                return Err(LLMError::Provider(formatted_error));
            }

            let openai_response: Value = response.json().await.map_err(|e| {
                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    &format!("Failed to parse response: {}", e),
                );
                LLMError::Provider(formatted_error)
            })?;

            self.parse_openai_response(openai_response)
        }
    }

    fn supported_models(&self) -> Vec<String> {
        models::openai::SUPPORTED_MODELS
            .iter()
            .map(|s| s.to_string())
            .collect()
    }

    fn validate_request(&self, request: &LLMRequest) -> Result<(), LLMError> {
        if request.messages.is_empty() {
            let formatted_error =
                error_display::format_llm_error("OpenAI", "Messages cannot be empty");
            return Err(LLMError::InvalidRequest(formatted_error));
        }

        if !self.supported_models().contains(&request.model) {
            let formatted_error = error_display::format_llm_error(
                "OpenAI",
                &format!("Unsupported model: {}", request.model),
            );
            return Err(LLMError::InvalidRequest(formatted_error));
        }

        for message in &request.messages {
            if let Err(err) = message.validate_for_provider("openai") {
                let formatted = error_display::format_llm_error("OpenAI", &err);
                return Err(LLMError::InvalidRequest(formatted));
            }
        }

        Ok(())
    }
}

#[async_trait]
impl LLMClient for OpenAIProvider {
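    // Accepts either a plain prompt string or a serialized chat-completions style
    // JSON request (with "messages", optional "tools", "temperature", and so on);
    // JSON payloads are parsed via `parse_client_prompt`, anything else is sent
    // as a single user message.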
    async fn generate(&mut self, prompt: &str) -> Result<llm_types::LLMResponse, LLMError> {
        let request = self.parse_client_prompt(prompt);
        let request_model = request.model.clone();
        let response = LLMProvider::generate(self, request).await?;

        Ok(llm_types::LLMResponse {
            content: response.content.unwrap_or_default(),
            model: request_model,
            usage: response.usage.map(|u| llm_types::Usage {
                prompt_tokens: u.prompt_tokens as usize,
                completion_tokens: u.completion_tokens as usize,
                total_tokens: u.total_tokens as usize,
                cached_prompt_tokens: u.cached_prompt_tokens.map(|v| v as usize),
                cache_creation_tokens: u.cache_creation_tokens.map(|v| v as usize),
                cache_read_tokens: u.cache_read_tokens.map(|v| v as usize),
            }),
            reasoning: response.reasoning,
        })
    }

    fn backend_kind(&self) -> llm_types::BackendKind {
        llm_types::BackendKind::OpenAI
    }

    fn model_id(&self) -> &str {
        &self.model
    }
}