vtcode_core/llm/providers/openai.rs

use crate::config::constants::{models, urls};
use crate::config::core::{OpenAIPromptCacheSettings, PromptCachingConfig};
use crate::config::models::Provider;
use crate::config::types::ReasoningEffortLevel;
use crate::llm::client::LLMClient;
use crate::llm::error_display;
use crate::llm::provider::{
    FinishReason, LLMError, LLMProvider, LLMRequest, LLMResponse, Message, MessageRole, ToolCall,
    ToolChoice, ToolDefinition,
};
use crate::llm::rig_adapter::reasoning_parameters_for;
use crate::llm::types as llm_types;
use async_trait::async_trait;
use reqwest::Client as HttpClient;
use serde_json::{Value, json};

use super::{extract_reasoning_trace, gpt5_codex_developer_prompt};

const MAX_COMPLETION_TOKENS_FIELD: &str = "max_completion_tokens";

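/// OpenAI provider that targets the Chat Completions endpoint for standard models and the
/// Responses endpoint for GPT-5 Codex and reasoning models.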
pub struct OpenAIProvider {
    api_key: String,
    http_client: HttpClient,
    base_url: String,
    model: String,
    prompt_cache_enabled: bool,
    prompt_cache_settings: OpenAIPromptCacheSettings,
}

impl OpenAIProvider {
    fn serialize_tools(tools: &[ToolDefinition]) -> Option<Value> {
        if tools.is_empty() {
            return None;
        }

        let serialized_tools = tools.iter().map(|tool| json!(tool)).collect::<Vec<Value>>();

        Some(Value::Array(serialized_tools))
    }

    fn is_gpt5_codex_model(model: &str) -> bool {
        model == models::openai::GPT_5_CODEX
    }

    fn is_reasoning_model(model: &str) -> bool {
        models::openai::REASONING_MODELS
            .iter()
            .any(|candidate| *candidate == model)
    }

    fn uses_responses_api(model: &str) -> bool {
        Self::is_gpt5_codex_model(model) || Self::is_reasoning_model(model)
    }

    pub fn new(api_key: String) -> Self {
        Self::with_model_internal(api_key, models::openai::DEFAULT_MODEL.to_string(), None)
    }

    pub fn with_model(api_key: String, model: String) -> Self {
        Self::with_model_internal(api_key, model, None)
    }

    pub fn from_config(
        api_key: Option<String>,
        model: Option<String>,
        base_url: Option<String>,
        prompt_cache: Option<PromptCachingConfig>,
    ) -> Self {
        let api_key_value = api_key.unwrap_or_default();
        let mut provider = if let Some(model_value) = model {
            Self::with_model_internal(api_key_value, model_value, prompt_cache)
        } else {
            Self::with_model_internal(
                api_key_value,
                models::openai::DEFAULT_MODEL.to_string(),
                prompt_cache,
            )
        };
        if let Some(base) = base_url {
            provider.base_url = base;
        }
        provider
    }

    fn with_model_internal(
        api_key: String,
        model: String,
        prompt_cache: Option<PromptCachingConfig>,
    ) -> Self {
        let (prompt_cache_enabled, prompt_cache_settings) =
            Self::extract_prompt_cache_settings(prompt_cache);

        Self {
            api_key,
            http_client: HttpClient::new(),
            base_url: urls::OPENAI_API_BASE.to_string(),
            model,
            prompt_cache_enabled,
            prompt_cache_settings,
        }
    }

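    /// Resolve the effective prompt-cache flags: caching is enabled only when both the global
    /// prompt-cache toggle and the OpenAI provider toggle are on.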
    fn extract_prompt_cache_settings(
        prompt_cache: Option<PromptCachingConfig>,
    ) -> (bool, OpenAIPromptCacheSettings) {
        if let Some(cfg) = prompt_cache {
            let provider_settings = cfg.providers.openai;
            let enabled = cfg.enabled && provider_settings.enabled;
            (enabled, provider_settings)
        } else {
            (false, OpenAIPromptCacheSettings::default())
        }
    }

    fn supports_temperature_parameter(model: &str) -> bool {
        // GPT-5 variants and the GPT-5 Codex model don't accept the temperature parameter;
        // all other OpenAI models generally support it.
        !Self::is_gpt5_codex_model(model)
            && model != models::openai::GPT_5
            && model != models::openai::GPT_5_MINI
            && model != models::openai::GPT_5_NANO
    }

    fn default_request(&self, prompt: &str) -> LLMRequest {
        LLMRequest {
            messages: vec![Message::user(prompt.to_string())],
            system_prompt: None,
            tools: None,
            model: self.model.clone(),
            max_tokens: None,
            temperature: None,
            stream: false,
            tool_choice: None,
            parallel_tool_calls: None,
            parallel_tool_config: None,
            reasoning_effort: None,
        }
    }

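    /// Interpret an `LLMClient` prompt: if it looks like a serialized chat-completions request
    /// body (a JSON object with `messages`), parse it into a full `LLMRequest`; otherwise treat
    /// the prompt as a single user message.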
    fn parse_client_prompt(&self, prompt: &str) -> LLMRequest {
        let trimmed = prompt.trim_start();
        if trimmed.starts_with('{') {
            if let Ok(value) = serde_json::from_str::<Value>(trimmed) {
                if let Some(request) = self.parse_chat_request(&value) {
                    return request;
                }
            }
        }

        self.default_request(prompt)
    }

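    /// Build an `LLMRequest` from a chat-completions style JSON body, mapping system, user,
    /// assistant, and tool messages along with tools, sampling, and reasoning settings.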
    fn parse_chat_request(&self, value: &Value) -> Option<LLMRequest> {
        let messages_value = value.get("messages")?.as_array()?;
        let mut system_prompt = None;
        let mut messages = Vec::new();

        for entry in messages_value {
            let role = entry
                .get("role")
                .and_then(|r| r.as_str())
                .unwrap_or(crate::config::constants::message_roles::USER);
            let content = entry.get("content");
            let text_content = content.map(Self::extract_content_text).unwrap_or_default();

            match role {
                "system" => {
                    if system_prompt.is_none() && !text_content.is_empty() {
                        system_prompt = Some(text_content);
                    }
                }
                "assistant" => {
                    let tool_calls = entry
                        .get("tool_calls")
                        .and_then(|tc| tc.as_array())
                        .map(|calls| {
                            calls
                                .iter()
                                .filter_map(|call| {
                                    let id = call.get("id").and_then(|v| v.as_str())?;
                                    let function = call.get("function")?;
                                    let name = function.get("name").and_then(|v| v.as_str())?;
                                    let arguments = function.get("arguments");
                                    let serialized = arguments.map_or("{}".to_string(), |value| {
                                        if value.is_string() {
                                            value.as_str().unwrap_or("").to_string()
                                        } else {
                                            value.to_string()
                                        }
                                    });
                                    Some(ToolCall::function(
                                        id.to_string(),
                                        name.to_string(),
                                        serialized,
                                    ))
                                })
                                .collect::<Vec<_>>()
                        })
                        .filter(|calls| !calls.is_empty());

                    let message = if let Some(calls) = tool_calls {
                        Message {
                            role: MessageRole::Assistant,
                            content: text_content,
                            tool_calls: Some(calls),
                            tool_call_id: None,
                        }
                    } else {
                        Message::assistant(text_content)
                    };
                    messages.push(message);
                }
                "tool" => {
                    let tool_call_id = entry
                        .get("tool_call_id")
                        .and_then(|id| id.as_str())
                        .map(|s| s.to_string());
                    let content_value = entry
                        .get("content")
                        .map(|value| {
                            if text_content.is_empty() {
                                value.to_string()
                            } else {
                                text_content.clone()
                            }
                        })
                        .unwrap_or_else(|| text_content.clone());
                    messages.push(Message {
                        role: MessageRole::Tool,
                        content: content_value,
                        tool_calls: None,
                        tool_call_id,
                    });
                }
                _ => {
                    messages.push(Message::user(text_content));
                }
            }
        }

        if messages.is_empty() {
            return None;
        }

        let tools = value.get("tools").and_then(|tools_value| {
            let tools_array = tools_value.as_array()?;
            let converted: Vec<_> = tools_array
                .iter()
                .filter_map(|tool| {
                    let function = tool.get("function")?;
                    let name = function.get("name").and_then(|n| n.as_str())?;
                    let description = function
                        .get("description")
                        .and_then(|d| d.as_str())
                        .unwrap_or("")
                        .to_string();
                    let parameters = function
                        .get("parameters")
                        .cloned()
                        .unwrap_or_else(|| json!({}));
                    Some(ToolDefinition::function(
                        name.to_string(),
                        description,
                        parameters,
                    ))
                })
                .collect();

            if converted.is_empty() {
                None
            } else {
                Some(converted)
            }
        });
        let temperature = value
            .get("temperature")
            .and_then(|v| v.as_f64())
            .map(|v| v as f32);
        let max_tokens = value
            .get(MAX_COMPLETION_TOKENS_FIELD)
            .or_else(|| value.get("max_tokens"))
            .and_then(|v| v.as_u64())
            .map(|v| v as u32);
        let stream = value
            .get("stream")
            .and_then(|v| v.as_bool())
            .unwrap_or(false);
        let tool_choice = value.get("tool_choice").and_then(Self::parse_tool_choice);
        let parallel_tool_calls = value.get("parallel_tool_calls").and_then(|v| v.as_bool());
        let reasoning_effort = value
            .get("reasoning_effort")
            .and_then(|v| v.as_str())
            .and_then(ReasoningEffortLevel::from_str)
            .or_else(|| {
                value
                    .get("reasoning")
                    .and_then(|r| r.get("effort"))
                    .and_then(|effort| effort.as_str())
                    .and_then(ReasoningEffortLevel::from_str)
            });

        let model = value
            .get("model")
            .and_then(|m| m.as_str())
            .unwrap_or(&self.model)
            .to_string();

        Some(LLMRequest {
            messages,
            system_prompt,
            tools,
            model,
            max_tokens,
            temperature,
            stream,
            tool_choice,
            parallel_tool_calls,
            parallel_tool_config: None,
            reasoning_effort,
        })
    }

    fn extract_content_text(content: &Value) -> String {
        match content {
            Value::String(text) => text.to_string(),
            Value::Array(parts) => parts
                .iter()
                .filter_map(|part| {
                    if let Some(text) = part.get("text").and_then(|t| t.as_str()) {
                        Some(text.to_string())
                    } else if let Some(Value::String(text)) = part.get("content") {
                        Some(text.clone())
                    } else {
                        None
                    }
                })
                .collect::<Vec<_>>()
                .join(""),
            _ => String::new(),
        }
    }

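    /// Translate an OpenAI `tool_choice` value (string shorthand or object form) into the
    /// provider-agnostic `ToolChoice` type.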
    fn parse_tool_choice(choice: &Value) -> Option<ToolChoice> {
        match choice {
            Value::String(value) => match value.as_str() {
                "auto" => Some(ToolChoice::auto()),
                "none" => Some(ToolChoice::none()),
                "required" => Some(ToolChoice::any()),
                _ => None,
            },
            Value::Object(map) => {
                let choice_type = map.get("type").and_then(|t| t.as_str())?;
                match choice_type {
                    "function" => map
                        .get("function")
                        .and_then(|f| f.get("name"))
                        .and_then(|n| n.as_str())
                        .map(|name| ToolChoice::function(name.to_string())),
                    "auto" => Some(ToolChoice::auto()),
                    "none" => Some(ToolChoice::none()),
                    "any" | "required" => Some(ToolChoice::any()),
                    _ => None,
                }
            }
            _ => None,
        }
    }

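    /// Convert an `LLMRequest` into a Chat Completions request body.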
    fn convert_to_openai_format(&self, request: &LLMRequest) -> Result<Value, LLMError> {
        let mut messages = Vec::new();

        if let Some(system_prompt) = &request.system_prompt {
            messages.push(json!({
                "role": crate::config::constants::message_roles::SYSTEM,
                "content": system_prompt
            }));
        }

        for msg in &request.messages {
            let role = msg.role.as_openai_str();
            let mut message = json!({
                "role": role,
                "content": msg.content
            });

            if msg.role == MessageRole::Assistant {
                if let Some(tool_calls) = &msg.tool_calls {
                    if !tool_calls.is_empty() {
                        let tool_calls_json: Vec<Value> = tool_calls
                            .iter()
                            .map(|tc| {
                                json!({
                                    "id": tc.id,
                                    "type": "function",
                                    "function": {
                                        "name": tc.function.name,
                                        "arguments": tc.function.arguments
                                    }
                                })
                            })
                            .collect();
                        message["tool_calls"] = Value::Array(tool_calls_json);
                    }
                }
            }

            if msg.role == MessageRole::Tool {
                if let Some(tool_call_id) = &msg.tool_call_id {
                    message["tool_call_id"] = Value::String(tool_call_id.clone());
                }
            }

            messages.push(message);
        }

        if messages.is_empty() {
            let formatted_error = error_display::format_llm_error("OpenAI", "No messages provided");
            return Err(LLMError::InvalidRequest(formatted_error));
        }

        let mut openai_request = json!({
            "model": request.model,
            "messages": messages,
            "stream": request.stream
        });

        if let Some(temperature) = request.temperature {
            if Self::supports_temperature_parameter(&request.model) {
                openai_request["temperature"] = json!(temperature);
            }
        }

        if let Some(max_tokens) = request.max_tokens {
            openai_request[MAX_COMPLETION_TOKENS_FIELD] = json!(max_tokens);
        }

        if let Some(tools) = &request.tools {
            if let Some(serialized) = Self::serialize_tools(tools) {
                openai_request["tools"] = serialized;
            }
        }

        if let Some(tool_choice) = &request.tool_choice {
            openai_request["tool_choice"] = tool_choice.to_provider_format("openai");
        }

        if let Some(parallel) = request.parallel_tool_calls {
            openai_request["parallel_tool_calls"] = Value::Bool(parallel);
        }

        if let Some(effort) = request.reasoning_effort {
            if self.supports_reasoning_effort(&request.model) {
                if let Some(payload) = reasoning_parameters_for(Provider::OpenAI, effort) {
                    openai_request["reasoning"] = payload;
                } else {
                    openai_request["reasoning"] = json!({ "effort": effort.as_str() });
                }
            }
        }

        Ok(openai_request)
    }

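    /// Convert an `LLMRequest` into a Responses API request body, using the Codex developer-prompt
    /// input builder for GPT-5 Codex and the standard builder for other Responses-capable models.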
    fn convert_to_openai_responses_format(&self, request: &LLMRequest) -> Result<Value, LLMError> {
        let input = if Self::is_gpt5_codex_model(&request.model) {
            build_codex_responses_input_openai(request)?
        } else {
            build_standard_responses_input_openai(request)?
        };

        if input.is_empty() {
            let formatted_error =
                error_display::format_llm_error("OpenAI", "No messages provided for Responses API");
            return Err(LLMError::InvalidRequest(formatted_error));
        }

        let mut openai_request = json!({
            "model": request.model,
            "input": input,
            "stream": request.stream
        });

        if let Some(temperature) = request.temperature {
            if Self::supports_temperature_parameter(&request.model) {
                openai_request["temperature"] = json!(temperature);
            }
        }

        if let Some(max_tokens) = request.max_tokens {
            openai_request["max_output_tokens"] = json!(max_tokens);
        }

        if let Some(tools) = &request.tools {
            if let Some(serialized) = Self::serialize_tools(tools) {
                openai_request["tools"] = serialized;
            }
        }

        if let Some(tool_choice) = &request.tool_choice {
            openai_request["tool_choice"] = tool_choice.to_provider_format("openai");
        }

        if let Some(parallel) = request.parallel_tool_calls {
            openai_request["parallel_tool_calls"] = Value::Bool(parallel);
        }

        if let Some(effort) = request.reasoning_effort {
            if self.supports_reasoning_effort(&request.model) {
                if let Some(payload) = reasoning_parameters_for(Provider::OpenAI, effort) {
                    openai_request["reasoning"] = payload;
                } else {
                    openai_request["reasoning"] = json!({ "effort": effort.as_str() });
                }
            }
        }

        // Reasoning models default to medium effort when the request didn't specify one.
        if Self::is_reasoning_model(&request.model) && openai_request.get("reasoning").is_none() {
            openai_request["reasoning"] = json!({ "effort": "medium" });
        }

        Ok(openai_request)
    }

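    /// Parse a Chat Completions response into an `LLMResponse`, extracting content, tool calls,
    /// optional reasoning traces, usage (including cached prompt tokens), and the finish reason.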
    fn parse_openai_response(&self, response_json: Value) -> Result<LLMResponse, LLMError> {
        let choices = response_json
            .get("choices")
            .and_then(|c| c.as_array())
            .ok_or_else(|| {
                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    "Invalid response format: missing choices",
                );
                LLMError::Provider(formatted_error)
            })?;

        if choices.is_empty() {
            let formatted_error =
                error_display::format_llm_error("OpenAI", "No choices in response");
            return Err(LLMError::Provider(formatted_error));
        }

        let choice = &choices[0];
        let message = choice.get("message").ok_or_else(|| {
            let formatted_error = error_display::format_llm_error(
                "OpenAI",
                "Invalid response format: missing message",
            );
            LLMError::Provider(formatted_error)
        })?;

        let content = match message.get("content") {
            Some(Value::String(text)) => Some(text.to_string()),
            Some(Value::Array(parts)) => {
                let text = parts
                    .iter()
                    .filter_map(|part| part.get("text").and_then(|t| t.as_str()))
                    .collect::<Vec<_>>()
                    .join("");
                if text.is_empty() { None } else { Some(text) }
            }
            _ => None,
        };

        let tool_calls = message
            .get("tool_calls")
            .and_then(|tc| tc.as_array())
            .map(|calls| {
                calls
                    .iter()
                    .filter_map(|call| {
                        let id = call.get("id").and_then(|v| v.as_str())?;
                        let function = call.get("function")?;
                        let name = function.get("name").and_then(|v| v.as_str())?;
                        let arguments = function.get("arguments");
                        let serialized = arguments.map_or("{}".to_string(), |value| {
                            if value.is_string() {
                                value.as_str().unwrap_or("").to_string()
                            } else {
                                value.to_string()
                            }
                        });
                        Some(ToolCall::function(
                            id.to_string(),
                            name.to_string(),
                            serialized,
                        ))
                    })
                    .collect::<Vec<_>>()
            })
            .filter(|calls| !calls.is_empty());

        let reasoning = message
            .get("reasoning")
            .and_then(extract_reasoning_trace)
            .or_else(|| choice.get("reasoning").and_then(extract_reasoning_trace));

        let finish_reason = choice
            .get("finish_reason")
            .and_then(|fr| fr.as_str())
            .map(|fr| match fr {
                "stop" => FinishReason::Stop,
                "length" => FinishReason::Length,
                "tool_calls" => FinishReason::ToolCalls,
                "content_filter" => FinishReason::ContentFilter,
                other => FinishReason::Error(other.to_string()),
            })
            .unwrap_or(FinishReason::Stop);

        Ok(LLMResponse {
            content,
            tool_calls,
            usage: response_json.get("usage").map(|usage_value| {
                let cached_prompt_tokens =
                    if self.prompt_cache_enabled && self.prompt_cache_settings.surface_metrics {
                        usage_value
                            .get("prompt_tokens_details")
                            .and_then(|details| details.get("cached_tokens"))
                            .and_then(|value| value.as_u64())
                            .map(|value| value as u32)
                    } else {
                        None
                    };

                crate::llm::provider::Usage {
                    prompt_tokens: usage_value
                        .get("prompt_tokens")
                        .and_then(|pt| pt.as_u64())
                        .unwrap_or(0) as u32,
                    completion_tokens: usage_value
                        .get("completion_tokens")
                        .and_then(|ct| ct.as_u64())
                        .unwrap_or(0) as u32,
                    total_tokens: usage_value
                        .get("total_tokens")
                        .and_then(|tt| tt.as_u64())
                        .unwrap_or(0) as u32,
                    cached_prompt_tokens,
                    cache_creation_tokens: None,
                    cache_read_tokens: None,
                }
            }),
            finish_reason,
            reasoning,
        })
    }

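    /// Parse a Responses API payload into an `LLMResponse`, collecting text, reasoning, and
    /// tool-call fragments from each output message.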
    fn parse_openai_responses_response(
        &self,
        response_json: Value,
    ) -> Result<LLMResponse, LLMError> {
        let output = response_json
            .get("output")
            .or_else(|| response_json.get("choices"))
            .and_then(|value| value.as_array())
            .ok_or_else(|| {
                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    "Invalid response format: missing output",
                );
                LLMError::Provider(formatted_error)
            })?;

        if output.is_empty() {
            let formatted_error =
                error_display::format_llm_error("OpenAI", "No output in response");
            return Err(LLMError::Provider(formatted_error));
        }

        let mut content_fragments = Vec::new();
        let mut reasoning_fragments = Vec::new();
        let mut tool_calls_vec = Vec::new();

        for item in output {
            let item_type = item
                .get("type")
                .and_then(|value| value.as_str())
                .unwrap_or("");
            if item_type != "message" {
                continue;
            }

            if let Some(content_array) = item.get("content").and_then(|value| value.as_array()) {
                for entry in content_array {
                    let entry_type = entry
                        .get("type")
                        .and_then(|value| value.as_str())
                        .unwrap_or("");
                    match entry_type {
                        "output_text" | "text" => {
                            if let Some(text) = entry.get("text").and_then(|value| value.as_str()) {
                                if !text.is_empty() {
                                    content_fragments.push(text.to_string());
                                }
                            }
                        }
                        "reasoning" => {
                            if let Some(text) = entry.get("text").and_then(|value| value.as_str()) {
                                if !text.is_empty() {
                                    reasoning_fragments.push(text.to_string());
                                }
                            }
                        }
                        "tool_call" => {
                            let (name_value, arguments_value) = if let Some(function) =
                                entry.get("function").and_then(|value| value.as_object())
                            {
                                let name = function.get("name").and_then(|value| value.as_str());
                                let arguments = function.get("arguments");
                                (name, arguments)
                            } else {
                                let name = entry.get("name").and_then(|value| value.as_str());
                                let arguments = entry.get("arguments");
                                (name, arguments)
                            };

                            if let Some(name) = name_value {
                                let id = entry
                                    .get("id")
                                    .and_then(|value| value.as_str())
                                    .unwrap_or("");
                                let serialized =
                                    arguments_value.map_or("{}".to_string(), |value| {
                                        if value.is_string() {
                                            value.as_str().unwrap_or("").to_string()
                                        } else {
                                            value.to_string()
                                        }
                                    });
                                tool_calls_vec.push(ToolCall::function(
                                    id.to_string(),
                                    name.to_string(),
                                    serialized,
                                ));
                            }
                        }
                        _ => {}
                    }
                }
            }
        }

        let content = if content_fragments.is_empty() {
            None
        } else {
            Some(content_fragments.join(""))
        };

        let reasoning = if reasoning_fragments.is_empty() {
            None
        } else {
            Some(reasoning_fragments.join(""))
        };

        let tool_calls = if tool_calls_vec.is_empty() {
            None
        } else {
            Some(tool_calls_vec)
        };

        let usage = response_json.get("usage").map(|usage_value| {
            let cached_prompt_tokens =
                if self.prompt_cache_enabled && self.prompt_cache_settings.surface_metrics {
                    usage_value
                        .get("prompt_tokens_details")
                        .and_then(|details| details.get("cached_tokens"))
                        .or_else(|| usage_value.get("prompt_cache_hit_tokens"))
                        .and_then(|value| value.as_u64())
                        .map(|value| value as u32)
                } else {
                    None
                };

            crate::llm::provider::Usage {
                prompt_tokens: usage_value
                    .get("input_tokens")
                    .or_else(|| usage_value.get("prompt_tokens"))
                    .and_then(|pt| pt.as_u64())
                    .unwrap_or(0) as u32,
                completion_tokens: usage_value
                    .get("output_tokens")
                    .or_else(|| usage_value.get("completion_tokens"))
                    .and_then(|ct| ct.as_u64())
                    .unwrap_or(0) as u32,
                total_tokens: usage_value
                    .get("total_tokens")
                    .and_then(|tt| tt.as_u64())
                    .unwrap_or(0) as u32,
                cached_prompt_tokens,
                cache_creation_tokens: None,
                cache_read_tokens: None,
            }
        });

        let stop_reason = response_json
            .get("stop_reason")
            .and_then(|value| value.as_str())
            .or_else(|| {
                output
                    .iter()
                    .find_map(|item| item.get("stop_reason").and_then(|value| value.as_str()))
            })
            .unwrap_or("stop");

        let finish_reason = match stop_reason {
            "stop" => FinishReason::Stop,
            "max_output_tokens" | "length" => FinishReason::Length,
            "tool_use" | "tool_calls" => FinishReason::ToolCalls,
            other => FinishReason::Error(other.to_string()),
        };

        Ok(LLMResponse {
            content,
            tool_calls,
            usage,
            finish_reason,
            reasoning,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn sample_tool() -> ToolDefinition {
        ToolDefinition::function(
            "search_workspace".to_string(),
            "Search project files".to_string(),
            json!({
                "type": "object",
                "properties": {
                    "query": {"type": "string"}
                },
                "required": ["query"],
                "additionalProperties": false
            }),
        )
    }

    fn sample_request(model: &str) -> LLMRequest {
        LLMRequest {
            messages: vec![Message::user("Hello".to_string())],
            system_prompt: None,
            tools: Some(vec![sample_tool()]),
            model: model.to_string(),
            max_tokens: None,
            temperature: None,
            stream: false,
            tool_choice: None,
            parallel_tool_calls: None,
            parallel_tool_config: None,
            reasoning_effort: None,
        }
    }

    #[test]
    fn serialize_tools_wraps_function_definition() {
        let tools = vec![sample_tool()];
        let serialized = OpenAIProvider::serialize_tools(&tools).expect("tools should serialize");
        let serialized_tools = serialized
            .as_array()
            .expect("serialized tools should be an array");
        assert_eq!(serialized_tools.len(), 1);

        let tool_value = serialized_tools[0]
            .as_object()
            .expect("tool should be serialized as object");
        assert_eq!(
            tool_value.get("type").and_then(Value::as_str),
            Some("function")
        );
        assert!(tool_value.contains_key("function"));
        assert!(!tool_value.contains_key("name"));

        let function_value = tool_value
            .get("function")
            .and_then(Value::as_object)
            .expect("function payload missing");
        assert_eq!(
            function_value.get("name").and_then(Value::as_str),
            Some("search_workspace")
        );
        assert!(function_value.contains_key("parameters"));
    }

    #[test]
    fn chat_completions_payload_uses_function_wrapper() {
        let provider =
            OpenAIProvider::with_model(String::new(), models::openai::DEFAULT_MODEL.to_string());
        let request = sample_request(models::openai::DEFAULT_MODEL);
        let payload = provider
            .convert_to_openai_format(&request)
            .expect("conversion should succeed");

        let tools = payload
            .get("tools")
            .and_then(Value::as_array)
            .expect("tools should exist on payload");
        let tool_object = tools[0].as_object().expect("tool entry should be object");
        assert!(tool_object.contains_key("function"));
        assert!(!tool_object.contains_key("name"));
    }

    #[test]
    fn responses_payload_uses_function_wrapper() {
        let provider =
            OpenAIProvider::with_model(String::new(), models::openai::GPT_5_CODEX.to_string());
        let request = sample_request(models::openai::GPT_5_CODEX);
        let payload = provider
            .convert_to_openai_responses_format(&request)
            .expect("conversion should succeed");

        let tools = payload
            .get("tools")
            .and_then(Value::as_array)
            .expect("tools should exist on payload");
        let tool_object = tools[0].as_object().expect("tool entry should be object");
        assert!(tool_object.contains_key("function"));
        assert!(!tool_object.contains_key("name"));
    }

    #[test]
    fn chat_completions_uses_max_completion_tokens_field() {
        let provider =
            OpenAIProvider::with_model(String::new(), models::openai::DEFAULT_MODEL.to_string());
        let mut request = sample_request(models::openai::DEFAULT_MODEL);
        request.max_tokens = Some(512);

        let payload = provider
            .convert_to_openai_format(&request)
            .expect("conversion should succeed");

        let max_tokens_value = payload
            .get(MAX_COMPLETION_TOKENS_FIELD)
            .and_then(Value::as_u64)
            .expect("max completion tokens should be set");
        assert_eq!(max_tokens_value, 512);
        assert!(payload.get("max_tokens").is_none());
    }
}

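/// Build the Responses API `input` array for non-Codex models, mapping system prompts to
/// developer messages and preserving assistant tool calls and tool results.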
fn build_standard_responses_input_openai(request: &LLMRequest) -> Result<Vec<Value>, LLMError> {
    let mut input = Vec::new();

    if let Some(system_prompt) = &request.system_prompt {
        if !system_prompt.trim().is_empty() {
            input.push(json!({
                "role": "developer",
                "content": [{
                    "type": "input_text",
                    "text": system_prompt.clone()
                }]
            }));
        }
    }

    for msg in &request.messages {
        match msg.role {
            MessageRole::System => {
                if !msg.content.trim().is_empty() {
                    input.push(json!({
                        "role": "developer",
                        "content": [{
                            "type": "input_text",
                            "text": msg.content.clone()
                        }]
                    }));
                }
            }
            MessageRole::User => {
                input.push(json!({
                    "role": "user",
                    "content": [{
                        "type": "input_text",
                        "text": msg.content.clone()
                    }]
                }));
            }
            MessageRole::Assistant => {
                let mut content_parts = Vec::new();
                if !msg.content.is_empty() {
                    content_parts.push(json!({
                        "type": "output_text",
                        "text": msg.content.clone()
                    }));
                }

                if let Some(tool_calls) = &msg.tool_calls {
                    for call in tool_calls {
                        content_parts.push(json!({
                            "type": "tool_call",
                            "id": call.id.clone(),
                            "function": {
                                "name": call.function.name.clone(),
                                "arguments": call.function.arguments.clone()
                            }
                        }));
                    }
                }

                if !content_parts.is_empty() {
                    input.push(json!({
                        "role": "assistant",
                        "content": content_parts
                    }));
                }
            }
            MessageRole::Tool => {
                let tool_call_id = msg.tool_call_id.clone().ok_or_else(|| {
                    let formatted_error = error_display::format_llm_error(
                        "OpenAI",
                        "Tool messages must include tool_call_id for Responses API",
                    );
                    LLMError::InvalidRequest(formatted_error)
                })?;

                let mut tool_content = Vec::new();
                if !msg.content.trim().is_empty() {
                    tool_content.push(json!({
                        "type": "output_text",
                        "text": msg.content.clone()
                    }));
                }

                let mut tool_result = json!({
                    "type": "tool_result",
                    "tool_call_id": tool_call_id
                });

                if !tool_content.is_empty() {
                    if let Value::Object(ref mut map) = tool_result {
                        map.insert("content".to_string(), json!(tool_content));
                    }
                }

                input.push(json!({
                    "role": "tool",
                    "content": [tool_result]
                }));
            }
        }
    }

    Ok(input)
}

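/// Build the Responses API `input` array for GPT-5 Codex: system guidance is folded into a single
/// developer prompt (via `gpt5_codex_developer_prompt`) that is inserted at the front of the input.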
fn build_codex_responses_input_openai(request: &LLMRequest) -> Result<Vec<Value>, LLMError> {
    let mut additional_guidance = Vec::new();

    if let Some(system_prompt) = &request.system_prompt {
        let trimmed = system_prompt.trim();
        if !trimmed.is_empty() {
            additional_guidance.push(trimmed.to_string());
        }
    }

    let mut input = Vec::new();

    for msg in &request.messages {
        match msg.role {
            MessageRole::System => {
                let trimmed = msg.content.trim();
                if !trimmed.is_empty() {
                    additional_guidance.push(trimmed.to_string());
                }
            }
            MessageRole::User => {
                input.push(json!({
                    "role": "user",
                    "content": [{
                        "type": "input_text",
                        "text": msg.content.clone()
                    }]
                }));
            }
            MessageRole::Assistant => {
                let mut content_parts = Vec::new();
                if !msg.content.is_empty() {
                    content_parts.push(json!({
                        "type": "output_text",
                        "text": msg.content.clone()
                    }));
                }

                if let Some(tool_calls) = &msg.tool_calls {
                    for call in tool_calls {
                        content_parts.push(json!({
                            "type": "tool_call",
                            "id": call.id.clone(),
                            "function": {
                                "name": call.function.name.clone(),
                                "arguments": call.function.arguments.clone()
                            }
                        }));
                    }
                }

                if !content_parts.is_empty() {
                    input.push(json!({
                        "role": "assistant",
                        "content": content_parts
                    }));
                }
            }
            MessageRole::Tool => {
                let tool_call_id = msg.tool_call_id.clone().ok_or_else(|| {
                    let formatted_error = error_display::format_llm_error(
                        "OpenAI",
                        "Tool messages must include tool_call_id for Responses API",
                    );
                    LLMError::InvalidRequest(formatted_error)
                })?;

                let mut tool_content = Vec::new();
                if !msg.content.trim().is_empty() {
                    tool_content.push(json!({
                        "type": "output_text",
                        "text": msg.content.clone()
                    }));
                }

                let mut tool_result = json!({
                    "type": "tool_result",
                    "tool_call_id": tool_call_id
                });

                if !tool_content.is_empty() {
                    if let Value::Object(ref mut map) = tool_result {
                        map.insert("content".to_string(), json!(tool_content));
                    }
                }

                input.push(json!({
                    "role": "tool",
                    "content": [tool_result]
                }));
            }
        }
    }

    let developer_prompt = gpt5_codex_developer_prompt(&additional_guidance);
    input.insert(
        0,
        json!({
            "role": "developer",
            "content": [{
                "type": "input_text",
                "text": developer_prompt
            }]
        }),
    );

    Ok(input)
}

#[async_trait]
impl LLMProvider for OpenAIProvider {
    fn name(&self) -> &str {
        "openai"
    }

    fn supports_reasoning(&self, _model: &str) -> bool {
        false
    }

    fn supports_reasoning_effort(&self, model: &str) -> bool {
        let requested = if model.trim().is_empty() {
            self.model.as_str()
        } else {
            model
        };
        models::openai::REASONING_MODELS
            .iter()
            .any(|candidate| *candidate == requested)
    }

    async fn generate(&self, request: LLMRequest) -> Result<LLMResponse, LLMError> {
        let mut request = request;
        if request.model.trim().is_empty() {
            request.model = self.model.clone();
        }

        if Self::uses_responses_api(&request.model) {
            let openai_request = self.convert_to_openai_responses_format(&request)?;
            let url = format!("{}/responses", self.base_url);

            let response = self
                .http_client
                .post(&url)
                .bearer_auth(&self.api_key)
                .json(&openai_request)
                .send()
                .await
                .map_err(|e| {
                    let formatted_error =
                        error_display::format_llm_error("OpenAI", &format!("Network error: {}", e));
                    LLMError::Network(formatted_error)
                })?;

            if !response.status().is_success() {
                let status = response.status();
                let error_text = response.text().await.unwrap_or_default();

                if status.as_u16() == 429
                    || error_text.contains("insufficient_quota")
                    || error_text.contains("quota")
                    || error_text.contains("rate limit")
                {
                    return Err(LLMError::RateLimit);
                }

                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    &format!("HTTP {}: {}", status, error_text),
                );
                return Err(LLMError::Provider(formatted_error));
            }

            let openai_response: Value = response.json().await.map_err(|e| {
                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    &format!("Failed to parse response: {}", e),
                );
                LLMError::Provider(formatted_error)
            })?;

            self.parse_openai_responses_response(openai_response)
        } else {
            let openai_request = self.convert_to_openai_format(&request)?;
            let url = format!("{}/chat/completions", self.base_url);

            let response = self
                .http_client
                .post(&url)
                .bearer_auth(&self.api_key)
                .json(&openai_request)
                .send()
                .await
                .map_err(|e| {
                    let formatted_error =
                        error_display::format_llm_error("OpenAI", &format!("Network error: {}", e));
                    LLMError::Network(formatted_error)
                })?;

            if !response.status().is_success() {
                let status = response.status();
                let error_text = response.text().await.unwrap_or_default();

                if status.as_u16() == 429
                    || error_text.contains("insufficient_quota")
                    || error_text.contains("quota")
                    || error_text.contains("rate limit")
                {
                    return Err(LLMError::RateLimit);
                }

                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    &format!("HTTP {}: {}", status, error_text),
                );
                return Err(LLMError::Provider(formatted_error));
            }

            let openai_response: Value = response.json().await.map_err(|e| {
                let formatted_error = error_display::format_llm_error(
                    "OpenAI",
                    &format!("Failed to parse response: {}", e),
                );
                LLMError::Provider(formatted_error)
            })?;

            self.parse_openai_response(openai_response)
        }
    }

    fn supported_models(&self) -> Vec<String> {
        models::openai::SUPPORTED_MODELS
            .iter()
            .map(|s| s.to_string())
            .collect()
    }

    fn validate_request(&self, request: &LLMRequest) -> Result<(), LLMError> {
        if request.messages.is_empty() {
            let formatted_error =
                error_display::format_llm_error("OpenAI", "Messages cannot be empty");
            return Err(LLMError::InvalidRequest(formatted_error));
        }

        if !self.supported_models().contains(&request.model) {
            let formatted_error = error_display::format_llm_error(
                "OpenAI",
                &format!("Unsupported model: {}", request.model),
            );
            return Err(LLMError::InvalidRequest(formatted_error));
        }

        for message in &request.messages {
            if let Err(err) = message.validate_for_provider("openai") {
                let formatted = error_display::format_llm_error("OpenAI", &err);
                return Err(LLMError::InvalidRequest(formatted));
            }
        }

        Ok(())
    }
}

#[async_trait]
impl LLMClient for OpenAIProvider {
    async fn generate(&mut self, prompt: &str) -> Result<llm_types::LLMResponse, LLMError> {
        let request = self.parse_client_prompt(prompt);
        let request_model = request.model.clone();
        let response = LLMProvider::generate(self, request).await?;

        Ok(llm_types::LLMResponse {
            content: response.content.unwrap_or_default(),
            model: request_model,
            usage: response.usage.map(|u| llm_types::Usage {
                prompt_tokens: u.prompt_tokens as usize,
                completion_tokens: u.completion_tokens as usize,
                total_tokens: u.total_tokens as usize,
                cached_prompt_tokens: u.cached_prompt_tokens.map(|v| v as usize),
                cache_creation_tokens: u.cache_creation_tokens.map(|v| v as usize),
                cache_read_tokens: u.cache_read_tokens.map(|v| v as usize),
            }),
            reasoning: response.reasoning,
        })
    }

    fn backend_kind(&self) -> llm_types::BackendKind {
        llm_types::BackendKind::OpenAI
    }

    fn model_id(&self) -> &str {
        &self.model
    }
}