1use crate::providers::traits::{
2 ChatMessage, ChatRequest as ProviderChatRequest, ChatResponse as ProviderChatResponse,
3 Provider, TokenUsage, ToolCall as ProviderToolCall,
4};
5use crate::tools::ToolSpec;
6use async_trait::async_trait;
7use reqwest::Client;
8use serde::{Deserialize, Serialize};
9
/// Provider backed by the OpenAI Chat Completions API (or any
/// OpenAI-compatible endpoint selected via a custom base URL).
pub struct OpenAiProvider {
    // API root, e.g. "https://api.openai.com/v1"; stored without a trailing slash.
    base_url: String,
    // Bearer token sent in the Authorization header; when None, chat calls
    // fail early with a configuration error (see the Provider impl).
    credential: Option<String>,
    // Optional completion-token cap, forwarded as `max_tokens` when set.
    max_tokens: Option<u32>,
}
15
/// Request body for the simple (no-tools) `/chat/completions` call.
#[derive(Debug, Serialize)]
struct ChatRequest {
    model: String,
    messages: Vec<Message>,
    temperature: f64,
    // Omitted from the JSON entirely when unset.
    #[serde(skip_serializing_if = "Option::is_none")]
    max_tokens: Option<u32>,
}
24
/// One chat turn in the simple request format: a role ("system"/"user"/...)
/// plus its plain-text content.
#[derive(Debug, Serialize)]
struct Message {
    role: String,
    content: String,
}
30
/// Top-level response of the simple chat call; only `choices` is read.
#[derive(Debug, Deserialize)]
struct ChatResponse {
    choices: Vec<Choice>,
}
35
/// A single completion candidate; only the first choice is ever used.
#[derive(Debug, Deserialize)]
struct Choice {
    message: ResponseMessage,
}
40
/// `message` payload of a simple chat choice.
#[derive(Debug, Deserialize)]
struct ResponseMessage {
    // Main assistant text; may be null or empty for reasoning-only replies.
    #[serde(default)]
    content: Option<String>,
    // Reasoning text some OpenAI-compatible backends emit alongside (or
    // instead of) `content`; used only as a fallback when `content` is empty.
    #[serde(default)]
    reasoning_content: Option<String>,
}
49
50impl ResponseMessage {
51 fn effective_content(&self) -> String {
52 match &self.content {
53 Some(c) if !c.is_empty() => c.clone(),
54 _ => self.reasoning_content.clone().unwrap_or_default(),
55 }
56 }
57}
58
/// Request body for the tool-aware ("native") `/chat/completions` call.
#[derive(Debug, Serialize)]
struct NativeChatRequest {
    model: String,
    messages: Vec<NativeMessage>,
    temperature: f64,
    // Tool definitions in OpenAI wire format; omitted when no tools are used.
    #[serde(skip_serializing_if = "Option::is_none")]
    tools: Option<Vec<NativeToolSpec>>,
    // Set to "auto" whenever `tools` is present (see the request builders).
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_choice: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    max_tokens: Option<u32>,
}
71
/// Outgoing chat turn in the tool-aware request format.
#[derive(Debug, Serialize)]
struct NativeMessage {
    role: String,
    // Text body; None for assistant turns that carry only tool calls.
    #[serde(skip_serializing_if = "Option::is_none")]
    content: Option<String>,
    // Set on "tool" role messages to link a result to its originating call.
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_call_id: Option<String>,
    // Tool invocations replayed from assistant history turns.
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_calls: Option<Vec<NativeToolCall>>,
    // Reasoning text round-tripped from stored assistant history.
    #[serde(skip_serializing_if = "Option::is_none")]
    reasoning_content: Option<String>,
}
86
/// Tool definition in OpenAI wire format: `{"type": "function", "function": {...}}`.
#[derive(Debug, Serialize, Deserialize)]
struct NativeToolSpec {
    // Serialized as "type"; only "function" is accepted (see parse_native_tool_spec).
    #[serde(rename = "type")]
    kind: String,
    function: NativeToolFunctionSpec,
}
93
/// The `function` part of a tool definition: name, description, and a
/// JSON-Schema `parameters` object kept as raw JSON.
#[derive(Debug, Serialize, Deserialize)]
struct NativeToolFunctionSpec {
    name: String,
    description: String,
    parameters: serde_json::Value,
}
100
101fn parse_native_tool_spec(value: serde_json::Value) -> anyhow::Result<NativeToolSpec> {
102 let spec: NativeToolSpec = serde_json::from_value(value)
103 .map_err(|e| anyhow::anyhow!("Invalid OpenAI tool specification: {e}"))?;
104
105 if spec.kind != "function" {
106 anyhow::bail!(
107 "Invalid OpenAI tool specification: unsupported tool type '{}', expected 'function'",
108 spec.kind
109 );
110 }
111
112 Ok(spec)
113}
114
/// A tool invocation, both as returned by the API and as replayed in history.
#[derive(Debug, Serialize, Deserialize)]
struct NativeToolCall {
    // Call id; a UUID is generated downstream when the API omits it.
    #[serde(skip_serializing_if = "Option::is_none")]
    id: Option<String>,
    // Serialized as "type"; "function" when present.
    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
    kind: Option<String>,
    function: NativeFunctionCall,
}
123
/// Function name plus its arguments as a raw JSON-encoded string
/// (OpenAI sends arguments as a string, not a JSON object).
#[derive(Debug, Serialize, Deserialize)]
struct NativeFunctionCall {
    name: String,
    arguments: String,
}
129
/// Top-level response of the tool-aware chat call.
#[derive(Debug, Deserialize)]
struct NativeChatResponse {
    choices: Vec<NativeChoice>,
    // Token accounting; tolerated as absent for non-conforming servers.
    #[serde(default)]
    usage: Option<UsageInfo>,
}
136
/// `usage` block of a chat response; every field defaults to None so
/// partial or missing usage data never fails deserialization.
#[derive(Debug, Deserialize)]
struct UsageInfo {
    #[serde(default)]
    prompt_tokens: Option<u64>,
    #[serde(default)]
    completion_tokens: Option<u64>,
    // Nested breakdown carrying the prompt-cache hit count.
    #[serde(default)]
    prompt_tokens_details: Option<PromptTokensDetails>,
}
146
/// `prompt_tokens_details` sub-object; only `cached_tokens` is read.
#[derive(Debug, Deserialize)]
struct PromptTokensDetails {
    #[serde(default)]
    cached_tokens: Option<u64>,
}
152
/// A completion candidate in the tool-aware response; only the first is used.
#[derive(Debug, Deserialize)]
struct NativeChoice {
    message: NativeResponseMessage,
}
157
/// `message` payload of a tool-aware chat choice.
#[derive(Debug, Deserialize)]
struct NativeResponseMessage {
    // Assistant text; may be null/empty when the reply is tool calls only
    // or when a reasoning backend puts its output in `reasoning_content`.
    #[serde(default)]
    content: Option<String>,
    // Reasoning text from OpenAI-compatible reasoning backends; used as a
    // fallback when `content` is empty.
    #[serde(default)]
    reasoning_content: Option<String>,
    // Tool invocations requested by the model, if any.
    #[serde(default)]
    tool_calls: Option<Vec<NativeToolCall>>,
}
168
169impl NativeResponseMessage {
170 fn effective_content(&self) -> Option<String> {
171 match &self.content {
172 Some(c) if !c.is_empty() => Some(c.clone()),
173 _ => self.reasoning_content.clone(),
174 }
175 }
176}
177
178impl OpenAiProvider {
179 pub fn new(credential: Option<&str>) -> Self {
180 Self::with_base_url(None, credential)
181 }
182
183 pub fn with_base_url(base_url: Option<&str>, credential: Option<&str>) -> Self {
186 Self {
187 base_url: base_url
188 .map(|u| u.trim_end_matches('/').to_string())
189 .unwrap_or_else(|| "https://api.openai.com/v1".to_string()),
190 credential: credential.map(ToString::to_string),
191 max_tokens: None,
192 }
193 }
194
195 pub fn with_max_tokens(mut self, max_tokens: Option<u32>) -> Self {
197 self.max_tokens = max_tokens;
198 self
199 }
200
201 fn adjust_temperature_for_model(model: &str, requested_temperature: f64) -> f64 {
204 let requires_1_0 = matches!(
206 model,
207 "gpt-5"
208 | "gpt-5-2025-08-07"
209 | "gpt-5-mini"
210 | "gpt-5-mini-2025-08-07"
211 | "gpt-5-nano"
212 | "gpt-5-nano-2025-08-07"
213 | "gpt-5.1-chat-latest"
214 | "gpt-5.2-chat-latest"
215 | "gpt-5.3-chat-latest"
216 | "o1"
217 | "o1-2024-12-17"
218 | "o3"
219 | "o3-2025-04-16"
220 | "o3-mini"
221 | "o3-mini-2025-01-31"
222 | "o4-mini"
223 | "o4-mini-2025-04-16"
224 );
225
226 if requires_1_0 {
227 1.0
228 } else {
229 requested_temperature
230 }
231 }
232
233 fn convert_tools(tools: Option<&[ToolSpec]>) -> Option<Vec<NativeToolSpec>> {
234 tools.map(|items| {
235 items
236 .iter()
237 .map(|tool| NativeToolSpec {
238 kind: "function".to_string(),
239 function: NativeToolFunctionSpec {
240 name: tool.name.clone(),
241 description: tool.description.clone(),
242 parameters: tool.parameters.clone(),
243 },
244 })
245 .collect()
246 })
247 }
248
249 fn convert_messages(messages: &[ChatMessage]) -> Vec<NativeMessage> {
250 messages
251 .iter()
252 .map(|m| {
253 if m.role == "assistant" {
254 if let Ok(value) = serde_json::from_str::<serde_json::Value>(&m.content) {
255 if let Some(tool_calls_value) = value.get("tool_calls") {
256 if let Ok(parsed_calls) =
257 serde_json::from_value::<Vec<ProviderToolCall>>(
258 tool_calls_value.clone(),
259 )
260 {
261 let tool_calls = parsed_calls
262 .into_iter()
263 .map(|tc| NativeToolCall {
264 id: Some(tc.id),
265 kind: Some("function".to_string()),
266 function: NativeFunctionCall {
267 name: tc.name,
268 arguments: tc.arguments,
269 },
270 })
271 .collect::<Vec<_>>();
272 let content = value
273 .get("content")
274 .and_then(serde_json::Value::as_str)
275 .map(ToString::to_string);
276 let reasoning_content = value
277 .get("reasoning_content")
278 .and_then(serde_json::Value::as_str)
279 .map(ToString::to_string);
280 return NativeMessage {
281 role: "assistant".to_string(),
282 content,
283 tool_call_id: None,
284 tool_calls: Some(tool_calls),
285 reasoning_content,
286 };
287 }
288 }
289 }
290 }
291
292 if m.role == "tool" {
293 if let Ok(value) = serde_json::from_str::<serde_json::Value>(&m.content) {
294 let tool_call_id = value
295 .get("tool_call_id")
296 .and_then(serde_json::Value::as_str)
297 .map(ToString::to_string);
298 let content = value
299 .get("content")
300 .and_then(serde_json::Value::as_str)
301 .map(ToString::to_string);
302 return NativeMessage {
303 role: "tool".to_string(),
304 content,
305 tool_call_id,
306 tool_calls: None,
307 reasoning_content: None,
308 };
309 }
310 }
311
312 NativeMessage {
313 role: m.role.clone(),
314 content: Some(m.content.clone()),
315 tool_call_id: None,
316 tool_calls: None,
317 reasoning_content: None,
318 }
319 })
320 .collect()
321 }
322
323 fn parse_native_response(message: NativeResponseMessage) -> ProviderChatResponse {
324 let text = message.effective_content();
325 let reasoning_content = message.reasoning_content.clone();
326 let tool_calls = message
327 .tool_calls
328 .unwrap_or_default()
329 .into_iter()
330 .map(|tc| ProviderToolCall {
331 id: tc.id.unwrap_or_else(|| uuid::Uuid::new_v4().to_string()),
332 name: tc.function.name,
333 arguments: tc.function.arguments,
334 })
335 .collect::<Vec<_>>();
336
337 ProviderChatResponse {
338 text,
339 tool_calls,
340 usage: None,
341 reasoning_content,
342 }
343 }
344
345 fn http_client(&self) -> Client {
346 crate::config::build_runtime_proxy_client_with_timeouts("provider.openai", 120, 10)
347 }
348}
349
#[async_trait]
impl Provider for OpenAiProvider {
    /// Single-turn chat without tools: sends an optional system prompt plus
    /// one user message and returns the assistant's text.
    ///
    /// Errors when no credential is configured, on transport failure, on a
    /// non-success HTTP status, or when the response has no choices.
    async fn chat_with_system(
        &self,
        system_prompt: Option<&str>,
        message: &str,
        model: &str,
        temperature: f64,
    ) -> anyhow::Result<String> {
        // Fail fast with a configuration hint when no credential is present.
        let credential = self.credential.as_ref().ok_or_else(|| {
            anyhow::anyhow!("OpenAI API key not set. Set OPENAI_API_KEY or edit config.toml.")
        })?;

        let adjusted_temperature = Self::adjust_temperature_for_model(model, temperature);

        let mut messages = Vec::new();

        if let Some(sys) = system_prompt {
            messages.push(Message {
                role: "system".to_string(),
                content: sys.to_string(),
            });
        }

        messages.push(Message {
            role: "user".to_string(),
            content: message.to_string(),
        });

        let request = ChatRequest {
            model: model.to_string(),
            messages,
            temperature: adjusted_temperature,
            max_tokens: self.max_tokens,
        };

        let response = self
            .http_client()
            .post(format!("{}/chat/completions", self.base_url))
            .header("Authorization", format!("Bearer {credential}"))
            .json(&request)
            .send()
            .await?;

        // Non-2xx responses are converted into a provider-tagged error.
        if !response.status().is_success() {
            return Err(super::api_error("OpenAI", response).await);
        }

        let chat_response: ChatResponse = response.json().await?;

        // Only the first choice is used; an empty choices array is an error.
        chat_response
            .choices
            .into_iter()
            .next()
            .map(|c| c.message.effective_content())
            .ok_or_else(|| anyhow::anyhow!("No response from OpenAI"))
    }

    /// Multi-turn chat with optional tool definitions taken from the
    /// provider-agnostic request. Returns text, tool calls, token usage
    /// (when the API reports it), and any reasoning content.
    async fn chat(
        &self,
        request: ProviderChatRequest<'_>,
        model: &str,
        temperature: f64,
    ) -> anyhow::Result<ProviderChatResponse> {
        let credential = self.credential.as_ref().ok_or_else(|| {
            anyhow::anyhow!("OpenAI API key not set. Set OPENAI_API_KEY or edit config.toml.")
        })?;

        let adjusted_temperature = Self::adjust_temperature_for_model(model, temperature);

        let tools = Self::convert_tools(request.tools);
        let native_request = NativeChatRequest {
            model: model.to_string(),
            messages: Self::convert_messages(request.messages),
            temperature: adjusted_temperature,
            // Let the model decide when to call tools, but only if any exist.
            tool_choice: tools.as_ref().map(|_| "auto".to_string()),
            tools,
            max_tokens: self.max_tokens,
        };

        let response = self
            .http_client()
            .post(format!("{}/chat/completions", self.base_url))
            .header("Authorization", format!("Bearer {credential}"))
            .json(&native_request)
            .send()
            .await?;

        if !response.status().is_success() {
            return Err(super::api_error("OpenAI", response).await);
        }

        let native_response: NativeChatResponse = response.json().await?;
        // Map the API's usage block (if any) into the provider-agnostic shape.
        let usage = native_response.usage.map(|u| TokenUsage {
            input_tokens: u.prompt_tokens,
            output_tokens: u.completion_tokens,
            cached_input_tokens: u.prompt_tokens_details.and_then(|d| d.cached_tokens),
        });
        let message = native_response
            .choices
            .into_iter()
            .next()
            .map(|c| c.message)
            .ok_or_else(|| anyhow::anyhow!("No response from OpenAI"))?;
        let mut result = Self::parse_native_response(message);
        result.usage = usage;
        Ok(result)
    }

    // OpenAI supports function calling natively, so callers may pass raw
    // OpenAI-format tool specs to `chat_with_tools`.
    fn supports_native_tools(&self) -> bool {
        true
    }

    /// Multi-turn chat where the caller supplies tools already in OpenAI's
    /// JSON wire format; each spec is validated before the request is sent.
    async fn chat_with_tools(
        &self,
        messages: &[ChatMessage],
        tools: &[serde_json::Value],
        model: &str,
        temperature: f64,
    ) -> anyhow::Result<ProviderChatResponse> {
        let credential = self.credential.as_ref().ok_or_else(|| {
            anyhow::anyhow!("OpenAI API key not set. Set OPENAI_API_KEY or edit config.toml.")
        })?;

        let adjusted_temperature = Self::adjust_temperature_for_model(model, temperature);

        // An empty tool list is sent as an absent field, not an empty array;
        // any malformed spec aborts the request before it is sent.
        let native_tools: Option<Vec<NativeToolSpec>> = if tools.is_empty() {
            None
        } else {
            Some(
                tools
                    .iter()
                    .cloned()
                    .map(parse_native_tool_spec)
                    .collect::<Result<Vec<_>, _>>()?,
            )
        };

        let native_request = NativeChatRequest {
            model: model.to_string(),
            messages: Self::convert_messages(messages),
            temperature: adjusted_temperature,
            tool_choice: native_tools.as_ref().map(|_| "auto".to_string()),
            tools: native_tools,
            max_tokens: self.max_tokens,
        };

        let response = self
            .http_client()
            .post(format!("{}/chat/completions", self.base_url))
            .header("Authorization", format!("Bearer {credential}"))
            .json(&native_request)
            .send()
            .await?;

        if !response.status().is_success() {
            return Err(super::api_error("OpenAI", response).await);
        }

        let native_response: NativeChatResponse = response.json().await?;
        let usage = native_response.usage.map(|u| TokenUsage {
            input_tokens: u.prompt_tokens,
            output_tokens: u.completion_tokens,
            cached_input_tokens: u.prompt_tokens_details.and_then(|d| d.cached_tokens),
        });
        let message = native_response
            .choices
            .into_iter()
            .next()
            .map(|c| c.message)
            .ok_or_else(|| anyhow::anyhow!("No response from OpenAI"))?;
        let mut result = Self::parse_native_response(message);
        result.usage = usage;
        Ok(result)
    }

    /// Warm-up ping: GETs `/models` and fails on HTTP error status.
    /// A no-op when no credential is configured.
    async fn warmup(&self) -> anyhow::Result<()> {
        if let Some(credential) = self.credential.as_ref() {
            self.http_client()
                .get(format!("{}/models", self.base_url))
                .header("Authorization", format!("Bearer {credential}"))
                .send()
                .await?
                .error_for_status()?;
        }
        Ok(())
    }
}
538
#[cfg(test)]
mod tests {
    use super::*;

    // --- Construction ---

    #[test]
    fn creates_with_key() {
        let p = OpenAiProvider::new(Some("openai-test-credential"));
        assert_eq!(p.credential.as_deref(), Some("openai-test-credential"));
    }

    #[test]
    fn creates_without_key() {
        let p = OpenAiProvider::new(None);
        assert!(p.credential.is_none());
    }

    #[test]
    fn creates_with_empty_key() {
        // An empty string is stored as-is; it is not treated as "no key".
        let p = OpenAiProvider::new(Some(""));
        assert_eq!(p.credential.as_deref(), Some(""));
    }

    // --- Missing-credential error paths (no network involved) ---

    #[tokio::test]
    async fn chat_fails_without_key() {
        let p = OpenAiProvider::new(None);
        let result = p.chat_with_system(None, "hello", "gpt-4o", 0.7).await;
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("API key not set"));
    }

    #[tokio::test]
    async fn chat_with_system_fails_without_key() {
        let p = OpenAiProvider::new(None);
        let result = p
            .chat_with_system(Some("You are Construct"), "test", "gpt-4o", 0.5)
            .await;
        assert!(result.is_err());
    }

    // --- Simple request/response serde ---

    #[test]
    fn request_serializes_with_system_message() {
        let req = ChatRequest {
            model: "gpt-4o".to_string(),
            messages: vec![
                Message {
                    role: "system".to_string(),
                    content: "You are Construct".to_string(),
                },
                Message {
                    role: "user".to_string(),
                    content: "hello".to_string(),
                },
            ],
            temperature: 0.7,
            max_tokens: None,
        };
        let json = serde_json::to_string(&req).unwrap();
        assert!(json.contains("\"role\":\"system\""));
        assert!(json.contains("\"role\":\"user\""));
        assert!(json.contains("gpt-4o"));
    }

    #[test]
    fn request_serializes_without_system() {
        let req = ChatRequest {
            model: "gpt-4o".to_string(),
            messages: vec![Message {
                role: "user".to_string(),
                content: "hello".to_string(),
            }],
            temperature: 0.0,
            max_tokens: None,
        };
        let json = serde_json::to_string(&req).unwrap();
        assert!(!json.contains("system"));
        assert!(json.contains("\"temperature\":0.0"));
    }

    #[test]
    fn response_deserializes_single_choice() {
        let json = r#"{"choices":[{"message":{"content":"Hi!"}}]}"#;
        let resp: ChatResponse = serde_json::from_str(json).unwrap();
        assert_eq!(resp.choices.len(), 1);
        assert_eq!(resp.choices[0].message.effective_content(), "Hi!");
    }

    #[test]
    fn response_deserializes_empty_choices() {
        let json = r#"{"choices":[]}"#;
        let resp: ChatResponse = serde_json::from_str(json).unwrap();
        assert!(resp.choices.is_empty());
    }

    #[test]
    fn response_deserializes_multiple_choices() {
        let json = r#"{"choices":[{"message":{"content":"A"}},{"message":{"content":"B"}}]}"#;
        let resp: ChatResponse = serde_json::from_str(json).unwrap();
        assert_eq!(resp.choices.len(), 2);
        assert_eq!(resp.choices[0].message.effective_content(), "A");
    }

    #[test]
    fn response_with_unicode() {
        let json = r#"{"choices":[{"message":{"content":"Hello \u03A9"}}]}"#;
        let resp: ChatResponse = serde_json::from_str(json).unwrap();
        assert_eq!(
            resp.choices[0].message.effective_content(),
            "Hello \u{03A9}"
        );
    }

    #[test]
    fn response_with_long_content() {
        let long = "x".repeat(100_000);
        let json = format!(r#"{{"choices":[{{"message":{{"content":"{long}"}}}}]}}"#);
        let resp: ChatResponse = serde_json::from_str(&json).unwrap();
        assert_eq!(
            resp.choices[0].message.content.as_ref().unwrap().len(),
            100_000
        );
    }

    #[tokio::test]
    async fn warmup_without_key_is_noop() {
        let provider = OpenAiProvider::new(None);
        let result = provider.warmup().await;
        assert!(result.is_ok());
    }

    // --- reasoning_content fallback behavior ---

    #[test]
    fn reasoning_content_fallback_empty_content() {
        let json = r#"{"choices":[{"message":{"content":"","reasoning_content":"Thinking..."}}]}"#;
        let resp: ChatResponse = serde_json::from_str(json).unwrap();
        assert_eq!(resp.choices[0].message.effective_content(), "Thinking...");
    }

    #[test]
    fn reasoning_content_fallback_null_content() {
        let json =
            r#"{"choices":[{"message":{"content":null,"reasoning_content":"Thinking..."}}]}"#;
        let resp: ChatResponse = serde_json::from_str(json).unwrap();
        assert_eq!(resp.choices[0].message.effective_content(), "Thinking...");
    }

    #[test]
    fn reasoning_content_not_used_when_content_present() {
        let json = r#"{"choices":[{"message":{"content":"Hello","reasoning_content":"Ignored"}}]}"#;
        let resp: ChatResponse = serde_json::from_str(json).unwrap();
        assert_eq!(resp.choices[0].message.effective_content(), "Hello");
    }

    #[test]
    fn native_response_reasoning_content_fallback() {
        let json =
            r#"{"choices":[{"message":{"content":"","reasoning_content":"Native thinking"}}]}"#;
        let resp: NativeChatResponse = serde_json::from_str(json).unwrap();
        let msg = &resp.choices[0].message;
        assert_eq!(msg.effective_content(), Some("Native thinking".to_string()));
    }

    #[test]
    fn native_response_reasoning_content_ignored_when_content_present() {
        let json =
            r#"{"choices":[{"message":{"content":"Real answer","reasoning_content":"Ignored"}}]}"#;
        let resp: NativeChatResponse = serde_json::from_str(json).unwrap();
        let msg = &resp.choices[0].message;
        assert_eq!(msg.effective_content(), Some("Real answer".to_string()));
    }

    // --- Tool spec validation and tool-call flows ---

    #[tokio::test]
    async fn chat_with_tools_fails_without_key() {
        let p = OpenAiProvider::new(None);
        let messages = vec![ChatMessage::user("hello".to_string())];
        let tools = vec![serde_json::json!({
            "type": "function",
            "function": {
                "name": "shell",
                "description": "Run a shell command",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "command": { "type": "string" }
                    },
                    "required": ["command"]
                }
            }
        })];
        let result = p.chat_with_tools(&messages, &tools, "gpt-4o", 0.7).await;
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("API key not set"));
    }

    #[tokio::test]
    async fn chat_with_tools_rejects_invalid_tool_shape() {
        // Missing "description" makes the spec fail deserialization before
        // any network request is attempted.
        let p = OpenAiProvider::new(Some("openai-test-credential"));
        let messages = vec![ChatMessage::user("hello".to_string())];
        let tools = vec![serde_json::json!({
            "type": "function",
            "function": {
                "name": "shell",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "command": { "type": "string" }
                    },
                    "required": ["command"]
                }
            }
        })];

        let result = p.chat_with_tools(&messages, &tools, "gpt-4o", 0.7).await;
        assert!(result.is_err());
        assert!(
            result
                .unwrap_err()
                .to_string()
                .contains("Invalid OpenAI tool specification")
        );
    }

    #[test]
    fn native_tool_spec_deserializes_from_openai_format() {
        let json = serde_json::json!({
            "type": "function",
            "function": {
                "name": "shell",
                "description": "Run a shell command",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "command": { "type": "string" }
                    },
                    "required": ["command"]
                }
            }
        });
        let spec = parse_native_tool_spec(json).unwrap();
        assert_eq!(spec.kind, "function");
        assert_eq!(spec.function.name, "shell");
    }

    // --- Usage parsing ---

    #[test]
    fn native_response_parses_usage() {
        let json = r#"{
            "choices": [{"message": {"content": "Hello"}}],
            "usage": {"prompt_tokens": 100, "completion_tokens": 50}
        }"#;
        let resp: NativeChatResponse = serde_json::from_str(json).unwrap();
        let usage = resp.usage.unwrap();
        assert_eq!(usage.prompt_tokens, Some(100));
        assert_eq!(usage.completion_tokens, Some(50));
    }

    #[test]
    fn native_response_parses_without_usage() {
        let json = r#"{"choices": [{"message": {"content": "Hello"}}]}"#;
        let resp: NativeChatResponse = serde_json::from_str(json).unwrap();
        assert!(resp.usage.is_none());
    }

    // --- parse_native_response / convert_messages round-trips ---

    #[test]
    fn parse_native_response_captures_reasoning_content() {
        let json = r#"{"choices":[{"message":{
            "content":"answer",
            "reasoning_content":"thinking step",
            "tool_calls":[{"id":"call_1","type":"function","function":{"name":"shell","arguments":"{}"}}]
        }}]}"#;
        let resp: NativeChatResponse = serde_json::from_str(json).unwrap();
        let message = resp.choices.into_iter().next().unwrap().message;
        let parsed = OpenAiProvider::parse_native_response(message);
        assert_eq!(parsed.reasoning_content.as_deref(), Some("thinking step"));
        assert_eq!(parsed.tool_calls.len(), 1);
    }

    #[test]
    fn parse_native_response_none_reasoning_content_for_normal_model() {
        let json = r#"{"choices":[{"message":{"content":"hello"}}]}"#;
        let resp: NativeChatResponse = serde_json::from_str(json).unwrap();
        let message = resp.choices.into_iter().next().unwrap().message;
        let parsed = OpenAiProvider::parse_native_response(message);
        assert!(parsed.reasoning_content.is_none());
    }

    #[test]
    fn convert_messages_round_trips_reasoning_content() {
        use crate::providers::ChatMessage;

        let history_json = serde_json::json!({
            "content": "I will check",
            "tool_calls": [{
                "id": "tc_1",
                "name": "shell",
                "arguments": "{}"
            }],
            "reasoning_content": "Let me think..."
        });

        let messages = vec![ChatMessage::assistant(history_json.to_string())];
        let native = OpenAiProvider::convert_messages(&messages);
        assert_eq!(native.len(), 1);
        assert_eq!(
            native[0].reasoning_content.as_deref(),
            Some("Let me think...")
        );
    }

    #[test]
    fn convert_messages_no_reasoning_content_when_absent() {
        use crate::providers::ChatMessage;

        let history_json = serde_json::json!({
            "content": "I will check",
            "tool_calls": [{
                "id": "tc_1",
                "name": "shell",
                "arguments": "{}"
            }]
        });

        let messages = vec![ChatMessage::assistant(history_json.to_string())];
        let native = OpenAiProvider::convert_messages(&messages);
        assert_eq!(native.len(), 1);
        assert!(native[0].reasoning_content.is_none());
    }

    #[test]
    fn native_message_omits_reasoning_content_when_none() {
        let msg = NativeMessage {
            role: "assistant".to_string(),
            content: Some("hi".to_string()),
            tool_call_id: None,
            tool_calls: None,
            reasoning_content: None,
        };
        let json = serde_json::to_string(&msg).unwrap();
        assert!(!json.contains("reasoning_content"));
    }

    #[test]
    fn native_message_includes_reasoning_content_when_some() {
        let msg = NativeMessage {
            role: "assistant".to_string(),
            content: Some("hi".to_string()),
            tool_call_id: None,
            tool_calls: None,
            reasoning_content: Some("thinking...".to_string()),
        };
        let json = serde_json::to_string(&msg).unwrap();
        assert!(json.contains("reasoning_content"));
        assert!(json.contains("thinking..."));
    }

    // --- Temperature clamping for fixed-temperature model families ---

    #[test]
    fn adjust_temperature_for_o1_models() {
        assert_eq!(OpenAiProvider::adjust_temperature_for_model("o1", 0.7), 1.0);
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("o1-2024-12-17", 0.5),
            1.0
        );
    }

    #[test]
    fn adjust_temperature_for_o3_models() {
        assert_eq!(OpenAiProvider::adjust_temperature_for_model("o3", 0.7), 1.0);
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("o3-2025-04-16", 0.5),
            1.0
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("o3-mini", 0.3),
            1.0
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("o3-mini-2025-01-31", 0.8),
            1.0
        );
    }

    #[test]
    fn adjust_temperature_for_o4_models() {
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("o4-mini", 0.7),
            1.0
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("o4-mini-2025-04-16", 0.5),
            1.0
        );
    }

    #[test]
    fn adjust_temperature_for_gpt5_models() {
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-5", 0.7),
            1.0
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-5-2025-08-07", 0.5),
            1.0
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-5-mini", 0.3),
            1.0
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-5-mini-2025-08-07", 0.8),
            1.0
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-5-nano", 0.6),
            1.0
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-5-nano-2025-08-07", 0.4),
            1.0
        );
    }

    #[test]
    fn adjust_temperature_for_gpt5_chat_latest_models() {
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-5.1-chat-latest", 0.7),
            1.0
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-5.2-chat-latest", 0.5),
            1.0
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-5.3-chat-latest", 0.3),
            1.0
        );
    }

    #[test]
    fn adjust_temperature_preserves_for_standard_models() {
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-4o", 0.7),
            0.7
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-4-turbo", 0.5),
            0.5
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-3.5-turbo", 0.3),
            0.3
        );
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-4", 1.0),
            1.0
        );
    }

    #[test]
    fn adjust_temperature_handles_edge_cases() {
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-4o", 0.0),
            0.0
        );
        assert_eq!(OpenAiProvider::adjust_temperature_for_model("o1", 1.0), 1.0);
        assert_eq!(
            OpenAiProvider::adjust_temperature_for_model("gpt-4o", 1.0),
            1.0
        );
    }
}