1mod auth;
20
21use crate::OneOrMany;
22use crate::client::{
23 self, ApiKey, Capabilities, Capable, DebugExt, Nothing, Provider, ProviderBuilder,
24 ProviderClient, Transport,
25};
26use crate::completion::{self, CompletionError};
27use crate::http_client::{self, HttpClientExt};
28use crate::providers::openai::responses_api::{
29 self, CompletionRequest as ResponsesRequest, Include,
30};
31use crate::streaming::StreamingCompletionResponse;
32use crate::wasm_compat::{WasmCompatSend, WasmCompatSync};
33use std::fmt::Debug;
34use std::path::{Path, PathBuf};
35use tracing::{Level, enabled, info_span};
36
/// Base URL of the ChatGPT backend Codex API (not the public OpenAI API).
const CHATGPT_API_BASE_URL: &str = "https://chatgpt.com/backend-api/codex";
/// Fallback value for the `originator` header; overridable via `CHATGPT_ORIGINATOR`.
const DEFAULT_ORIGINATOR: &str = "rig";
/// Fallback system instructions; overridable via `CHATGPT_DEFAULT_INSTRUCTIONS`.
const DEFAULT_INSTRUCTIONS: &str = "You are ChatGPT, a helpful AI assistant.";
40
/// `gpt-5.4` model identifier.
pub const GPT_5_4: &str = "gpt-5.4";
/// `gpt-5.4-pro` model identifier.
pub const GPT_5_4_PRO: &str = "gpt-5.4-pro";
/// `gpt-5.3-codex` model identifier.
pub const GPT_5_3_CODEX: &str = "gpt-5.3-codex";
/// `gpt-5.3-codex-spark` model identifier.
pub const GPT_5_3_CODEX_SPARK: &str = "gpt-5.3-codex-spark";
/// `gpt-5.3-instant` model identifier.
pub const GPT_5_3_INSTANT: &str = "gpt-5.3-instant";
/// `gpt-5.3-chat-latest` model identifier.
pub const GPT_5_3_CHAT_LATEST: &str = "gpt-5.3-chat-latest";
53
/// Credential used to authenticate against the ChatGPT backend.
#[derive(Clone)]
pub enum ChatGPTAuth {
    /// A pre-obtained bearer token, optionally scoped to a specific account.
    AccessToken {
        /// Bearer token sent in the `Authorization` header.
        access_token: String,
        /// Value for the `ChatGPT-Account-Id` header, when known.
        account_id: Option<String>,
    },
    /// Authenticate interactively via the OAuth device-code flow (see `auth`).
    OAuth,
}

// Marker impl so `ChatGPTAuth` can serve as the builder's API-key type.
impl ApiKey for ChatGPTAuth {}
64
65impl<S> From<S> for ChatGPTAuth
66where
67 S: Into<String>,
68{
69 fn from(value: S) -> Self {
70 Self::AccessToken {
71 access_token: value.into(),
72 account_id: None,
73 }
74 }
75}
76
77impl Debug for ChatGPTAuth {
78 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
79 match self {
80 Self::AccessToken { .. } => f.write_str("AccessToken(<redacted>)"),
81 Self::OAuth => f.write_str("OAuth"),
82 }
83 }
84}
85
/// Provider-specific configuration collected while building a ChatGPT [`Client`].
#[derive(Debug, Clone)]
pub struct ChatGPTBuilder {
    // Location of the persisted auth state (`auth.json`) handed to the authenticator.
    auth_file: Option<PathBuf>,
    // Instructions merged into every completion request.
    default_instructions: Option<String>,
    // Callback invoked with the device-code prompt during OAuth login.
    device_code_handler: auth::DeviceCodeHandler,
    // Value sent in the `originator` request header.
    originator: String,
    // Override for the `user-agent` header; computed at build time when `None`.
    user_agent: Option<String>,
}

/// Finalized provider state stored inside a built [`Client`].
#[derive(Clone)]
pub struct ChatGPTExt {
    // Resolves and refreshes credentials for each request.
    auth: auth::Authenticator,
    default_instructions: Option<String>,
    originator: String,
    // Always resolved (falls back to `default_user_agent()`), unlike the builder field.
    user_agent: String,
}
102
103impl Debug for ChatGPTExt {
104 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
105 f.debug_struct("ChatGPTExt")
106 .field("auth", &self.auth)
107 .field("default_instructions", &self.default_instructions)
108 .field("originator", &self.originator)
109 .field("user_agent", &self.user_agent)
110 .finish()
111 }
112}
113
/// ChatGPT client over an HTTP implementation `H` (defaults to `reqwest`).
pub type Client<H = reqwest::Client> = client::Client<ChatGPTExt, H>;
/// Builder for [`Client`]; starts with the API key in the `Missing` state.
pub type ClientBuilder<H = crate::markers::Missing> =
    client::ClientBuilder<ChatGPTBuilder, ChatGPTAuth, H>;
117
impl Default for ChatGPTBuilder {
    /// Seeds the builder from environment variables with compiled-in fallbacks:
    /// `CHATGPT_DEFAULT_INSTRUCTIONS`, `CHATGPT_ORIGINATOR`, and
    /// `CHATGPT_USER_AGENT` (the latter has no fallback here; it is resolved
    /// later, at client build time).
    fn default() -> Self {
        Self {
            auth_file: default_auth_file(),
            default_instructions: Some(
                std::env::var("CHATGPT_DEFAULT_INSTRUCTIONS")
                    .ok()
                    // Whitespace-only values are treated as unset.
                    .filter(|value| !value.trim().is_empty())
                    .unwrap_or_else(|| DEFAULT_INSTRUCTIONS.to_string()),
            ),
            device_code_handler: auth::DeviceCodeHandler::default(),
            // NOTE(review): unlike the instructions above, the two filters below
            // only reject fully-empty values, so a whitespace-only env var is
            // kept as-is — confirm whether that asymmetry is intentional.
            originator: std::env::var("CHATGPT_ORIGINATOR")
                .ok()
                .filter(|value| !value.is_empty())
                .unwrap_or_else(|| DEFAULT_ORIGINATOR.to_string()),
            user_agent: std::env::var("CHATGPT_USER_AGENT")
                .ok()
                .filter(|value| !value.is_empty()),
        }
    }
}
139
impl Provider for ChatGPTExt {
    type Builder = ChatGPTBuilder;

    // NOTE(review): empty path — presumably key verification is a no-op for
    // this provider; confirm against the `Provider` trait contract.
    const VERIFY_PATH: &'static str = "";

    /// Attaches the headers this backend expects on every request.
    fn with_custom(&self, req: http_client::Builder) -> http_client::Result<http_client::Builder> {
        Ok(req
            .header("originator", &self.originator)
            .header("user-agent", &self.user_agent)
            // All responses are consumed as SSE, even non-streaming completions.
            .header(http::header::ACCEPT, "text/event-stream"))
    }

    /// Joins base URL and path with exactly one `/` between them.
    fn build_uri(&self, base_url: &str, path: &str, _transport: Transport) -> String {
        format!(
            "{}/{}",
            base_url.trim_end_matches('/'),
            path.trim_start_matches('/')
        )
    }
}
160
/// Capability map: only (streaming) completions via the Responses API are
/// supported; every other capability is `Nothing`.
impl<H> Capabilities<H> for ChatGPTExt {
    type Completion = Capable<ResponsesCompletionModel<H>>;
    type Embeddings = Nothing;
    type Transcription = Nothing;
    type ModelListing = Nothing;
    #[cfg(feature = "image")]
    type ImageGeneration = Nothing;
    #[cfg(feature = "audio")]
    type AudioGeneration = Nothing;
}

impl DebugExt for ChatGPTExt {}
173
impl ProviderBuilder for ChatGPTBuilder {
    type Extension<H>
        = ChatGPTExt
    where
        H: HttpClientExt;
    type ApiKey = ChatGPTAuth;

    const BASE_URL: &'static str = CHATGPT_API_BASE_URL;

    /// Finalizes the builder into the runtime extension: translates the
    /// supplied [`ChatGPTAuth`] into an `auth::AuthSource`, wires the
    /// authenticator with the configured token file and device-code handler,
    /// and resolves the user agent (falling back to [`default_user_agent`]).
    fn build<H>(
        builder: &client::ClientBuilder<Self, Self::ApiKey, H>,
    ) -> http_client::Result<Self::Extension<H>>
    where
        H: HttpClientExt,
    {
        // Static token vs. interactive OAuth flow.
        let auth = match builder.get_api_key() {
            ChatGPTAuth::AccessToken {
                access_token,
                account_id,
            } => auth::AuthSource::AccessToken {
                access_token: access_token.clone(),
                account_id: account_id.clone(),
            },
            ChatGPTAuth::OAuth => auth::AuthSource::OAuth,
        };

        let ext = builder.ext();

        Ok(ChatGPTExt {
            auth: auth::Authenticator::new(
                auth,
                ext.auth_file.clone(),
                ext.device_code_handler.clone(),
            ),
            default_instructions: ext.default_instructions.clone(),
            originator: ext.originator.clone(),
            user_agent: ext.user_agent.clone().unwrap_or_else(default_user_agent),
        })
    }
}
214
impl ProviderClient for Client {
    type Input = ChatGPTAuth;
    type Error = crate::client::ProviderClientError;

    /// Builds a client from environment variables:
    /// - `CHATGPT_API_BASE` (or `OPENAI_CHATGPT_API_BASE`) overrides the base URL;
    /// - `CHATGPT_ACCESS_TOKEN` (plus optional `CHATGPT_ACCOUNT_ID`) selects
    ///   token auth; when absent, the OAuth device-code flow is used instead.
    fn from_env() -> Result<Self, Self::Error> {
        let mut builder = Self::builder();

        if let Some(base_url) = crate::client::optional_env_var("CHATGPT_API_BASE")?
            .or(crate::client::optional_env_var("OPENAI_CHATGPT_API_BASE")?)
        {
            builder = builder.base_url(base_url);
        }

        if let Some(access_token) = crate::client::optional_env_var("CHATGPT_ACCESS_TOKEN")? {
            let account_id = crate::client::optional_env_var("CHATGPT_ACCOUNT_ID")?;
            builder
                .api_key(ChatGPTAuth::AccessToken {
                    access_token,
                    account_id,
                })
                .build()
                .map_err(Into::into)
        } else {
            // No token configured: fall back to interactive OAuth.
            builder.oauth().build().map_err(Into::into)
        }
    }

    /// Builds a client directly from a [`ChatGPTAuth`] value.
    fn from_val(input: Self::Input) -> Result<Self, Self::Error> {
        Self::builder().api_key(input).build().map_err(Into::into)
    }
}
246
// Only available while no API key has been set (`Missing` type-state).
impl<H> client::ClientBuilder<ChatGPTBuilder, crate::markers::Missing, H> {
    /// Selects the interactive OAuth device-code flow instead of a token.
    pub fn oauth(self) -> client::ClientBuilder<ChatGPTBuilder, ChatGPTAuth, H> {
        self.api_key(ChatGPTAuth::OAuth)
    }
}
252
253impl<H> ClientBuilder<H> {
254 pub fn on_device_code<F>(self, handler: F) -> Self
255 where
256 F: Fn(auth::DeviceCodePrompt) + Send + Sync + 'static,
257 {
258 self.over_ext(|mut ext| {
259 ext.device_code_handler = auth::DeviceCodeHandler::new(handler);
260 ext
261 })
262 }
263
264 pub fn token_dir(self, path: impl AsRef<Path>) -> Self {
265 let auth_file = path.as_ref().join("auth.json");
266 self.over_ext(|mut ext| {
267 ext.auth_file = Some(auth_file);
268 ext
269 })
270 }
271
272 pub fn auth_file(self, path: impl AsRef<Path>) -> Self {
273 let auth_file = path.as_ref().to_path_buf();
274 self.over_ext(|mut ext| {
275 ext.auth_file = Some(auth_file);
276 ext
277 })
278 }
279
280 pub fn default_instructions(self, instructions: impl Into<String>) -> Self {
281 let instructions = instructions.into();
282 self.over_ext(|mut ext| {
283 ext.default_instructions = Some(instructions);
284 ext
285 })
286 }
287
288 pub fn originator(self, originator: impl Into<String>) -> Self {
289 let originator = originator.into();
290 self.over_ext(|mut ext| {
291 ext.originator = originator;
292 ext
293 })
294 }
295
296 pub fn user_agent(self, user_agent: impl Into<String>) -> Self {
297 let user_agent = user_agent.into();
298 self.over_ext(|mut ext| {
299 ext.user_agent = Some(user_agent);
300 ext
301 })
302 }
303}
304
/// Completion model that talks to the ChatGPT backend's `/responses` endpoint.
#[derive(Clone)]
pub struct ResponsesCompletionModel<H = reqwest::Client> {
    client: Client<H>,
    /// Model identifier sent with every request (e.g. [`GPT_5_4`]).
    pub model: String,
    /// Tool definitions attached to every request.
    pub tools: Vec<responses_api::ResponsesToolDefinition>,
}
311
impl<H> ResponsesCompletionModel<H>
where
    Client<H>: HttpClientExt + Clone + Debug + 'static,
    H: Clone + Default + Debug + WasmCompatSend + WasmCompatSync + 'static,
{
    /// Creates a handle for `model` with no tools attached.
    pub fn new(client: Client<H>, model: impl Into<String>) -> Self {
        Self {
            client,
            model: model.into(),
            tools: Vec::new(),
        }
    }

    /// Appends a single tool definition (builder style).
    pub fn with_tool(mut self, tool: impl Into<responses_api::ResponsesToolDefinition>) -> Self {
        self.tools.push(tool.into());
        self
    }

    /// Appends several tool definitions (builder style).
    pub fn with_tools<I, Tool>(mut self, tools: I) -> Self
    where
        I: IntoIterator<Item = Tool>,
        Tool: Into<responses_api::ResponsesToolDefinition>,
    {
        self.tools.extend(tools.into_iter().map(Into::into));
        self
    }

    /// Wraps this model in the generic OpenAI Responses model so its request
    /// construction can be reused, carrying the configured tools across.
    fn openai_model(&self) -> responses_api::GenericResponsesCompletionModel<ChatGPTExt, H> {
        let mut model = responses_api::GenericResponsesCompletionModel::new(
            self.client.clone(),
            self.model.clone(),
        );
        model.tools = self.tools.clone();
        model
    }

    /// Builds the Responses request, then applies ChatGPT-backend adjustments:
    /// system messages are hoisted into `instructions`, the client's default
    /// instructions are merged in, streaming is forced on, encrypted reasoning
    /// content is requested, and several extra parameters are cleared.
    fn create_request(
        &self,
        request: completion::CompletionRequest,
    ) -> Result<ResponsesRequest, CompletionError> {
        let mut request = self.openai_model().create_completion_request(request)?;

        // System messages move out of the input and in front of any existing
        // non-blank instructions.
        if let Some(system_instructions) =
            normalize_system_messages_into_instructions(&mut request)?
        {
            request.instructions = Some(match request.instructions.as_deref() {
                Some(existing) if !existing.trim().is_empty() => {
                    format!("{system_instructions}\n\n{existing}")
                }
                _ => system_instructions,
            });
        }

        if let Some(default_instructions) = &self.client.ext().default_instructions {
            request.instructions = Some(merge_instructions(
                default_instructions,
                request.instructions.as_deref(),
            ));
        }

        // NOTE(review): sampling/limit parameters are dropped — presumably the
        // ChatGPT backend rejects them; confirm against the backend contract.
        request.temperature = None;
        request.max_output_tokens = None;
        // Every completion (including the non-streaming path) goes over SSE.
        request.stream = Some(true);

        // Request encrypted reasoning content, adding it at most once.
        let include = request
            .additional_parameters
            .include
            .get_or_insert_with(Vec::new);
        if !include
            .iter()
            .any(|item| matches!(item, Include::ReasoningEncryptedContent))
        {
            include.push(Include::ReasoningEncryptedContent);
        }

        // Clear the remaining additional parameters and explicitly opt out of
        // server-side storage (`store = false`).
        request.additional_parameters.background = None;
        request.additional_parameters.metadata.clear();
        request.additional_parameters.parallel_tool_calls = None;
        request.additional_parameters.service_tier = None;
        request.additional_parameters.store = Some(false);
        request.additional_parameters.text = None;
        request.additional_parameters.top_p = None;
        request.additional_parameters.user = None;

        Ok(request)
    }

    /// Adds bearer auth, a fresh random `session_id`, and (when known) the
    /// `ChatGPT-Account-Id` header.
    fn add_auth_headers(
        &self,
        req: http_client::Builder,
        context: &auth::AuthContext,
    ) -> http_client::Builder {
        let req = req
            .header(
                http::header::AUTHORIZATION,
                format!("Bearer {}", context.access_token),
            )
            .header("session_id", nanoid::nanoid!());

        if let Some(account_id) = &context.account_id {
            req.header("ChatGPT-Account-Id", account_id)
        } else {
            req
        }
    }

    /// Performs a non-streaming completion by POSTing to `/responses` and
    /// parsing the complete SSE body.
    ///
    /// When the final event converts to an error because it carries no output
    /// parts, falls back to reassembling the answer from the delta events.
    async fn completion_from_sse(
        &self,
        request: ResponsesRequest,
    ) -> Result<completion::CompletionResponse<responses_api::CompletionResponse>, CompletionError>
    {
        let body = serde_json::to_vec(&request)?;
        let auth = self
            .client
            .ext()
            .auth
            .auth_context()
            .await
            .map_err(|err| CompletionError::ProviderError(err.to_string()))?;

        let req = self
            .add_auth_headers(self.client.post("/responses")?, &auth)
            .body(body)
            .map_err(|err| CompletionError::HttpError(err.into()))?;

        let response = self.client.send(req).await?;
        let text = http_client::text(response).await?;
        let raw_response = responses_api::streaming::parse_sse_completion_body(&text, "ChatGPT")?;

        match raw_response.clone().try_into() {
            Ok(response) => Ok(response),
            // Empty `output` in the final event: rebuild the message from the
            // streamed deltas instead of failing.
            Err(CompletionError::ResponseError(message))
                if message == "Response contained no parts" =>
            {
                responses_api::streaming::completion_response_from_sse_body(
                    &text,
                    raw_response,
                    "ChatGPT",
                )
                .await
            }
            Err(error) => Err(error),
        }
    }
}
457
458impl<H> Client<H>
459where
460 H: HttpClientExt + Clone + Debug + Default + WasmCompatSend + WasmCompatSync + 'static,
461{
462 pub async fn authorize(&self) -> Result<(), auth::AuthError> {
463 self.ext().auth.auth_context().await.map(|_| ())
464 }
465}
466
impl<H> completion::CompletionModel for ResponsesCompletionModel<H>
where
    Client<H>: HttpClientExt + Clone + Debug + 'static,
    H: Clone + Default + Debug + WasmCompatSend + WasmCompatSync + 'static,
{
    type Response = responses_api::CompletionResponse;
    type StreamingResponse = responses_api::streaming::StreamingCompletionResponse;
    type Client = Client<H>;

    fn make(client: &Self::Client, model: impl Into<String>) -> Self {
        Self::new(client.clone(), model)
    }

    /// Runs a completion inside a `gen_ai` tracing span, recording response id,
    /// model, and token usage once the response arrives.
    async fn completion(
        &self,
        completion_request: completion::CompletionRequest,
    ) -> Result<completion::CompletionResponse<Self::Response>, CompletionError> {
        let request = self.create_request(completion_request)?;

        // Open a fresh span only when the caller has not already set one up.
        let span = if tracing::Span::current().is_disabled() {
            info_span!(
                target: "rig::completions",
                "chat",
                gen_ai.operation.name = "chat",
                gen_ai.provider.name = "chatgpt",
                gen_ai.request.model = self.model,
                gen_ai.response.id = tracing::field::Empty,
                gen_ai.response.model = tracing::field::Empty,
                gen_ai.usage.output_tokens = tracing::field::Empty,
                gen_ai.usage.input_tokens = tracing::field::Empty,
                gen_ai.usage.cache_read.input_tokens = tracing::field::Empty,
                gen_ai.input.messages = tracing::field::Empty,
                gen_ai.output.messages = tracing::field::Empty,
            )
        } else {
            tracing::Span::current()
        };

        tracing_futures::Instrument::instrument(
            async move {
                let response = self.completion_from_sse(request).await?;
                // Fill in the fields declared `Empty` above.
                let span = tracing::Span::current();
                span.record("gen_ai.response.id", &response.raw_response.id);
                span.record("gen_ai.response.model", &response.raw_response.model);
                span.record("gen_ai.usage.output_tokens", response.usage.output_tokens);
                span.record("gen_ai.usage.input_tokens", response.usage.input_tokens);
                span.record(
                    "gen_ai.usage.cache_read.input_tokens",
                    response.usage.cached_input_tokens,
                );
                Ok(response)
            },
            span,
        )
        .await
    }

    /// Delegates to the inherent [`ResponsesCompletionModel::stream`].
    async fn stream(
        &self,
        completion_request: completion::CompletionRequest,
    ) -> Result<StreamingCompletionResponse<Self::StreamingResponse>, CompletionError> {
        Self::stream(self, completion_request).await
    }
}
531
impl<H> ResponsesCompletionModel<H>
where
    Client<H>: HttpClientExt + Clone + Debug + 'static,
    H: Clone + Default + Debug + WasmCompatSend + WasmCompatSync + 'static,
{
    /// Starts a streaming completion: builds and authenticates the request,
    /// opens an SSE event source on `/responses`, and hands it to the shared
    /// Responses streaming machinery under a `gen_ai` tracing span.
    pub async fn stream(
        &self,
        completion_request: completion::CompletionRequest,
    ) -> Result<
        StreamingCompletionResponse<responses_api::streaming::StreamingCompletionResponse>,
        CompletionError,
    > {
        let request = self.create_request(completion_request)?;

        // Dump the full request body only when TRACE logging is on.
        if enabled!(Level::TRACE) {
            tracing::trace!(
                target: "rig::completions",
                "ChatGPT Responses streaming completion request: {}",
                serde_json::to_string_pretty(&request)?
            );
        }

        let body = serde_json::to_vec(&request)?;
        let auth = self
            .client
            .ext()
            .auth
            .auth_context()
            .await
            .map_err(|err| CompletionError::ProviderError(err.to_string()))?;

        let req = self
            .add_auth_headers(self.client.post("/responses")?, &auth)
            .body(body)
            .map_err(|err| CompletionError::HttpError(err.into()))?;

        // Open a fresh span only when the caller has not already set one up.
        let span = if tracing::Span::current().is_disabled() {
            info_span!(
                target: "rig::completions",
                "chat_streaming",
                gen_ai.operation.name = "chat_streaming",
                gen_ai.provider.name = "chatgpt",
                gen_ai.request.model = self.model,
                gen_ai.response.id = tracing::field::Empty,
                gen_ai.response.model = tracing::field::Empty,
                gen_ai.usage.output_tokens = tracing::field::Empty,
                gen_ai.usage.input_tokens = tracing::field::Empty,
                gen_ai.usage.cache_read.input_tokens = tracing::field::Empty,
            )
        } else {
            tracing::Span::current()
        };

        let client = self.client.clone();
        // NOTE(review): content-type enforcement is relaxed — presumably the
        // backend does not always send `text/event-stream`; confirm.
        let event_source = crate::http_client::sse::GenericEventSource::new(client, req)
            .allow_missing_content_type();

        Ok(responses_api::streaming::stream_from_event_source(
            event_source,
            span,
            "ChatGPT",
        ))
    }
}
596
597fn default_user_agent() -> String {
598 format!(
599 "rig/{} ({} {}; {})",
600 env!("CARGO_PKG_VERSION"),
601 std::env::consts::OS,
602 std::env::consts::ARCH,
603 DEFAULT_ORIGINATOR
604 )
605}
606
607fn default_auth_file() -> Option<PathBuf> {
608 config_dir().map(|dir| dir.join("chatgpt").join("auth.json"))
609}
610
/// Platform config directory: `%APPDATA%` on Windows, otherwise
/// `$XDG_CONFIG_HOME` with a fallback to `$HOME/.config`.
fn config_dir() -> Option<PathBuf> {
    #[cfg(target_os = "windows")]
    {
        std::env::var_os("APPDATA").map(PathBuf::from)
    }

    #[cfg(not(target_os = "windows"))]
    {
        if let Some(xdg) = std::env::var_os("XDG_CONFIG_HOME") {
            return Some(PathBuf::from(xdg));
        }
        let home = std::env::var_os("HOME")?;
        Some(PathBuf::from(home).join(".config"))
    }
}
624
/// Moves system-role items out of `request.input` and returns their combined
/// text (joined by blank lines), leaving only non-system items in place.
///
/// # Errors
/// Returns [`CompletionError::RequestError`] when removing the system items
/// would leave the input empty.
fn normalize_system_messages_into_instructions(
    request: &mut ResponsesRequest,
) -> Result<Option<String>, CompletionError> {
    let mut system_instructions = Vec::new();
    let mut filtered_items = Vec::new();

    for item in request.input.clone() {
        if let Some(system_text) = item.system_text() {
            // Blank system messages are dropped entirely.
            let system_text = system_text.trim();
            if !system_text.is_empty() {
                system_instructions.push(system_text.to_string());
            }
        } else {
            filtered_items.push(item);
        }
    }

    // `OneOrMany::many` fails on an empty list; map that to a request error.
    request.input = OneOrMany::many(filtered_items).map_err(|_| {
        CompletionError::RequestError(
            "ChatGPT responses request input must contain at least one non-system item".into(),
        )
    })?;

    if system_instructions.is_empty() {
        Ok(None)
    } else {
        Ok(Some(system_instructions.join("\n\n")))
    }
}
654
/// Combines the client-level default instructions with any instructions
/// already on the request.
///
/// A missing or whitespace-only existing value yields the defaults alone; an
/// existing value that already contains the defaults is kept as-is (trimmed);
/// otherwise the defaults are prepended, separated by a blank line.
fn merge_instructions(default_instructions: &str, existing_instructions: Option<&str>) -> String {
    let Some(existing) = existing_instructions
        .map(str::trim)
        .filter(|value| !value.is_empty())
    else {
        return default_instructions.to_string();
    };

    if existing.contains(default_instructions) {
        existing.to_string()
    } else {
        format!("{default_instructions}\n\n{existing}")
    }
}
665
#[cfg(test)]
mod tests {
    use super::*;

    // A well-formed `response.completed` event parses into a full response.
    #[test]
    fn test_parse_chatgpt_sse_completion() {
        let body = r#"data: {"type":"response.output_text.delta","delta":"hi"}
data: {"type":"response.completed","response":{"id":"resp_1","object":"response","created_at":1,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-5","usage":{"input_tokens":1,"input_tokens_details":{"cached_tokens":0},"output_tokens":1,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":2},"output":[{"type":"message","id":"msg_1","status":"completed","role":"assistant","content":[{"type":"output_text","annotations":[],"text":"hi"}]}],"tools":[]}}
data: [DONE]"#;

        let response = responses_api::streaming::parse_sse_completion_body(body, "ChatGPT")
            .expect("expected response");
        assert_eq!(response.id, "resp_1");
        assert_eq!(response.model, "gpt-5");
    }

    // The OAuth builder path constructs a client without needing credentials.
    #[test]
    fn test_client_initialization() {
        let _client = crate::providers::chatgpt::Client::builder()
            .oauth()
            .build()
            .expect("Client::builder()");
    }

    #[test]
    fn test_merge_instructions_uses_default_when_missing() {
        assert_eq!(
            merge_instructions(DEFAULT_INSTRUCTIONS, None),
            DEFAULT_INSTRUCTIONS
        );
    }

    #[test]
    fn test_merge_instructions_appends_existing_request_instructions() {
        let merged = merge_instructions(DEFAULT_INSTRUCTIONS, Some("Respond tersely."));
        assert!(merged.starts_with(DEFAULT_INSTRUCTIONS));
        assert!(merged.ends_with("Respond tersely."));
    }

    // Defaults already present in the existing instructions must not repeat.
    #[test]
    fn test_merge_instructions_avoids_duplicate_default() {
        let merged = merge_instructions(
            DEFAULT_INSTRUCTIONS,
            Some("You are ChatGPT, a helpful AI assistant.\n\nRespond tersely."),
        );
        assert_eq!(
            merged,
            "You are ChatGPT, a helpful AI assistant.\n\nRespond tersely."
        );
    }

    // Preamble and system chat messages collapse into one instructions string,
    // leaving only the user message in the request input.
    #[test]
    fn test_normalize_system_messages_into_instructions() {
        let completion_request = completion::CompletionRequest {
            model: Some("gpt-5.4".to_string()),
            preamble: Some("System one".to_string()),
            chat_history: OneOrMany::many(vec![
                completion::Message::system("System two"),
                completion::Message::user("hi"),
            ])
            .expect("history"),
            documents: Vec::new(),
            tools: Vec::new(),
            temperature: None,
            max_tokens: None,
            tool_choice: None,
            additional_params: None,
            output_schema: None,
        };
        let mut request = ResponsesRequest::try_from(("gpt-5.4".to_string(), completion_request))
            .expect("request");

        let instructions = normalize_system_messages_into_instructions(&mut request)
            .expect("normalize")
            .expect("instructions");

        assert_eq!(instructions, "System one\n\nSystem two");
        assert_eq!(request.input.len(), 1);
    }

    // `create_request` must strip sampling parameters the backend rejects.
    #[test]
    fn test_create_request_drops_temperature() {
        let client = crate::providers::chatgpt::Client::builder()
            .oauth()
            .build()
            .expect("client");
        let model = ResponsesCompletionModel::new(client, GPT_5_3_CODEX);

        let request = model
            .create_request(completion::CompletionRequest {
                model: None,
                preamble: None,
                chat_history: OneOrMany::one(completion::Message::user("hello")),
                documents: Vec::new(),
                tools: Vec::new(),
                temperature: Some(0.5),
                max_tokens: None,
                tool_choice: None,
                additional_params: None,
                output_schema: None,
            })
            .expect("request");

        assert!(request.temperature.is_none());
    }

    // A completed response with an empty `output` array should be rebuilt from
    // the streamed delta events rather than rejected.
    #[tokio::test]
    async fn test_completion_response_from_sse_body_falls_back_to_streamed_text() {
        let body = r#"data: {"type":"response.output_text.delta","delta":"hi"}
data: {"type":"response.completed","response":{"id":"resp_1","object":"response","created_at":1,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-5","usage":{"input_tokens":1,"input_tokens_details":{"cached_tokens":0},"output_tokens":1,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":2},"output":[],"tools":[]}}
data: [DONE]"#;

        let raw_response = responses_api::streaming::parse_sse_completion_body(body, "ChatGPT")
            .expect("expected response");
        let response = responses_api::streaming::completion_response_from_sse_body(
            body,
            raw_response,
            "ChatGPT",
        )
        .await
        .expect("fallback response");

        let text: String = response
            .choice
            .iter()
            .filter_map(|content| match content {
                completion::AssistantContent::Text(text) => Some(text.text.as_str()),
                _ => None,
            })
            .collect();

        assert_eq!(text, "hi");
        assert_eq!(response.usage.total_tokens, 2);
    }
}
800}