1mod auth;
20
21use crate::OneOrMany;
22use crate::client::{
23 self, ApiKey, Capabilities, Capable, DebugExt, Nothing, Provider, ProviderBuilder,
24 ProviderClient, Transport,
25};
26use crate::completion::{self, CompletionError};
27use crate::http_client::{self, HttpClientExt};
28use crate::providers::openai::responses_api::{
29 self, CompletionRequest as ResponsesRequest, Include,
30};
31use crate::streaming::StreamingCompletionResponse;
32use crate::wasm_compat::{WasmCompatSend, WasmCompatSync};
33use std::fmt::Debug;
34use std::path::{Path, PathBuf};
35use tracing::{Level, enabled, info_span};
36
// Codex backend endpoint used by ChatGPT first-party clients — not the public
// platform.openai.com API surface.
const CHATGPT_API_BASE_URL: &str = "https://chatgpt.com/backend-api/codex";
// Default value for the `originator` header attached to every request.
const DEFAULT_ORIGINATOR: &str = "rig";
// Instructions merged into requests when the caller configures none.
const DEFAULT_INSTRUCTIONS: &str = "You are ChatGPT, a helpful AI assistant.";

/// Model identifier for `gpt-5.4`.
pub const GPT_5_4: &str = "gpt-5.4";
/// Model identifier for `gpt-5.4-pro`.
pub const GPT_5_4_PRO: &str = "gpt-5.4-pro";
/// Model identifier for `gpt-5.3-codex`.
pub const GPT_5_3_CODEX: &str = "gpt-5.3-codex";
/// Model identifier for `gpt-5.3-codex-spark`.
pub const GPT_5_3_CODEX_SPARK: &str = "gpt-5.3-codex-spark";
/// Model identifier for `gpt-5.3-instant`.
pub const GPT_5_3_INSTANT: &str = "gpt-5.3-instant";
/// Model identifier for `gpt-5.3-chat-latest`.
pub const GPT_5_3_CHAT_LATEST: &str = "gpt-5.3-chat-latest";
53
/// Credentials accepted by the ChatGPT provider.
#[derive(Clone)]
pub enum ChatGPTAuth {
    /// Use a pre-obtained access token, optionally pinned to a specific
    /// account id (sent as the `ChatGPT-Account-Id` header).
    AccessToken {
        access_token: String,
        account_id: Option<String>,
    },
    /// Defer to the interactive OAuth flow handled by [`auth::Authenticator`].
    OAuth,
}
62
// Marker impl: allows `ChatGPTAuth` to serve as the client builder's API-key type.
impl ApiKey for ChatGPTAuth {}
64
65impl<S> From<S> for ChatGPTAuth
66where
67 S: Into<String>,
68{
69 fn from(value: S) -> Self {
70 Self::AccessToken {
71 access_token: value.into(),
72 account_id: None,
73 }
74 }
75}
76
77impl Debug for ChatGPTAuth {
78 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
79 match self {
80 Self::AccessToken { .. } => f.write_str("AccessToken(<redacted>)"),
81 Self::OAuth => f.write_str("OAuth"),
82 }
83 }
84}
85
/// Provider-specific configuration gathered while a [`Client`] is being built.
#[derive(Debug, Clone)]
pub struct ChatGPTBuilder {
    // File handed to `auth::Authenticator` for credential storage —
    // NOTE(review): exact persistence semantics live in the `auth` module.
    auth_file: Option<PathBuf>,
    // Instructions merged into every completion request; `None` disables merging.
    default_instructions: Option<String>,
    // Callback invoked with the device-code prompt during the OAuth flow.
    device_code_handler: auth::DeviceCodeHandler,
    // Value sent as the `originator` header.
    originator: String,
    // Overrides the generated `user-agent` header when set.
    user_agent: Option<String>,
}
94
/// Runtime extension state carried by a built ChatGPT [`Client`].
#[derive(Clone)]
pub struct ChatGPTExt {
    // Produces the auth context (access token / account id) for each request.
    auth: auth::Authenticator,
    // Instructions merged into every request via `merge_instructions`.
    default_instructions: Option<String>,
    // Sent as the `originator` header on every request.
    originator: String,
    // Sent as the `user-agent` header on every request.
    user_agent: String,
}
102
impl Debug for ChatGPTExt {
    // Field-by-field debug output; `auth` uses `Authenticator`'s own `Debug`
    // impl — NOTE(review): assumed to be token-safe, confirm in `auth`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ChatGPTExt")
            .field("auth", &self.auth)
            .field("default_instructions", &self.default_instructions)
            .field("originator", &self.originator)
            .field("user_agent", &self.user_agent)
            .finish()
    }
}
113
/// ChatGPT client over an arbitrary HTTP backend (defaults to `reqwest`).
pub type Client<H = reqwest::Client> = client::Client<ChatGPTExt, H>;
/// Builder for [`Client`], keyed by [`ChatGPTAuth`].
pub type ClientBuilder<H = reqwest::Client> = client::ClientBuilder<ChatGPTBuilder, ChatGPTAuth, H>;
116
117impl Default for ChatGPTBuilder {
118 fn default() -> Self {
119 Self {
120 auth_file: default_auth_file(),
121 default_instructions: Some(
122 std::env::var("CHATGPT_DEFAULT_INSTRUCTIONS")
123 .ok()
124 .filter(|value| !value.trim().is_empty())
125 .unwrap_or_else(|| DEFAULT_INSTRUCTIONS.to_string()),
126 ),
127 device_code_handler: auth::DeviceCodeHandler::default(),
128 originator: std::env::var("CHATGPT_ORIGINATOR")
129 .ok()
130 .filter(|value| !value.is_empty())
131 .unwrap_or_else(|| DEFAULT_ORIGINATOR.to_string()),
132 user_agent: std::env::var("CHATGPT_USER_AGENT")
133 .ok()
134 .filter(|value| !value.is_empty()),
135 }
136 }
137}
138
impl Provider for ChatGPTExt {
    type Builder = ChatGPTBuilder;

    // Empty: this backend exposes no lightweight credential-verification
    // endpoint — NOTE(review): confirm how `client` treats an empty path.
    const VERIFY_PATH: &'static str = "";

    // Attaches the headers the ChatGPT backend expects on every request;
    // responses are delivered as SSE, hence the `Accept: text/event-stream`.
    fn with_custom(&self, req: http_client::Builder) -> http_client::Result<http_client::Builder> {
        Ok(req
            .header("originator", &self.originator)
            .header("user-agent", &self.user_agent)
            .header(http::header::ACCEPT, "text/event-stream"))
    }

    // Joins base URL and path with exactly one `/` between them.
    fn build_uri(&self, base_url: &str, path: &str, _transport: Transport) -> String {
        format!(
            "{}/{}",
            base_url.trim_end_matches('/'),
            path.trim_start_matches('/')
        )
    }
}
159
// Only chat completions are supported over this backend; every other
// capability is explicitly absent.
impl<H> Capabilities<H> for ChatGPTExt {
    type Completion = Capable<ResponsesCompletionModel<H>>;
    type Embeddings = Nothing;
    type Transcription = Nothing;
    type ModelListing = Nothing;
    #[cfg(feature = "image")]
    type ImageGeneration = Nothing;
    #[cfg(feature = "audio")]
    type AudioGeneration = Nothing;
}
170
// Default debug-extension behavior from the client framework is sufficient.
impl DebugExt for ChatGPTExt {}
172
impl ProviderBuilder for ChatGPTBuilder {
    type Extension<H>
        = ChatGPTExt
    where
        H: HttpClientExt;
    type ApiKey = ChatGPTAuth;

    const BASE_URL: &'static str = CHATGPT_API_BASE_URL;

    /// Converts builder-time configuration into the runtime extension,
    /// wiring the supplied credentials into an [`auth::Authenticator`].
    fn build<H>(
        builder: &client::ClientBuilder<Self, Self::ApiKey, H>,
    ) -> http_client::Result<Self::Extension<H>>
    where
        H: HttpClientExt,
    {
        // Translate the public auth enum into the authenticator's source type.
        let auth = match builder.get_api_key() {
            ChatGPTAuth::AccessToken {
                access_token,
                account_id,
            } => auth::AuthSource::AccessToken {
                access_token: access_token.clone(),
                account_id: account_id.clone(),
            },
            ChatGPTAuth::OAuth => auth::AuthSource::OAuth,
        };

        let ext = builder.ext();

        Ok(ChatGPTExt {
            auth: auth::Authenticator::new(
                auth,
                ext.auth_file.clone(),
                ext.device_code_handler.clone(),
            ),
            default_instructions: ext.default_instructions.clone(),
            originator: ext.originator.clone(),
            // Fall back to the generated `rig/<version> (...)` user agent.
            user_agent: ext.user_agent.clone().unwrap_or_else(default_user_agent),
        })
    }
}
213
impl ProviderClient for Client {
    type Input = ChatGPTAuth;
    type Error = crate::client::ProviderClientError;

    /// Builds a client from the environment.
    ///
    /// `CHATGPT_API_BASE` (then `OPENAI_CHATGPT_API_BASE`) overrides the base
    /// URL. If `CHATGPT_ACCESS_TOKEN` is set it is used as the credential,
    /// with `CHATGPT_ACCOUNT_ID` as an optional account pin; otherwise the
    /// client falls back to the interactive OAuth flow.
    fn from_env() -> Result<Self, Self::Error> {
        let mut builder = Self::builder();

        if let Some(base_url) = crate::client::optional_env_var("CHATGPT_API_BASE")?
            .or(crate::client::optional_env_var("OPENAI_CHATGPT_API_BASE")?)
        {
            builder = builder.base_url(base_url);
        }

        if let Some(access_token) = crate::client::optional_env_var("CHATGPT_ACCESS_TOKEN")? {
            let account_id = crate::client::optional_env_var("CHATGPT_ACCOUNT_ID")?;
            builder
                .api_key(ChatGPTAuth::AccessToken {
                    access_token,
                    account_id,
                })
                .build()
                .map_err(Into::into)
        } else {
            builder.oauth().build().map_err(Into::into)
        }
    }

    /// Builds a client directly from a [`ChatGPTAuth`] value.
    fn from_val(input: Self::Input) -> Result<Self, Self::Error> {
        Self::builder().api_key(input).build().map_err(Into::into)
    }
}
245
// Only available while no API key has been provided (`Missing` marker state).
impl<H> client::ClientBuilder<ChatGPTBuilder, crate::markers::Missing, H> {
    /// Selects the interactive OAuth flow instead of a pre-supplied token.
    pub fn oauth(self) -> client::ClientBuilder<ChatGPTBuilder, ChatGPTAuth, H> {
        self.api_key(ChatGPTAuth::OAuth)
    }
}
251
impl<H> ClientBuilder<H> {
    /// Registers a callback that receives the device-code prompt during the
    /// OAuth login flow.
    pub fn on_device_code<F>(self, handler: F) -> Self
    where
        F: Fn(auth::DeviceCodePrompt) + Send + Sync + 'static,
    {
        self.over_ext(|mut ext| {
            ext.device_code_handler = auth::DeviceCodeHandler::new(handler);
            ext
        })
    }

    /// Sets the authenticator's credential file to `<path>/auth.json`.
    pub fn token_dir(self, path: impl AsRef<Path>) -> Self {
        let auth_file = path.as_ref().join("auth.json");
        self.over_ext(|mut ext| {
            ext.auth_file = Some(auth_file);
            ext
        })
    }

    /// Uses `path` itself as the authenticator's credential file.
    pub fn auth_file(self, path: impl AsRef<Path>) -> Self {
        let auth_file = path.as_ref().to_path_buf();
        self.over_ext(|mut ext| {
            ext.auth_file = Some(auth_file);
            ext
        })
    }

    /// Sets the instructions merged into every completion request.
    pub fn default_instructions(self, instructions: impl Into<String>) -> Self {
        let instructions = instructions.into();
        self.over_ext(|mut ext| {
            ext.default_instructions = Some(instructions);
            ext
        })
    }

    /// Overrides the `originator` header value.
    pub fn originator(self, originator: impl Into<String>) -> Self {
        let originator = originator.into();
        self.over_ext(|mut ext| {
            ext.originator = originator;
            ext
        })
    }

    /// Overrides the `user-agent` header value.
    pub fn user_agent(self, user_agent: impl Into<String>) -> Self {
        let user_agent = user_agent.into();
        self.over_ext(|mut ext| {
            ext.user_agent = Some(user_agent);
            ext
        })
    }
}
303
/// Completion model backed by the ChatGPT Responses SSE endpoint.
#[derive(Clone)]
pub struct ResponsesCompletionModel<H = reqwest::Client> {
    client: Client<H>,
    /// Model identifier sent with each request (e.g. [`GPT_5_3_CODEX`]).
    pub model: String,
    /// Tool definitions attached to every request built by this model.
    pub tools: Vec<responses_api::ResponsesToolDefinition>,
}
310
impl<H> ResponsesCompletionModel<H>
where
    Client<H>: HttpClientExt + Clone + Debug + 'static,
    H: Clone + Default + Debug + WasmCompatSend + WasmCompatSync + 'static,
{
    /// Creates a model handle for `model` with no tools attached.
    pub fn new(client: Client<H>, model: impl Into<String>) -> Self {
        Self {
            client,
            model: model.into(),
            tools: Vec::new(),
        }
    }

    /// Appends a single tool definition (builder style).
    pub fn with_tool(mut self, tool: impl Into<responses_api::ResponsesToolDefinition>) -> Self {
        self.tools.push(tool.into());
        self
    }

    /// Appends a batch of tool definitions (builder style).
    pub fn with_tools<I, Tool>(mut self, tools: I) -> Self
    where
        I: IntoIterator<Item = Tool>,
        Tool: Into<responses_api::ResponsesToolDefinition>,
    {
        self.tools.extend(tools.into_iter().map(Into::into));
        self
    }

    // Bridges to the shared OpenAI Responses-API model so request building
    // can be reused, carrying this model's tool list across.
    fn openai_model(&self) -> responses_api::GenericResponsesCompletionModel<ChatGPTExt, H> {
        let mut model = responses_api::GenericResponsesCompletionModel::new(
            self.client.clone(),
            self.model.clone(),
        );
        model.tools = self.tools.clone();
        model
    }

    /// Builds a Responses-API request, then rewrites it into the shape the
    /// ChatGPT backend accepts: system messages are hoisted into
    /// `instructions`, configured default instructions are merged in, and a
    /// number of fields the backend does not take are cleared.
    fn create_request(
        &self,
        request: completion::CompletionRequest,
    ) -> Result<ResponsesRequest, CompletionError> {
        let mut request = self.openai_model().create_completion_request(request)?;

        // System messages become part of `instructions`, prepended ahead of
        // any instructions already present on the request.
        if let Some(system_instructions) =
            normalize_system_messages_into_instructions(&mut request)?
        {
            request.instructions = Some(match request.instructions.as_deref() {
                Some(existing) if !existing.trim().is_empty() => {
                    format!("{system_instructions}\n\n{existing}")
                }
                _ => system_instructions,
            });
        }

        // Client-level default instructions are merged in front (deduplicated
        // by `merge_instructions`).
        if let Some(default_instructions) = &self.client.ext().default_instructions {
            request.instructions = Some(merge_instructions(
                default_instructions,
                request.instructions.as_deref(),
            ));
        }

        // NOTE(review): these fields are stripped for this backend —
        // presumably unsupported there; streaming is always on.
        request.temperature = None;
        request.max_output_tokens = None;
        request.stream = Some(true);

        // Ensure encrypted reasoning content is requested exactly once.
        let include = request
            .additional_parameters
            .include
            .get_or_insert_with(Vec::new);
        if !include
            .iter()
            .any(|item| matches!(item, Include::ReasoningEncryptedContent))
        {
            include.push(Include::ReasoningEncryptedContent);
        }

        // Clear remaining platform-API parameters; responses are explicitly
        // not stored server-side.
        request.additional_parameters.background = None;
        request.additional_parameters.metadata.clear();
        request.additional_parameters.parallel_tool_calls = None;
        request.additional_parameters.service_tier = None;
        request.additional_parameters.store = Some(false);
        request.additional_parameters.text = None;
        request.additional_parameters.top_p = None;
        request.additional_parameters.user = None;

        Ok(request)
    }

    // Adds bearer auth, a fresh per-request `session_id`, and (when known)
    // the `ChatGPT-Account-Id` header.
    fn add_auth_headers(
        &self,
        req: http_client::Builder,
        context: &auth::AuthContext,
    ) -> http_client::Builder {
        let req = req
            .header(
                http::header::AUTHORIZATION,
                format!("Bearer {}", context.access_token),
            )
            .header("session_id", nanoid::nanoid!());

        if let Some(account_id) = &context.account_id {
            req.header("ChatGPT-Account-Id", account_id)
        } else {
            req
        }
    }

    /// Performs a non-streaming completion over the (always-SSE) endpoint by
    /// buffering the whole event stream and parsing the final response out of
    /// it. If the final payload has no output parts, falls back to
    /// reconstructing the response from the streamed deltas.
    async fn completion_from_sse(
        &self,
        request: ResponsesRequest,
    ) -> Result<completion::CompletionResponse<responses_api::CompletionResponse>, CompletionError>
    {
        let body = serde_json::to_vec(&request)?;
        let auth = self
            .client
            .ext()
            .auth
            .auth_context()
            .await
            .map_err(|err| CompletionError::ProviderError(err.to_string()))?;

        let req = self
            .add_auth_headers(self.client.post("/responses")?, &auth)
            .body(body)
            .map_err(|err| CompletionError::HttpError(err.into()))?;

        let response = self.client.send(req).await?;
        let text = http_client::text(response).await?;
        let raw_response = responses_api::streaming::parse_sse_completion_body(&text, "ChatGPT")?;

        match raw_response.clone().try_into() {
            Ok(response) => Ok(response),
            // Matches the exact error emitted when `output` is empty; in that
            // case the streamed deltas still carry the text.
            Err(CompletionError::ResponseError(message))
                if message == "Response contained no parts" =>
            {
                responses_api::streaming::completion_response_from_sse_body(
                    &text,
                    raw_response,
                    "ChatGPT",
                )
                .await
            }
            Err(error) => Err(error),
        }
    }
}
456
457impl<H> Client<H>
458where
459 H: HttpClientExt + Clone + Debug + Default + WasmCompatSend + WasmCompatSync + 'static,
460{
461 pub async fn authorize(&self) -> Result<(), auth::AuthError> {
462 self.ext().auth.auth_context().await.map(|_| ())
463 }
464}
465
impl<H> completion::CompletionModel for ResponsesCompletionModel<H>
where
    Client<H>: HttpClientExt + Clone + Debug + 'static,
    H: Clone + Default + Debug + WasmCompatSend + WasmCompatSync + 'static,
{
    type Response = responses_api::CompletionResponse;
    type StreamingResponse = responses_api::streaming::StreamingCompletionResponse;
    type Client = Client<H>;

    fn make(client: &Self::Client, model: impl Into<String>) -> Self {
        Self::new(client.clone(), model)
    }

    /// Runs a completion inside a GenAI tracing span, recording response id,
    /// model, and token usage after the call returns.
    async fn completion(
        &self,
        completion_request: completion::CompletionRequest,
    ) -> Result<completion::CompletionResponse<Self::Response>, CompletionError> {
        let request = self.create_request(completion_request)?;

        // Reuse the caller's span if one is active; otherwise open our own.
        let span = if tracing::Span::current().is_disabled() {
            info_span!(
                target: "rig::completions",
                "chat",
                gen_ai.operation.name = "chat",
                gen_ai.provider.name = "chatgpt",
                gen_ai.request.model = self.model,
                gen_ai.response.id = tracing::field::Empty,
                gen_ai.response.model = tracing::field::Empty,
                gen_ai.usage.output_tokens = tracing::field::Empty,
                gen_ai.usage.input_tokens = tracing::field::Empty,
                gen_ai.usage.cache_read.input_tokens = tracing::field::Empty,
                gen_ai.input.messages = tracing::field::Empty,
                gen_ai.output.messages = tracing::field::Empty,
            )
        } else {
            tracing::Span::current()
        };

        tracing_futures::Instrument::instrument(
            async move {
                let response = self.completion_from_sse(request).await?;
                // Fill in the fields declared `Empty` above.
                let span = tracing::Span::current();
                span.record("gen_ai.response.id", &response.raw_response.id);
                span.record("gen_ai.response.model", &response.raw_response.model);
                span.record("gen_ai.usage.output_tokens", response.usage.output_tokens);
                span.record("gen_ai.usage.input_tokens", response.usage.input_tokens);
                span.record(
                    "gen_ai.usage.cache_read.input_tokens",
                    response.usage.cached_input_tokens,
                );
                Ok(response)
            },
            span,
        )
        .await
    }

    /// Delegates to the inherent `stream` implementation below.
    async fn stream(
        &self,
        completion_request: completion::CompletionRequest,
    ) -> Result<StreamingCompletionResponse<Self::StreamingResponse>, CompletionError> {
        Self::stream(self, completion_request).await
    }
}
530
impl<H> ResponsesCompletionModel<H>
where
    Client<H>: HttpClientExt + Clone + Debug + 'static,
    H: Clone + Default + Debug + WasmCompatSend + WasmCompatSync + 'static,
{
    /// Opens a streaming completion against the SSE endpoint, wrapped in a
    /// GenAI tracing span.
    pub async fn stream(
        &self,
        completion_request: completion::CompletionRequest,
    ) -> Result<
        StreamingCompletionResponse<responses_api::streaming::StreamingCompletionResponse>,
        CompletionError,
    > {
        let request = self.create_request(completion_request)?;

        // Only serialize the pretty-printed request when TRACE is active.
        if enabled!(Level::TRACE) {
            tracing::trace!(
                target: "rig::completions",
                "ChatGPT Responses streaming completion request: {}",
                serde_json::to_string_pretty(&request)?
            );
        }

        let body = serde_json::to_vec(&request)?;
        let auth = self
            .client
            .ext()
            .auth
            .auth_context()
            .await
            .map_err(|err| CompletionError::ProviderError(err.to_string()))?;

        let req = self
            .add_auth_headers(self.client.post("/responses")?, &auth)
            .body(body)
            .map_err(|err| CompletionError::HttpError(err.into()))?;

        // Reuse the caller's span if one is active; otherwise open our own.
        let span = if tracing::Span::current().is_disabled() {
            info_span!(
                target: "rig::completions",
                "chat_streaming",
                gen_ai.operation.name = "chat_streaming",
                gen_ai.provider.name = "chatgpt",
                gen_ai.request.model = self.model,
                gen_ai.response.id = tracing::field::Empty,
                gen_ai.response.model = tracing::field::Empty,
                gen_ai.usage.output_tokens = tracing::field::Empty,
                gen_ai.usage.input_tokens = tracing::field::Empty,
                gen_ai.usage.cache_read.input_tokens = tracing::field::Empty,
            )
        } else {
            tracing::Span::current()
        };

        let client = self.client.clone();
        // The backend may omit the SSE content-type header; accept it anyway.
        let event_source = crate::http_client::sse::GenericEventSource::new(client, req)
            .allow_missing_content_type();

        Ok(responses_api::streaming::stream_from_event_source(
            event_source,
            span,
            "ChatGPT",
        ))
    }
}
595
596fn default_user_agent() -> String {
597 format!(
598 "rig/{} ({} {}; {})",
599 env!("CARGO_PKG_VERSION"),
600 std::env::consts::OS,
601 std::env::consts::ARCH,
602 DEFAULT_ORIGINATOR
603 )
604}
605
606fn default_auth_file() -> Option<PathBuf> {
607 config_dir().map(|dir| dir.join("chatgpt").join("auth.json"))
608}
609
/// Platform config directory: `%APPDATA%` on Windows, otherwise
/// `$XDG_CONFIG_HOME` with a fallback to `$HOME/.config`.
fn config_dir() -> Option<PathBuf> {
    #[cfg(target_os = "windows")]
    {
        std::env::var_os("APPDATA").map(PathBuf::from)
    }

    #[cfg(not(target_os = "windows"))]
    {
        match std::env::var_os("XDG_CONFIG_HOME") {
            Some(xdg) => Some(PathBuf::from(xdg)),
            None => {
                let home = std::env::var_os("HOME")?;
                Some(PathBuf::from(home).join(".config"))
            }
        }
    }
}
623
624fn normalize_system_messages_into_instructions(
625 request: &mut ResponsesRequest,
626) -> Result<Option<String>, CompletionError> {
627 let mut system_instructions = Vec::new();
628 let mut filtered_items = Vec::new();
629
630 for item in request.input.clone() {
631 if let Some(system_text) = item.system_text() {
632 let system_text = system_text.trim();
633 if !system_text.is_empty() {
634 system_instructions.push(system_text.to_string());
635 }
636 } else {
637 filtered_items.push(item);
638 }
639 }
640
641 request.input = OneOrMany::many(filtered_items).map_err(|_| {
642 CompletionError::RequestError(
643 "ChatGPT responses request input must contain at least one non-system item".into(),
644 )
645 })?;
646
647 if system_instructions.is_empty() {
648 Ok(None)
649 } else {
650 Ok(Some(system_instructions.join("\n\n")))
651 }
652}
653
/// Combines the client-level default instructions with request-level ones.
///
/// Blank/whitespace-only request instructions are ignored. If the request
/// instructions already contain the default verbatim, they win unchanged;
/// otherwise the default is prepended, separated by a blank line.
fn merge_instructions(default_instructions: &str, existing_instructions: Option<&str>) -> String {
    let trimmed = existing_instructions
        .map(str::trim)
        .filter(|value| !value.is_empty());

    match trimmed {
        None => default_instructions.to_string(),
        Some(existing) if existing.contains(default_instructions) => existing.to_string(),
        Some(existing) => format!("{default_instructions}\n\n{existing}"),
    }
}
664
665#[cfg(test)]
666mod tests {
667 use super::*;
668
    // A minimal two-event SSE transcript parses into the final
    // `response.completed` payload.
    #[test]
    fn test_parse_chatgpt_sse_completion() {
        let body = r#"data: {"type":"response.output_text.delta","delta":"hi"}
data: {"type":"response.completed","response":{"id":"resp_1","object":"response","created_at":1,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-5","usage":{"input_tokens":1,"input_tokens_details":{"cached_tokens":0},"output_tokens":1,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":2},"output":[{"type":"message","id":"msg_1","status":"completed","role":"assistant","content":[{"type":"output_text","annotations":[],"text":"hi"}]}],"tools":[]}}
data: [DONE]"#;

        let response = responses_api::streaming::parse_sse_completion_body(body, "ChatGPT")
            .expect("expected response");
        assert_eq!(response.id, "resp_1");
        assert_eq!(response.model, "gpt-5");
    }
680
    // Building an OAuth-mode client must not require any environment setup.
    #[test]
    fn test_client_initialization() {
        let _client = crate::providers::chatgpt::Client::builder()
            .oauth()
            .build()
            .expect("Client::builder()");
    }
688
    // With no request-level instructions, the default is used verbatim.
    #[test]
    fn test_merge_instructions_uses_default_when_missing() {
        assert_eq!(
            merge_instructions(DEFAULT_INSTRUCTIONS, None),
            DEFAULT_INSTRUCTIONS
        );
    }
696
    // Request-level instructions are appended after the default.
    #[test]
    fn test_merge_instructions_appends_existing_request_instructions() {
        let merged = merge_instructions(DEFAULT_INSTRUCTIONS, Some("Respond tersely."));
        assert!(merged.starts_with(DEFAULT_INSTRUCTIONS));
        assert!(merged.ends_with("Respond tersely."));
    }
703
    // If the request instructions already contain the default, nothing is
    // prepended a second time.
    #[test]
    fn test_merge_instructions_avoids_duplicate_default() {
        let merged = merge_instructions(
            DEFAULT_INSTRUCTIONS,
            Some("You are ChatGPT, a helpful AI assistant.\n\nRespond tersely."),
        );
        assert_eq!(
            merged,
            "You are ChatGPT, a helpful AI assistant.\n\nRespond tersely."
        );
    }
715
    // Preamble and in-history system messages are hoisted into a single
    // instructions string; only the user message remains in the input.
    #[test]
    fn test_normalize_system_messages_into_instructions() {
        let completion_request = completion::CompletionRequest {
            model: Some("gpt-5.4".to_string()),
            preamble: Some("System one".to_string()),
            chat_history: OneOrMany::many(vec![
                completion::Message::system("System two"),
                completion::Message::user("hi"),
            ])
            .expect("history"),
            documents: Vec::new(),
            tools: Vec::new(),
            temperature: None,
            max_tokens: None,
            tool_choice: None,
            additional_params: None,
            output_schema: None,
        };
        let mut request = ResponsesRequest::try_from(("gpt-5.4".to_string(), completion_request))
            .expect("request");

        let instructions = normalize_system_messages_into_instructions(&mut request)
            .expect("normalize")
            .expect("instructions");

        assert_eq!(instructions, "System one\n\nSystem two");
        assert_eq!(request.input.len(), 1);
    }
744
    // `create_request` must strip `temperature` before it reaches the backend.
    #[test]
    fn test_create_request_drops_temperature() {
        let client = crate::providers::chatgpt::Client::builder()
            .oauth()
            .build()
            .expect("client");
        let model = ResponsesCompletionModel::new(client, GPT_5_3_CODEX);

        let request = model
            .create_request(completion::CompletionRequest {
                model: None,
                preamble: None,
                chat_history: OneOrMany::one(completion::Message::user("hello")),
                documents: Vec::new(),
                tools: Vec::new(),
                temperature: Some(0.5),
                max_tokens: None,
                tool_choice: None,
                additional_params: None,
                output_schema: None,
            })
            .expect("request");

        assert!(request.temperature.is_none());
    }
770
    // When the final payload carries an empty `output`, the fallback path
    // rebuilds the assistant text from the streamed deltas.
    #[tokio::test]
    async fn test_completion_response_from_sse_body_falls_back_to_streamed_text() {
        let body = r#"data: {"type":"response.output_text.delta","delta":"hi"}
data: {"type":"response.completed","response":{"id":"resp_1","object":"response","created_at":1,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-5","usage":{"input_tokens":1,"input_tokens_details":{"cached_tokens":0},"output_tokens":1,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":2},"output":[],"tools":[]}}
data: [DONE]"#;

        let raw_response = responses_api::streaming::parse_sse_completion_body(body, "ChatGPT")
            .expect("expected response");
        let response = responses_api::streaming::completion_response_from_sse_body(
            body,
            raw_response,
            "ChatGPT",
        )
        .await
        .expect("fallback response");

        let text: String = response
            .choice
            .iter()
            .filter_map(|content| match content {
                completion::AssistantContent::Text(text) => Some(text.text.as_str()),
                _ => None,
            })
            .collect();

        assert_eq!(text, "hi");
        assert_eq!(response.usage.total_tokens, 2);
798 }
799}