Skip to main content

rig/providers/openai/responses_api/
mod.rs

1//! The OpenAI Responses API.
2//!
3//! By default when creating a completion client, this is the API that gets used.
4//!
5//! If you'd like to switch back to the regular Completions API, you can do so by using the `.completions_api()` function - see below for an example:
6//! ```rust
7//! let openai_client = rig::providers::openai::Client::from_env();
8//! let model = openai_client.completion_model("gpt-4o").completions_api();
9//! ```
10use super::InputAudio;
11use super::completion::ToolChoice;
12use super::{Client, responses_api::streaming::StreamingCompletionResponse};
13use crate::completion::CompletionError;
14use crate::http_client;
15use crate::http_client::HttpClientExt;
16use crate::json_utils;
17use crate::message::{
18    AudioMediaType, Document, DocumentMediaType, DocumentSourceKind, ImageDetail, MessageError,
19    MimeType, Text,
20};
21use crate::one_or_many::string_or_one_or_many;
22
23use crate::wasm_compat::{WasmCompatSend, WasmCompatSync};
24use crate::{OneOrMany, completion, message};
25use serde::{Deserialize, Serialize};
26use serde_json::{Map, Value};
27use tracing::{Instrument, Level, enabled, info_span};
28
29use std::convert::Infallible;
30use std::ops::Add;
31use std::str::FromStr;
32
33pub mod streaming;
34
35/// The completion request type for OpenAI's Response API: <https://platform.openai.com/docs/api-reference/responses/create>
36/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
37#[derive(Debug, Deserialize, Serialize, Clone)]
38pub struct CompletionRequest {
39    /// Message inputs
40    pub input: OneOrMany<InputItem>,
41    /// The model name
42    pub model: String,
43    /// Instructions (also referred to as preamble, although in other APIs this would be the "system prompt")
44    #[serde(skip_serializing_if = "Option::is_none")]
45    pub instructions: Option<String>,
46    /// The maximum number of output tokens.
47    #[serde(skip_serializing_if = "Option::is_none")]
48    pub max_output_tokens: Option<u64>,
49    /// Toggle to true for streaming responses.
50    #[serde(skip_serializing_if = "Option::is_none")]
51    pub stream: Option<bool>,
52    /// The temperature. Set higher (up to a max of 1.0) for more creative responses.
53    #[serde(skip_serializing_if = "Option::is_none")]
54    pub temperature: Option<f64>,
55    /// Whether the LLM should be forced to use a tool before returning a response.
56    /// If none provided, the default option is "auto".
57    #[serde(skip_serializing_if = "Option::is_none")]
58    tool_choice: Option<ToolChoice>,
59    /// The tools you want to use. Currently this is limited to functions, but will be expanded on in future.
60    #[serde(skip_serializing_if = "Vec::is_empty")]
61    pub tools: Vec<ResponsesToolDefinition>,
62    /// Additional parameters
63    #[serde(flatten)]
64    pub additional_parameters: AdditionalParameters,
65}
66
67impl CompletionRequest {
68    pub fn with_structured_outputs<S>(mut self, schema_name: S, schema: serde_json::Value) -> Self
69    where
70        S: Into<String>,
71    {
72        self.additional_parameters.text = Some(TextConfig::structured_output(schema_name, schema));
73
74        self
75    }
76
77    pub fn with_reasoning(mut self, reasoning: Reasoning) -> Self {
78        self.additional_parameters.reasoning = Some(reasoning);
79
80        self
81    }
82}
83
84/// An input item for [`CompletionRequest`].
85#[derive(Debug, Deserialize, Clone)]
86pub struct InputItem {
87    /// The role of an input item/message.
88    /// Input messages should be Some(Role::User), and output messages should be Some(Role::Assistant).
89    /// Everything else should be None.
90    #[serde(skip_serializing_if = "Option::is_none")]
91    role: Option<Role>,
92    /// The input content itself.
93    #[serde(flatten)]
94    input: InputContent,
95}
96
97impl Serialize for InputItem {
98    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
99    where
100        S: serde::Serializer,
101    {
102        let mut value = serde_json::to_value(&self.input).map_err(serde::ser::Error::custom)?;
103        let map = value.as_object_mut().ok_or_else(|| {
104            serde::ser::Error::custom("Input content must serialize to an object")
105        })?;
106
107        if let Some(role) = &self.role
108            && !map.contains_key("role")
109        {
110            map.insert(
111                "role".to_string(),
112                serde_json::to_value(role).map_err(serde::ser::Error::custom)?,
113            );
114        }
115
116        value.serialize(serializer)
117    }
118}
119
120impl InputItem {
121    pub fn system_message(content: impl Into<String>) -> Self {
122        Self {
123            role: Some(Role::System),
124            input: InputContent::Message(Message::System {
125                content: OneOrMany::one(SystemContent::InputText {
126                    text: content.into(),
127                }),
128                name: None,
129            }),
130        }
131    }
132}
133
/// Message roles. Used by OpenAI Responses API to determine who created a given message.
/// Serialized in lowercase (`"user"`, `"assistant"`, `"system"`).
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    /// An end-user message.
    User,
    /// A model-generated message.
    Assistant,
    /// A system prompt / preamble message.
    System,
}
142
/// The type of content used in an [`InputItem`]. Additionally holds data for each type of input content.
///
/// Serialized with an internal `"type"` tag in `snake_case`
/// (e.g. `"function_call"`, `"function_call_output"`).
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    /// A user/assistant/system message.
    Message(Message),
    /// A reasoning item produced by the model.
    Reasoning(OpenAIReasoning),
    /// A tool/function call emitted by the model.
    FunctionCall(OutputFunctionCall),
    /// The result of a previously-issued function call.
    FunctionCallOutput(ToolResult),
}
152
/// A reasoning item from the OpenAI Responses API.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct OpenAIReasoning {
    // OpenAI-generated ID for this reasoning item; kept private and set when
    // converting from assistant reasoning content (see `TryFrom` below).
    id: String,
    /// Summary fragments of the model's reasoning.
    pub summary: Vec<ReasoningSummary>,
    /// Encrypted reasoning content, if returned by the API.
    // NOTE(review): unlike `status`, a `None` here serializes as an explicit
    // `null` (no `skip_serializing_if`) — confirm this is what the API expects.
    pub encrypted_content: Option<String>,
    /// Status of the reasoning item, when reported.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<ToolStatus>,
}
161
/// A single reasoning summary entry, tagged as `"summary_text"` on the wire.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ReasoningSummary {
    /// Plain-text reasoning summary.
    SummaryText { text: String },
}
167
168impl ReasoningSummary {
169    fn new(input: &str) -> Self {
170        Self::SummaryText {
171            text: input.to_string(),
172        }
173    }
174
175    pub fn text(&self) -> String {
176        let ReasoningSummary::SummaryText { text } = self;
177        text.clone()
178    }
179}
180
/// A tool result.
///
/// Sent back to the API as a `function_call_output` item
/// (see [`InputContent::FunctionCallOutput`]).
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ToolResult {
    /// The call ID of a tool (this should be linked to the call ID for a tool call, otherwise an error will be received)
    call_id: String,
    /// The result of a tool call.
    output: String,
    /// The status of a tool call (if used in a completion request, this should always be Completed)
    status: ToolStatus,
}
191
192impl From<Message> for InputItem {
193    fn from(value: Message) -> Self {
194        match value {
195            Message::User { .. } => Self {
196                role: Some(Role::User),
197                input: InputContent::Message(value),
198            },
199            Message::Assistant { ref content, .. } => {
200                let role = if content
201                    .clone()
202                    .iter()
203                    .any(|x| matches!(x, AssistantContentType::Reasoning(_)))
204                {
205                    None
206                } else {
207                    Some(Role::Assistant)
208                };
209                Self {
210                    role,
211                    input: InputContent::Message(value),
212                }
213            }
214            Message::System { .. } => Self {
215                role: Some(Role::System),
216                input: InputContent::Message(value),
217            },
218            Message::ToolResult {
219                tool_call_id,
220                output,
221            } => Self {
222                role: None,
223                input: InputContent::FunctionCallOutput(ToolResult {
224                    call_id: tool_call_id,
225                    output,
226                    status: ToolStatus::Completed,
227                }),
228            },
229        }
230    }
231}
232
/// Converts a provider-agnostic [`crate::completion::Message`] into the list
/// of Responses API input items it expands to.
///
/// A single rig message can fan out into several items (e.g. a user message
/// containing both text and multiple tool results), hence the `Vec` target.
///
/// # Errors
/// Returns [`CompletionError`] for content the Responses API cannot express
/// here: non-text tool results, raw (non-base64) binary data, unsupported
/// document kinds, and assistant image content.
impl TryFrom<crate::completion::Message> for Vec<InputItem> {
    type Error = CompletionError;

    fn try_from(value: crate::completion::Message) -> Result<Self, Self::Error> {
        match value {
            crate::completion::Message::User { content } => {
                let mut items = Vec::new();

                for user_content in content {
                    match user_content {
                        // Plain text becomes an `input_text` user message.
                        crate::message::UserContent::Text(Text { text }) => {
                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputText { text }),
                                    name: None,
                                }),
                            });
                        }
                        // Tool results are emitted as role-less
                        // `function_call_output` items; only text payloads
                        // are supported.
                        crate::message::UserContent::ToolResult(
                            crate::completion::message::ToolResult {
                                call_id,
                                content: tool_content,
                                ..
                            },
                        ) => {
                            for tool_result_content in tool_content {
                                let crate::completion::message::ToolResultContent::Text(Text {
                                    text,
                                }) = tool_result_content
                                else {
                                    return Err(CompletionError::ProviderError(
                                        "This thing only supports text!".to_string(),
                                    ));
                                };
                                items.push(InputItem {
                                    role: None,
                                    input: InputContent::FunctionCallOutput(ToolResult {
                                        // NOTE(review): panics when the tool
                                        // result has no call ID — consider
                                        // returning an error instead.
                                        call_id: call_id
                                            .clone()
                                            .expect("The call ID of this tool should exist!"),
                                        output: text,
                                        status: ToolStatus::Completed,
                                    }),
                                });
                            }
                        }
                        // PDFs are forwarded as input files, either inlined
                        // as a base64 data URL or referenced by URL.
                        crate::message::UserContent::Document(Document {
                            data,
                            media_type: Some(DocumentMediaType::PDF),
                            ..
                        }) => {
                            let (file_data, file_url) = match data {
                                DocumentSourceKind::Base64(data) => {
                                    (Some(format!("data:application/pdf;base64,{data}")), None)
                                }
                                DocumentSourceKind::Url(url) => (None, Some(url)),
                                DocumentSourceKind::Raw(_) => {
                                    return Err(CompletionError::RequestError(
                                        "Raw file data not supported, encode as base64 first"
                                            .into(),
                                    ));
                                }
                                doc => {
                                    return Err(CompletionError::RequestError(
                                        format!("Unsupported document type: {doc}").into(),
                                    ));
                                }
                            };

                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputFile {
                                        file_data,
                                        file_url,
                                        // The API wants a filename; the
                                        // original name is not available here.
                                        filename: Some("document.pdf".to_string()),
                                    }),
                                    name: None,
                                }),
                            })
                        }
                        // todo: should we ensure this takes into account file size?
                        // Non-PDF base64 documents are passed through as text.
                        crate::message::UserContent::Document(Document {
                            data: DocumentSourceKind::Base64(text),
                            ..
                        }) => items.push(InputItem {
                            role: Some(Role::User),
                            input: InputContent::Message(Message::User {
                                content: OneOrMany::one(UserContent::InputText { text }),
                                name: None,
                            }),
                        }),
                        // Plain-string documents become text as well.
                        crate::message::UserContent::Document(Document {
                            data: DocumentSourceKind::String(text),
                            ..
                        }) => items.push(InputItem {
                            role: Some(Role::User),
                            input: InputContent::Message(Message::User {
                                content: OneOrMany::one(UserContent::InputText { text }),
                                name: None,
                            }),
                        }),
                        // Images become `input_image` items, inlined as a
                        // data URL when supplied as base64.
                        crate::message::UserContent::Image(crate::message::Image {
                            data,
                            media_type,
                            detail,
                            ..
                        }) => {
                            let url = match data {
                                DocumentSourceKind::Base64(data) => {
                                    // Missing media type produces "data:;base64,…"
                                    // — the API is left to sniff the format.
                                    let media_type = if let Some(media_type) = media_type {
                                        media_type.to_mime_type().to_string()
                                    } else {
                                        String::new()
                                    };
                                    format!("data:{media_type};base64,{data}")
                                }
                                DocumentSourceKind::Url(url) => url,
                                DocumentSourceKind::Raw(_) => {
                                    return Err(CompletionError::RequestError(
                                        "Raw file data not supported, encode as base64 first"
                                            .into(),
                                    ));
                                }
                                doc => {
                                    return Err(CompletionError::RequestError(
                                        format!("Unsupported document type: {doc}").into(),
                                    ));
                                }
                            };
                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputImage {
                                        image_url: url,
                                        detail: detail.unwrap_or_default(),
                                    }),
                                    name: None,
                                }),
                            });
                        }
                        // Anything else (e.g. audio) is not representable.
                        message => {
                            return Err(CompletionError::ProviderError(format!(
                                "Unsupported message: {message:?}"
                            )));
                        }
                    }
                }

                Ok(items)
            }
            crate::completion::Message::Assistant { id, content } => {
                let mut items = Vec::new();

                for assistant_content in content {
                    match assistant_content {
                        // Assistant text becomes an `output_text` message,
                        // reusing the message ID (empty string if absent).
                        crate::message::AssistantContent::Text(Text { text }) => {
                            let id = id.as_ref().unwrap_or(&String::default()).clone();
                            items.push(InputItem {
                                role: Some(Role::Assistant),
                                input: InputContent::Message(Message::Assistant {
                                    content: OneOrMany::one(AssistantContentType::Text(
                                        AssistantContent::OutputText(Text { text }),
                                    )),
                                    id,
                                    name: None,
                                    status: ToolStatus::Completed,
                                }),
                            });
                        }
                        // Prior tool calls are replayed as `function_call`
                        // items so the API can match them to their outputs.
                        crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
                            id: tool_id,
                            call_id,
                            function,
                            ..
                        }) => {
                            items.push(InputItem {
                                role: None,
                                input: InputContent::FunctionCall(OutputFunctionCall {
                                    arguments: function.arguments,
                                    // NOTE(review): panics when the call ID is
                                    // missing — consider an error instead.
                                    call_id: call_id.expect("The tool call ID should exist!"),
                                    id: tool_id,
                                    name: function.name,
                                    status: ToolStatus::Completed,
                                }),
                            });
                        }
                        // Reasoning content is replayed as a reasoning item;
                        // the OpenAI-assigned ID is mandatory.
                        crate::message::AssistantContent::Reasoning(
                            crate::message::Reasoning { id, reasoning, .. },
                        ) => {
                            items.push(InputItem {
                                role: None,
                                input: InputContent::Reasoning(OpenAIReasoning {
                                    id: id
                                        .expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
                                    summary: reasoning.into_iter().map(|x| ReasoningSummary::new(&x)).collect(),
                                    encrypted_content: None,
                                    status: None,
                                }),
                            });
                        }
                        crate::message::AssistantContent::Image(_) => {
                            return Err(CompletionError::ProviderError(
                                "Assistant image content is not supported in OpenAI Responses API"
                                    .to_string(),
                            ));
                        }
                    }
                }

                Ok(items)
            }
        }
    }
}
450
451impl From<OneOrMany<String>> for Vec<ReasoningSummary> {
452    fn from(value: OneOrMany<String>) -> Self {
453        value.iter().map(|x| ReasoningSummary::new(x)).collect()
454    }
455}
456
/// The definition of a tool response, repurposed for OpenAI's Responses API.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ResponsesToolDefinition {
    /// Tool name
    pub name: String,
    /// Parameters - this should be a JSON schema. Tools should additionally ensure an "additionalProperties" field has been added with the value set to false, as this is required if using OpenAI's strict mode (enabled by default).
    pub parameters: serde_json::Value,
    /// Whether to use strict mode. Enabled by default as it allows for improved efficiency.
    pub strict: bool,
    /// The type of tool. This should always be "function".
    #[serde(rename = "type")]
    pub kind: String,
    /// Tool description.
    pub description: String,
}
472
473impl From<completion::ToolDefinition> for ResponsesToolDefinition {
474    fn from(value: completion::ToolDefinition) -> Self {
475        let completion::ToolDefinition {
476            name,
477            mut parameters,
478            description,
479        } = value;
480
481        super::sanitize_schema(&mut parameters);
482
483        Self {
484            name,
485            parameters,
486            description,
487            kind: "function".to_string(),
488            strict: true,
489        }
490    }
491}
492
/// Token usage.
/// Token usage from the OpenAI Responses API generally shows the input tokens and output tokens (both with more in-depth details) as well as a total tokens field.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ResponsesUsage {
    /// Input tokens
    pub input_tokens: u64,
    /// In-depth detail on input tokens (cached tokens)
    // Optional because it is omitted from serialization when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_tokens_details: Option<InputTokensDetails>,
    /// Output tokens
    pub output_tokens: u64,
    /// In-depth detail on output tokens (reasoning tokens)
    pub output_tokens_details: OutputTokensDetails,
    /// Total tokens used (for a given prompt)
    pub total_tokens: u64,
}
509
510impl ResponsesUsage {
511    /// Create a new ResponsesUsage instance
512    pub(crate) fn new() -> Self {
513        Self {
514            input_tokens: 0,
515            input_tokens_details: Some(InputTokensDetails::new()),
516            output_tokens: 0,
517            output_tokens_details: OutputTokensDetails::new(),
518            total_tokens: 0,
519        }
520    }
521}
522
523impl Add for ResponsesUsage {
524    type Output = Self;
525
526    fn add(self, rhs: Self) -> Self::Output {
527        let input_tokens = self.input_tokens + rhs.input_tokens;
528        let input_tokens_details = self.input_tokens_details.map(|lhs| {
529            if let Some(tokens) = rhs.input_tokens_details {
530                lhs + tokens
531            } else {
532                lhs
533            }
534        });
535        let output_tokens = self.output_tokens + rhs.output_tokens;
536        let output_tokens_details = self.output_tokens_details + rhs.output_tokens_details;
537        let total_tokens = self.total_tokens + rhs.total_tokens;
538        Self {
539            input_tokens,
540            input_tokens_details,
541            output_tokens,
542            output_tokens_details,
543            total_tokens,
544        }
545    }
546}
547
/// In-depth details on input tokens.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct InputTokensDetails {
    /// Cached tokens from OpenAI
    pub cached_tokens: u64,
}
554
555impl InputTokensDetails {
556    pub(crate) fn new() -> Self {
557        Self { cached_tokens: 0 }
558    }
559}
560
561impl Add for InputTokensDetails {
562    type Output = Self;
563    fn add(self, rhs: Self) -> Self::Output {
564        Self {
565            cached_tokens: self.cached_tokens + rhs.cached_tokens,
566        }
567    }
568}
569
/// In-depth details on output tokens.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct OutputTokensDetails {
    /// Reasoning tokens
    pub reasoning_tokens: u64,
}
576
577impl OutputTokensDetails {
578    pub(crate) fn new() -> Self {
579        Self {
580            reasoning_tokens: 0,
581        }
582    }
583}
584
585impl Add for OutputTokensDetails {
586    type Output = Self;
587    fn add(self, rhs: Self) -> Self::Output {
588        Self {
589            reasoning_tokens: self.reasoning_tokens + rhs.reasoning_tokens,
590        }
591    }
592}
593
/// Occasionally, when using OpenAI's Responses API you may get an incomplete response. This struct holds the reason as to why it happened.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct IncompleteDetailsReason {
    /// The reason for an incomplete [`CompletionResponse`].
    // NOTE(review): presumably API-defined strings such as
    // "max_output_tokens" — confirm against the Responses API docs.
    pub reason: String,
}
600
/// A response error from OpenAI's Response API.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ResponseError {
    /// Error code
    pub code: String,
    /// Error message
    pub message: String,
}
609
/// A response object as an enum (ensures type validation)
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ResponseObject {
    /// The only accepted value: the literal string `"response"`.
    Response,
}
616
/// The response status as an enum (ensures type validation)
/// Serialized in `snake_case` (e.g. `"in_progress"`).
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ResponseStatus {
    /// The response is still being generated.
    InProgress,
    /// The response finished successfully.
    Completed,
    /// The response failed.
    Failed,
    /// The response was cancelled.
    Cancelled,
    /// The response is queued for processing.
    Queued,
    /// The response ended early; see [`IncompleteDetailsReason`].
    Incomplete,
}
628
629/// Attempt to try and create a `NewCompletionRequest` from a model name and [`crate::completion::CompletionRequest`]
630impl TryFrom<(String, crate::completion::CompletionRequest)> for CompletionRequest {
631    type Error = CompletionError;
632    fn try_from(
633        (model, req): (String, crate::completion::CompletionRequest),
634    ) -> Result<Self, Self::Error> {
635        let input = {
636            let mut partial_history = vec![];
637            if let Some(docs) = req.normalized_documents() {
638                partial_history.push(docs);
639            }
640            partial_history.extend(req.chat_history);
641
642            // Initialize full history with preamble (or empty if non-existent)
643            // Some "Responses API compatible" providers don't support `instructions` field
644            // so we need to add a system message until further notice
645            let mut full_history: Vec<InputItem> = if let Some(content) = req.preamble {
646                vec![InputItem::system_message(content)]
647            } else {
648                Vec::new()
649            };
650
651            // Convert and extend the rest of the history
652            full_history.extend(
653                partial_history
654                    .into_iter()
655                    .map(|x| <Vec<InputItem>>::try_from(x).unwrap())
656                    .collect::<Vec<Vec<InputItem>>>()
657                    .into_iter()
658                    .flatten()
659                    .collect::<Vec<InputItem>>(),
660            );
661
662            full_history
663        };
664
665        let input = OneOrMany::many(input)
666            .expect("This should never panic - if it does, please file a bug report");
667
668        let stream = req
669            .additional_params
670            .clone()
671            .unwrap_or(Value::Null)
672            .as_bool();
673
674        let additional_parameters = if let Some(map) = req.additional_params {
675            serde_json::from_value::<AdditionalParameters>(map).expect("Converting additional parameters to AdditionalParameters should never fail as every field is an Option")
676        } else {
677            // If there's no additional parameters, initialise an empty object
678            AdditionalParameters::default()
679        };
680
681        let tool_choice = req.tool_choice.map(ToolChoice::try_from).transpose()?;
682
683        Ok(Self {
684            input,
685            model,
686            instructions: None, // is currently None due to lack of support in compliant providers
687            max_output_tokens: req.max_tokens,
688            stream,
689            tool_choice,
690            tools: req
691                .tools
692                .into_iter()
693                .map(ResponsesToolDefinition::from)
694                .collect(),
695            temperature: req.temperature,
696            additional_parameters,
697        })
698    }
699}
700
/// The completion model struct for OpenAI's response API.
/// The HTTP client type defaults to [`reqwest::Client`].
#[derive(Clone)]
pub struct ResponsesCompletionModel<T = reqwest::Client> {
    /// The OpenAI client
    pub(crate) client: Client<T>,
    /// Name of the model (e.g.: gpt-3.5-turbo-1106)
    pub model: String,
}
709
710impl<T> ResponsesCompletionModel<T>
711where
712    T: HttpClientExt + Clone + Default + std::fmt::Debug + 'static,
713{
714    /// Creates a new [`ResponsesCompletionModel`].
715    pub fn new(client: Client<T>, model: impl Into<String>) -> Self {
716        Self {
717            client,
718            model: model.into(),
719        }
720    }
721
722    pub fn with_model(client: Client<T>, model: &str) -> Self {
723        Self {
724            client,
725            model: model.to_string(),
726        }
727    }
728
729    /// Use the Completions API instead of Responses.
730    pub fn completions_api(self) -> crate::providers::openai::completion::CompletionModel<T> {
731        super::completion::CompletionModel::with_model(self.client.completions_api(), &self.model)
732    }
733
734    /// Attempt to create a completion request from [`crate::completion::CompletionRequest`].
735    pub(crate) fn create_completion_request(
736        &self,
737        completion_request: crate::completion::CompletionRequest,
738    ) -> Result<CompletionRequest, CompletionError> {
739        let req = CompletionRequest::try_from((self.model.clone(), completion_request))?;
740
741        Ok(req)
742    }
743}
744
/// The standard response format from OpenAI's Responses API.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompletionResponse {
    /// The ID of a completion response.
    pub id: String,
    /// The type of the object.
    pub object: ResponseObject,
    /// The time at which a given response has been created, in seconds from the UNIX epoch (01/01/1970 00:00:00).
    pub created_at: u64,
    /// The status of the response.
    pub status: ResponseStatus,
    /// Response error (optional)
    pub error: Option<ResponseError>,
    /// Incomplete response details (optional)
    pub incomplete_details: Option<IncompleteDetailsReason>,
    /// System prompt/preamble
    pub instructions: Option<String>,
    /// The maximum number of tokens the model should output
    pub max_output_tokens: Option<u64>,
    /// The model name
    pub model: String,
    /// Token usage
    pub usage: Option<ResponsesUsage>,
    /// The model output (messages, etc will go here)
    pub output: Vec<Output>,
    /// Tools
    // `default` lets responses without a `tools` key deserialize to an empty list.
    #[serde(default)]
    pub tools: Vec<ResponsesToolDefinition>,
    /// Additional parameters
    #[serde(flatten)]
    pub additional_parameters: AdditionalParameters,
}
777
/// Additional parameters for the completion request type for OpenAI's Response API: <https://platform.openai.com/docs/api-reference/responses/create>
/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
///
/// Every field is optional and is skipped during serialization when unset, so
/// only explicitly configured parameters reach the wire.
#[derive(Clone, Debug, Deserialize, Serialize, Default)]
pub struct AdditionalParameters {
    /// Whether or not a given model task should run in the background (ie a detached process).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,
    /// The text response format. This is where you would add structured outputs (if you want them).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<TextConfig>,
    /// What types of extra data you would like to include. This is mostly useless at the moment since the types of extra data to add is currently unsupported, but this will be coming soon!
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include: Option<Vec<Include>>,
    /// `top_p`. Mutually exclusive with the `temperature` argument.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f64>,
    /// Whether or not the response should be truncated.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<TruncationStrategy>,
    /// The username of the user (that you want to use).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user: Option<String>,
    /// Any additional metadata you'd like to add. This will additionally be returned by the response.
    /// Skipped when empty; defaults to an empty map on deserialization.
    #[serde(skip_serializing_if = "Map::is_empty", default)]
    pub metadata: serde_json::Map<String, serde_json::Value>,
    /// Whether or not you want tool calls to run in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,
    /// Previous response ID. If you are not sending a full conversation, this can help to track the message flow.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,
    /// Add thinking/reasoning to your response. The response will be emitted as a list member of the `output` field.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,
    /// The service tier you're using.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<OpenAIServiceTier>,
    /// Whether or not to store the response for later retrieval by API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub store: Option<bool>,
}
819
820impl AdditionalParameters {
821    pub fn to_json(self) -> serde_json::Value {
822        serde_json::to_value(self).expect("this should never fail since a struct that impls Deserialize will always be valid JSON")
823    }
824}
825
/// The truncation strategy.
/// When using auto, if the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.
/// Otherwise, does nothing (and is disabled by default).
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum TruncationStrategy {
    /// Let the model drop middle-of-conversation input items to fit the context window.
    Auto,
    /// No truncation (the default).
    #[default]
    Disabled,
}
836
/// The model output format configuration.
/// You can either have plain text by default, or attach a JSON schema for the purposes of structured outputs.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TextConfig {
    /// The concrete output format (plain text or a JSON schema).
    pub format: TextFormat,
}
843
844impl TextConfig {
845    pub(crate) fn structured_output<S>(name: S, schema: serde_json::Value) -> Self
846    where
847        S: Into<String>,
848    {
849        Self {
850            format: TextFormat::JsonSchema(StructuredOutputsInput {
851                name: name.into(),
852                schema,
853                strict: true,
854            }),
855        }
856    }
857}
858
/// The text format (contained by [`TextConfig`]).
/// You can either have plain text by default, or attach a JSON schema for the purposes of structured outputs.
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum TextFormat {
    /// Structured outputs constrained by a JSON schema (serialized as `json_schema`).
    JsonSchema(StructuredOutputsInput),
    /// Plain text output (the default).
    #[default]
    Text,
}
869
/// The inputs required for adding structured outputs.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StructuredOutputsInput {
    /// The name of your schema.
    pub name: String,
    /// Your required output schema. It is recommended that you use the JsonSchema macro, which you can check out at <https://docs.rs/schemars/latest/schemars/trait.JsonSchema.html>.
    pub schema: serde_json::Value,
    /// Enable strict output. If you are using your AI agent in a data pipeline or another scenario that requires the data to be absolutely fixed to a given schema, it is recommended to set this to true.
    pub strict: bool,
}
880
881/// Add reasoning to a [`CompletionRequest`].
882#[derive(Clone, Debug, Default, Serialize, Deserialize)]
883pub struct Reasoning {
884    /// How much effort you want the model to put into thinking/reasoning.
885    pub effort: Option<ReasoningEffort>,
886    /// How much effort you want the model to put into writing the reasoning summary.
887    #[serde(skip_serializing_if = "Option::is_none")]
888    pub summary: Option<ReasoningSummaryLevel>,
889}
890
891impl Reasoning {
892    /// Creates a new Reasoning instantiation (with empty values).
893    pub fn new() -> Self {
894        Self {
895            effort: None,
896            summary: None,
897        }
898    }
899
900    /// Adds reasoning effort.
901    pub fn with_effort(mut self, reasoning_effort: ReasoningEffort) -> Self {
902        self.effort = Some(reasoning_effort);
903
904        self
905    }
906
907    /// Adds summary level (how detailed the reasoning summary will be).
908    pub fn with_summary_level(mut self, reasoning_summary_level: ReasoningSummaryLevel) -> Self {
909        self.summary = Some(reasoning_summary_level);
910
911        self
912    }
913}
914
/// The billing service tier that will be used. On auto by default.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum OpenAIServiceTier {
    /// Let OpenAI pick the tier (the default).
    #[default]
    Auto,
    /// The standard tier.
    Default,
    /// The flex tier.
    Flex,
}
924
/// The amount of reasoning effort that will be used by a given model.
/// Serialized in snake_case (e.g. `medium`, `xhigh`).
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ReasoningEffort {
    None,
    Minimal,
    Low,
    /// The default effort level.
    #[default]
    Medium,
    High,
    Xhigh,
}
937
/// The amount of effort that will go into a reasoning summary by a given model.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ReasoningSummaryLevel {
    /// Let the model decide (the default).
    #[default]
    Auto,
    Concise,
    Detailed,
}
947
/// Results to additionally include in the OpenAI Responses API.
/// Note that most of these are currently unsupported, but have been added for completeness.
/// Each variant serializes to the exact dotted string OpenAI expects.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Include {
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    #[serde(rename = "computer_call.output.image_url")]
    ComputerCallOutputOutputImageUrl,
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
}
963
/// A currently non-exhaustive list of output types.
/// Internally tagged by the `type` field.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum Output {
    /// A regular assistant message.
    Message(OutputMessage),
    // The alias is redundant with `rename_all = "snake_case"` (which already
    // yields `function_call`), but is kept as-is.
    #[serde(alias = "function_call")]
    FunctionCall(OutputFunctionCall),
    /// A reasoning item with its summary entries.
    Reasoning {
        id: String,
        summary: Vec<ReasoningSummary>,
    },
}
977
978impl From<Output> for Vec<completion::AssistantContent> {
979    fn from(value: Output) -> Self {
980        let res: Vec<completion::AssistantContent> = match value {
981            Output::Message(OutputMessage { content, .. }) => content
982                .into_iter()
983                .map(completion::AssistantContent::from)
984                .collect(),
985            Output::FunctionCall(OutputFunctionCall {
986                id,
987                arguments,
988                call_id,
989                name,
990                ..
991            }) => vec![completion::AssistantContent::tool_call_with_call_id(
992                id, call_id, name, arguments,
993            )],
994            Output::Reasoning { id, summary } => {
995                let summary: Vec<String> = summary.into_iter().map(|x| x.text()).collect();
996
997                vec![completion::AssistantContent::Reasoning(
998                    message::Reasoning::multi(summary).with_id(id),
999                )]
1000            }
1001        };
1002
1003        res
1004    }
1005}
1006
/// A reasoning output item as represented by the Responses API.
// NOTE(review): fields are private and this type is not referenced anywhere in
// this chunk — presumably retained for (de)serialization elsewhere; verify.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputReasoning {
    // The reasoning item ID.
    id: String,
    // The summary entries attached to the reasoning item.
    summary: Vec<ReasoningSummary>,
    // Completion status of the reasoning item.
    status: ToolStatus,
}
1013
/// An OpenAI Responses API tool call. A call ID will be returned that must be used when creating a tool result to send back to OpenAI as a message input, otherwise an error will be received.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputFunctionCall {
    /// The output item ID.
    pub id: String,
    /// Tool arguments; stored as JSON but (de)serialized to/from a JSON-encoded string on the wire.
    #[serde(with = "json_utils::stringified_json")]
    pub arguments: serde_json::Value,
    /// The call ID that must be echoed back with the tool result.
    pub call_id: String,
    /// The tool/function name.
    pub name: String,
    /// Completion status of the call.
    pub status: ToolStatus,
}
1024
/// The status of a given tool.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ToolStatus {
    /// The tool call is still running.
    InProgress,
    /// The tool call finished successfully.
    Completed,
    /// The tool call did not finish.
    Incomplete,
}
1033
/// An output message from OpenAI's Responses API.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputMessage {
    /// The message ID. Must be included when sending the message back to OpenAI
    pub id: String,
    /// The role (currently only Assistant is available as this struct is only created when receiving an LLM message as a response)
    pub role: OutputRole,
    /// The status of the response
    pub status: ResponseStatus,
    /// The actual message content
    pub content: Vec<AssistantContent>,
}
1046
/// The role of an output message.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OutputRole {
    /// The only role a model-generated output message can carry.
    Assistant,
}
1053
impl<T> completion::CompletionModel for ResponsesCompletionModel<T>
where
    T: HttpClientExt
        + Clone
        + std::fmt::Debug
        + Default
        + WasmCompatSend
        + WasmCompatSync
        + 'static,
{
    type Response = CompletionResponse;
    type StreamingResponse = StreamingCompletionResponse;

    type Client = super::Client<T>;

    // Build a model handle from a client and a model name.
    fn make(client: &Self::Client, model: impl Into<String>) -> Self {
        Self::new(client.clone(), model)
    }

    // Send a completion request to `POST /responses` and convert the reply
    // into rig's provider-agnostic response type.
    async fn completion(
        &self,
        completion_request: crate::completion::CompletionRequest,
    ) -> Result<completion::CompletionResponse<Self::Response>, CompletionError> {
        // Reuse the caller's span when one is active; otherwise open a fresh
        // GenAI-convention span whose empty fields are recorded as data arrives.
        let span = if tracing::Span::current().is_disabled() {
            info_span!(
                target: "rig::completions",
                "chat",
                gen_ai.operation.name = "chat",
                gen_ai.provider.name = tracing::field::Empty,
                gen_ai.request.model = tracing::field::Empty,
                gen_ai.response.id = tracing::field::Empty,
                gen_ai.response.model = tracing::field::Empty,
                gen_ai.usage.output_tokens = tracing::field::Empty,
                gen_ai.usage.input_tokens = tracing::field::Empty,
                gen_ai.input.messages = tracing::field::Empty,
                gen_ai.output.messages = tracing::field::Empty,
            )
        } else {
            tracing::Span::current()
        };

        span.record("gen_ai.provider.name", "openai");
        span.record("gen_ai.request.model", &self.model);
        let request = self.create_completion_request(completion_request)?;
        let body = serde_json::to_vec(&request)?;

        // Only pay for pretty-printing the request when TRACE logging is on.
        if enabled!(Level::TRACE) {
            tracing::trace!(
                target: "rig::completions",
                "OpenAI Responses completion request: {request}",
                request = serde_json::to_string_pretty(&request)?
            );
        }

        let req = self
            .client
            .post("/responses")?
            .body(body)
            .map_err(|e| CompletionError::HttpError(e.into()))?;

        async move {
            let response = self.client.send(req).await?;

            if response.status().is_success() {
                let t = http_client::text(response).await?;
                let response = serde_json::from_str::<Self::Response>(&t)?;
                // Record response metadata and token usage on the span.
                let span = tracing::Span::current();
                span.record("gen_ai.response.id", &response.id);
                span.record("gen_ai.response.model", &response.model);
                if let Some(ref usage) = response.usage {
                    span.record("gen_ai.usage.output_tokens", usage.output_tokens);
                    span.record("gen_ai.usage.input_tokens", usage.input_tokens);
                }
                if enabled!(Level::TRACE) {
                    tracing::trace!(
                        target: "rig::completions",
                        "OpenAI Responses completion response: {response}",
                        response = serde_json::to_string_pretty(&response)?
                    );
                }
                response.try_into()
            } else {
                // Non-2xx: surface the raw response body as a provider error.
                let text = http_client::text(response).await?;
                Err(CompletionError::ProviderError(text))
            }
        }
        .instrument(span)
        .await
    }

    // Delegate to the inherent streaming implementation on the model.
    async fn stream(
        &self,
        request: crate::completion::CompletionRequest,
    ) -> Result<
        crate::streaming::StreamingCompletionResponse<Self::StreamingResponse>,
        CompletionError,
    > {
        ResponsesCompletionModel::stream(self, request).await
    }
}
1154
1155impl TryFrom<CompletionResponse> for completion::CompletionResponse<CompletionResponse> {
1156    type Error = CompletionError;
1157
1158    fn try_from(response: CompletionResponse) -> Result<Self, Self::Error> {
1159        if response.output.is_empty() {
1160            return Err(CompletionError::ResponseError(
1161                "Response contained no parts".to_owned(),
1162            ));
1163        }
1164
1165        let content: Vec<completion::AssistantContent> = response
1166            .output
1167            .iter()
1168            .cloned()
1169            .flat_map(<Vec<completion::AssistantContent>>::from)
1170            .collect();
1171
1172        let choice = OneOrMany::many(content).map_err(|_| {
1173            CompletionError::ResponseError(
1174                "Response contained no message or tool call (empty)".to_owned(),
1175            )
1176        })?;
1177
1178        let usage = response
1179            .usage
1180            .as_ref()
1181            .map(|usage| completion::Usage {
1182                input_tokens: usage.input_tokens,
1183                output_tokens: usage.output_tokens,
1184                total_tokens: usage.total_tokens,
1185                cached_input_tokens: usage
1186                    .input_tokens_details
1187                    .as_ref()
1188                    .map(|d| d.cached_tokens)
1189                    .unwrap_or(0),
1190            })
1191            .unwrap_or_default();
1192
1193        Ok(completion::CompletionResponse {
1194            choice,
1195            usage,
1196            raw_response: response,
1197        })
1198    }
1199}
1200
/// An OpenAI Responses API message.
/// Externally tagged by the `role` field (lowercased variant name).
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "role", rename_all = "lowercase")]
pub enum Message {
    /// A system message; also accepts the `developer` role on deserialization.
    #[serde(alias = "developer")]
    System {
        /// System content; deserializes from either a bare string or a list.
        #[serde(deserialize_with = "string_or_one_or_many")]
        content: OneOrMany<SystemContent>,
        /// Optional participant name.
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
    },
    /// A user message.
    User {
        /// User content; deserializes from either a bare string or a list.
        #[serde(deserialize_with = "string_or_one_or_many")]
        content: OneOrMany<UserContent>,
        /// Optional participant name.
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
    },
    /// An assistant message.
    Assistant {
        content: OneOrMany<AssistantContentType>,
        /// Message ID; omitted from serialization when empty.
        #[serde(skip_serializing_if = "String::is_empty")]
        id: String,
        /// Optional participant name.
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
        status: ToolStatus,
    },
    /// A tool result, serialized with the `tool` role.
    #[serde(rename = "tool")]
    ToolResult {
        tool_call_id: String,
        output: String,
    },
}
1232
/// The type of a tool result content item.
/// Currently only text is supported.
#[derive(Default, Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(rename_all = "lowercase")]
pub enum ToolResultContentType {
    #[default]
    Text,
}
1240
1241impl Message {
1242    pub fn system(content: &str) -> Self {
1243        Message::System {
1244            content: OneOrMany::one(content.to_owned().into()),
1245            name: None,
1246        }
1247    }
1248}
1249
/// Text assistant content.
/// Note that the text type in comparison to the Completions API is actually `output_text` rather than `text`.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum AssistantContent {
    /// Regular text output (serialized as `output_text`).
    OutputText(Text),
    /// A refusal from the model.
    Refusal { refusal: String },
}
1258
1259impl From<AssistantContent> for completion::AssistantContent {
1260    fn from(value: AssistantContent) -> Self {
1261        match value {
1262            AssistantContent::Refusal { refusal } => {
1263                completion::AssistantContent::Text(Text { text: refusal })
1264            }
1265            AssistantContent::OutputText(Text { text }) => {
1266                completion::AssistantContent::Text(Text { text })
1267            }
1268        }
1269    }
1270}
1271
/// The type of assistant content.
// Untagged: variants are tried in declaration order during deserialization,
// so the first shape that matches the JSON wins.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(untagged)]
pub enum AssistantContentType {
    Text(AssistantContent),
    ToolCall(OutputFunctionCall),
    Reasoning(OpenAIReasoning),
}
1280
/// System content for the OpenAI Responses API.
/// Uses `input_text` type to match the Responses API format.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SystemContent {
    InputText { text: String },
}
1288
1289impl From<String> for SystemContent {
1290    fn from(s: String) -> Self {
1291        SystemContent::InputText { text: s }
1292    }
1293}
1294
1295impl std::str::FromStr for SystemContent {
1296    type Err = std::convert::Infallible;
1297
1298    fn from_str(s: &str) -> Result<Self, Self::Err> {
1299        Ok(SystemContent::InputText {
1300            text: s.to_string(),
1301        })
1302    }
1303}
1304
/// Different types of user content.
/// Internally tagged by the `type` field (snake_cased variant name).
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum UserContent {
    /// Plain text input.
    InputText {
        text: String,
    },
    /// An image, referenced by URL or inline data URL.
    InputImage {
        image_url: String,
        #[serde(default)]
        detail: ImageDetail,
    },
    /// A file input; exactly one of `file_url`/`file_data` is expected to be set.
    InputFile {
        #[serde(skip_serializing_if = "Option::is_none")]
        file_url: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        file_data: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        filename: Option<String>,
    },
    /// Audio input.
    Audio {
        input_audio: InputAudio,
    },
    /// A tool result (serialized with type `tool`).
    #[serde(rename = "tool")]
    ToolResult {
        tool_call_id: String,
        output: String,
    },
}
1334
impl TryFrom<message::Message> for Vec<Message> {
    type Error = message::MessageError;

    // Converts a rig message into one or more Responses API input messages:
    // user tool results become `Message::ToolResult` items, other user content
    // becomes a single `Message::User`, and assistant content becomes a
    // `Message::Assistant` (text, tool call, or reasoning).
    fn try_from(message: message::Message) -> Result<Self, Self::Error> {
        match message {
            message::Message::User { content } => {
                // Separate tool results from all other user content.
                let (tool_results, other_content): (Vec<_>, Vec<_>) = content
                    .into_iter()
                    .partition(|content| matches!(content, message::UserContent::ToolResult(_)));

                // If there are messages with both tool results and user content, openai will only
                //  handle tool results. It's unlikely that there will be both.
                if !tool_results.is_empty() {
                    tool_results
                        .into_iter()
                        .map(|content| match content {
                            message::UserContent::ToolResult(message::ToolResult {
                                call_id,
                                content,
                                ..
                            }) => Ok::<_, message::MessageError>(Message::ToolResult {
                                // NOTE(review): panics when the tool result carries no
                                // call ID; the Responses API needs it to pair the
                                // result with its originating call.
                                tool_call_id: call_id.expect("The tool call ID should exist"),
                                output: {
                                    // Only the first content item is used, and only
                                    // text tool results are supported.
                                    let res = content.first();
                                    match res {
                                        completion::message::ToolResultContent::Text(Text {
                                            text,
                                        }) => text,
                                        _ => return  Err(MessageError::ConversionError("This API only currently supports text tool results".into()))
                                    }
                                },
                            }),
                            // Safe: the partition above guarantees only ToolResult
                            // variants reach this closure.
                            _ => unreachable!(),
                        })
                        .collect::<Result<Vec<_>, _>>()
                } else {
                    let other_content = other_content
                        .into_iter()
                        .map(|content| match content {
                            message::UserContent::Text(message::Text { text }) => {
                                Ok(UserContent::InputText { text })
                            }
                            message::UserContent::Image(message::Image {
                                data,
                                detail,
                                media_type,
                                ..
                            }) => {
                                // Base64 images become data URLs (empty media type if
                                // unknown); plain URLs pass through unchanged.
                                let url = match data {
                                    DocumentSourceKind::Base64(data) => {
                                        let media_type = if let Some(media_type) = media_type {
                                            media_type.to_mime_type().to_string()
                                        } else {
                                            String::new()
                                        };
                                        format!("data:{media_type};base64,{data}")
                                    }
                                    DocumentSourceKind::Url(url) => url,
                                    DocumentSourceKind::Raw(_) => {
                                        return Err(MessageError::ConversionError(
                                            "Raw files not supported, encode as base64 first"
                                                .into(),
                                        ));
                                    }
                                    doc => {
                                        return Err(MessageError::ConversionError(format!(
                                            "Unsupported document type: {doc}"
                                        )));
                                    }
                                };

                                Ok(UserContent::InputImage {
                                    image_url: url,
                                    detail: detail.unwrap_or_default(),
                                })
                            }
                            message::UserContent::Document(message::Document {
                                media_type: Some(DocumentMediaType::PDF),
                                data,
                                ..
                            }) => {
                                // PDFs map to input files: base64 data becomes an
                                // inline data URL, URLs are referenced directly.
                                let (file_data, file_url) = match data {
                                    DocumentSourceKind::Base64(data) => {
                                        (Some(format!("data:application/pdf;base64,{data}")), None)
                                    }
                                    DocumentSourceKind::Url(url) => (None, Some(url)),
                                    DocumentSourceKind::Raw(_) => {
                                        return Err(MessageError::ConversionError(
                                            "Raw files not supported, encode as base64 first"
                                                .into(),
                                        ));
                                    }
                                    doc => {
                                        return Err(MessageError::ConversionError(format!(
                                            "Unsupported document type: {doc}"
                                        )));
                                    }
                                };

                                Ok(UserContent::InputFile {
                                    file_url,
                                    file_data,
                                    // A placeholder filename is always supplied.
                                    filename: Some("document.pdf".into()),
                                })
                            }
                            // Any other base64 document is forwarded as plain text.
                            message::UserContent::Document(message::Document {
                                data: DocumentSourceKind::Base64(text),
                                ..
                            }) => Ok(UserContent::InputText { text }),
                            message::UserContent::Audio(message::Audio {
                                data: DocumentSourceKind::Base64(data),
                                media_type,
                                ..
                            }) => Ok(UserContent::Audio {
                                input_audio: InputAudio {
                                    data,
                                    // Defaults to MP3 when no media type was given.
                                    format: match media_type {
                                        Some(media_type) => media_type,
                                        None => AudioMediaType::MP3,
                                    },
                                },
                            }),
                            message::UserContent::Audio(_) => Err(MessageError::ConversionError(
                                "Audio must be base64 encoded data".into(),
                            )),
                            // NOTE(review): assumes every remaining variant is covered
                            // above (tool results were partitioned out); an unmatched
                            // document/variant combination would panic here.
                            _ => unreachable!(),
                        })
                        .collect::<Result<Vec<_>, _>>()?;

                    let other_content = OneOrMany::many(other_content).expect(
                        "There must be other content here if there were no tool result content",
                    );

                    Ok(vec![Message::User {
                        content: other_content,
                        name: None,
                    }])
                }
            }
            message::Message::Assistant { content, id } => {
                let assistant_message_id = id;

                // NOTE(review): only the FIRST assistant content item is converted;
                // any further items in the OneOrMany are dropped here — confirm
                // this is intentional.
                match content.first() {
                    crate::message::AssistantContent::Text(Text { text }) => {
                        Ok(vec![Message::Assistant {
                            id: assistant_message_id
                                .expect("The assistant message ID should exist"),
                            status: ToolStatus::Completed,
                            content: OneOrMany::one(AssistantContentType::Text(
                                AssistantContent::OutputText(Text { text }),
                            )),
                            name: None,
                        }])
                    }
                    crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
                        id,
                        call_id,
                        function,
                        ..
                    }) => Ok(vec![Message::Assistant {
                        content: OneOrMany::one(AssistantContentType::ToolCall(
                            OutputFunctionCall {
                                call_id: call_id.expect("The call ID should exist"),
                                arguments: function.arguments,
                                id,
                                name: function.name,
                                status: ToolStatus::Completed,
                            },
                        )),
                        id: assistant_message_id.expect("The assistant message ID should exist!"),
                        name: None,
                        status: ToolStatus::Completed,
                    }]),
                    crate::message::AssistantContent::Reasoning(crate::message::Reasoning {
                        id,
                        reasoning,
                        ..
                    }) => Ok(vec![Message::Assistant {
                        content: OneOrMany::one(AssistantContentType::Reasoning(OpenAIReasoning {
                            id: id.expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
                            summary: reasoning.into_iter().map(|x| ReasoningSummary::SummaryText { text: x }).collect(),
                            encrypted_content: None,
                            status: Some(ToolStatus::Completed),
                        })),
                        id: assistant_message_id.expect("The assistant message ID should exist!"),
                        name: None,
                        status: (ToolStatus::Completed),
                    }]),
                    crate::message::AssistantContent::Image(_) => {
                        Err(MessageError::ConversionError(
                            "Assistant image content is not supported in OpenAI Responses API".into(),
                        ))
                    }
                }
            }
        }
    }
}
1533
1534impl FromStr for UserContent {
1535    type Err = Infallible;
1536
1537    fn from_str(s: &str) -> Result<Self, Self::Err> {
1538        Ok(UserContent::InputText {
1539            text: s.to_string(),
1540        })
1541    }
1542}