rig/providers/openai/responses_api/
mod.rs

//! The OpenAI Responses API.
//!
//! By default, when creating a completion client, this is the API that gets used.
//!
//! If you'd like to switch back to the regular Completions API, you can do so with the `.completions_api()` function, as shown below:
//! ```rust
//! let openai_client = rig::providers::openai::Client::from_env();
//! let model = openai_client.completion_model("gpt-4o").completions_api();
//! ```
use super::completion::ToolChoice;
use super::{Client, responses_api::streaming::StreamingCompletionResponse};
use super::{InputAudio, SystemContent};
use crate::completion::CompletionError;
use crate::http_client;
use crate::http_client::HttpClientExt;
use crate::json_utils;
use crate::message::{
    AudioMediaType, Document, DocumentMediaType, DocumentSourceKind, ImageDetail, MessageError,
    MimeType, Text,
};
use crate::one_or_many::string_or_one_or_many;

use crate::wasm_compat::{WasmCompatSend, WasmCompatSync};
use crate::{OneOrMany, completion, message};
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use tracing::{Instrument, info_span};

use std::convert::Infallible;
use std::ops::Add;
use std::str::FromStr;

pub mod streaming;
/// The completion request type for OpenAI's Responses API: <https://platform.openai.com/docs/api-reference/responses/create>
/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct CompletionRequest {
    /// Message inputs
    pub input: OneOrMany<InputItem>,
    /// The model name
    pub model: String,
    /// Instructions (also referred to as the preamble; in other APIs this would be the "system prompt")
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,
    /// The maximum number of output tokens.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u64>,
    /// Toggle to true for streaming responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,
    /// The temperature. Set higher (up to a maximum of 2.0) for more creative responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// Whether the LLM should be forced to use a tool before returning a response.
    /// If none is provided, the default option is "auto".
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_choice: Option<ToolChoice>,
    /// The tools you want to use. Currently this is limited to functions, but it will be expanded on in the future.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub tools: Vec<ResponsesToolDefinition>,
    /// Additional parameters
    #[serde(flatten)]
    pub additional_parameters: AdditionalParameters,
}

impl CompletionRequest {
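    /// Attach a JSON schema so the model returns structured outputs.
    ///
    /// A minimal usage sketch; the schema below is an illustrative assumption,
    /// not one shipped with this crate:
    /// ```rust,ignore
    /// let request = request.with_structured_outputs(
    ///     "person",
    ///     serde_json::json!({
    ///         "type": "object",
    ///         "properties": { "name": { "type": "string" } },
    ///         "required": ["name"],
    ///         "additionalProperties": false
    ///     }),
    /// );
    /// ```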
    pub fn with_structured_outputs<S>(mut self, schema_name: S, schema: serde_json::Value) -> Self
    where
        S: Into<String>,
    {
        self.additional_parameters.text = Some(TextConfig::structured_output(schema_name, schema));

        self
    }

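    /// Add reasoning configuration to this request.
    ///
    /// A minimal usage sketch:
    /// ```rust,ignore
    /// let request = request.with_reasoning(
    ///     Reasoning::new().with_effort(ReasoningEffort::High),
    /// );
    /// ```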
    pub fn with_reasoning(mut self, reasoning: Reasoning) -> Self {
        self.additional_parameters.reasoning = Some(reasoning);

        self
    }
}

/// An input item for [`CompletionRequest`].
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct InputItem {
    /// The role of an input item/message.
    /// Input messages should be `Some(Role::User)`, and output messages should be `Some(Role::Assistant)`.
    /// Everything else should be `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    role: Option<Role>,
    /// The input content itself.
    #[serde(flatten)]
    input: InputContent,
}

/// Message roles. Used by the OpenAI Responses API to determine who created a given message.
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    User,
    Assistant,
    System,
}

/// The type of content used in an [`InputItem`]. Additionally holds the data for each type of input content.
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    Message(Message),
    Reasoning(OpenAIReasoning),
    FunctionCall(OutputFunctionCall),
    FunctionCallOutput(ToolResult),
}

#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct OpenAIReasoning {
    id: String,
    pub summary: Vec<ReasoningSummary>,
    pub encrypted_content: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<ToolStatus>,
}

#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ReasoningSummary {
    SummaryText { text: String },
}

impl ReasoningSummary {
    fn new(input: &str) -> Self {
        Self::SummaryText {
            text: input.to_string(),
        }
    }

    pub fn text(&self) -> String {
        let ReasoningSummary::SummaryText { text } = self;
        text.clone()
    }
}

/// A tool result.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ToolResult {
    /// The call ID of a tool. This must match the `call_id` of the corresponding tool call, otherwise the API will return an error.
    call_id: String,
    /// The result of a tool call.
    output: String,
    /// The status of a tool call (if used in a completion request, this should always be `Completed`).
    status: ToolStatus,
}
154
155impl From<Message> for InputItem {
156    fn from(value: Message) -> Self {
157        match value {
158            Message::User { .. } => Self {
159                role: Some(Role::User),
160                input: InputContent::Message(value),
161            },
162            Message::Assistant { ref content, .. } => {
163                let role = if content
164                    .clone()
165                    .iter()
166                    .any(|x| matches!(x, AssistantContentType::Reasoning(_)))
167                {
168                    None
169                } else {
170                    Some(Role::Assistant)
171                };
172                Self {
173                    role,
174                    input: InputContent::Message(value),
175                }
176            }
177            Message::System { .. } => Self {
178                role: Some(Role::System),
179                input: InputContent::Message(value),
180            },
181            Message::ToolResult {
182                tool_call_id,
183                output,
184            } => Self {
185                role: None,
186                input: InputContent::FunctionCallOutput(ToolResult {
187                    call_id: tool_call_id,
188                    output,
189                    status: ToolStatus::Completed,
190                }),
191            },
192        }
193    }
194}

impl TryFrom<crate::completion::Message> for Vec<InputItem> {
    type Error = CompletionError;

    fn try_from(value: crate::completion::Message) -> Result<Self, Self::Error> {
        match value {
            crate::completion::Message::User { content } => {
                let mut items = Vec::new();

                for user_content in content {
                    match user_content {
                        crate::message::UserContent::Text(Text { text }) => {
                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputText { text }),
                                    name: None,
                                }),
                            });
                        }
                        crate::message::UserContent::ToolResult(
                            crate::completion::message::ToolResult {
                                call_id,
                                content: tool_content,
                                ..
                            },
                        ) => {
                            for tool_result_content in tool_content {
                                let crate::completion::message::ToolResultContent::Text(Text {
                                    text,
                                }) = tool_result_content
                                else {
                                    return Err(CompletionError::ProviderError(
                                        "The OpenAI Responses API only supports text tool results"
                                            .to_string(),
                                    ));
                                };
                                items.push(InputItem {
                                    role: None,
                                    input: InputContent::FunctionCallOutput(ToolResult {
                                        call_id: call_id
                                            .clone()
                                            .expect("The call ID of this tool should exist!"),
                                        output: text,
                                        status: ToolStatus::Completed,
                                    }),
                                });
                            }
                        }
                        crate::message::UserContent::Document(Document {
                            data,
                            media_type: Some(DocumentMediaType::PDF),
                            ..
                        }) => {
                            let (file_data, file_url) = match data {
                                DocumentSourceKind::Base64(data) => {
                                    (Some(format!("data:application/pdf;base64,{data}")), None)
                                }
                                DocumentSourceKind::Url(url) => (None, Some(url)),
                                DocumentSourceKind::Raw(_) => {
                                    return Err(CompletionError::RequestError(
                                        "Raw file data not supported, encode as base64 first"
                                            .into(),
                                    ));
                                }
                                doc => {
                                    return Err(CompletionError::RequestError(
                                        format!("Unsupported document type: {doc}").into(),
                                    ));
                                }
                            };

                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputFile {
                                        file_data,
                                        file_url,
                                        filename: Some("document.pdf".to_string()),
                                    }),
                                    name: None,
                                }),
                            })
                        }
                        // todo: should we ensure this takes into account file size?
                        crate::message::UserContent::Document(Document {
                            data: DocumentSourceKind::Base64(text),
                            ..
                        }) => items.push(InputItem {
                            role: Some(Role::User),
                            input: InputContent::Message(Message::User {
                                content: OneOrMany::one(UserContent::InputText { text }),
                                name: None,
                            }),
                        }),
                        crate::message::UserContent::Document(Document {
                            data: DocumentSourceKind::String(text),
                            ..
                        }) => items.push(InputItem {
                            role: Some(Role::User),
                            input: InputContent::Message(Message::User {
                                content: OneOrMany::one(UserContent::InputText { text }),
                                name: None,
                            }),
                        }),
                        crate::message::UserContent::Image(crate::message::Image {
                            data,
                            media_type,
                            detail,
                            ..
                        }) => {
                            let url = match data {
                                DocumentSourceKind::Base64(data) => {
                                    let media_type = if let Some(media_type) = media_type {
                                        media_type.to_mime_type().to_string()
                                    } else {
                                        String::new()
                                    };
                                    format!("data:{media_type};base64,{data}")
                                }
                                DocumentSourceKind::Url(url) => url,
                                DocumentSourceKind::Raw(_) => {
                                    return Err(CompletionError::RequestError(
                                        "Raw file data not supported, encode as base64 first"
                                            .into(),
                                    ));
                                }
                                doc => {
                                    return Err(CompletionError::RequestError(
                                        format!("Unsupported document type: {doc}").into(),
                                    ));
                                }
                            };
                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputImage {
                                        image_url: url,
                                        detail: detail.unwrap_or_default(),
                                    }),
                                    name: None,
                                }),
                            });
                        }
                        message => {
                            return Err(CompletionError::ProviderError(format!(
                                "Unsupported message: {message:?}"
                            )));
                        }
                    }
                }

                Ok(items)
            }
            crate::completion::Message::Assistant { id, content } => {
                let mut items = Vec::new();

                for assistant_content in content {
                    match assistant_content {
                        crate::message::AssistantContent::Text(Text { text }) => {
                            let id = id.clone().unwrap_or_default();
                            items.push(InputItem {
                                role: Some(Role::Assistant),
                                input: InputContent::Message(Message::Assistant {
                                    content: OneOrMany::one(AssistantContentType::Text(
                                        AssistantContent::OutputText(Text { text }),
                                    )),
                                    id,
                                    name: None,
                                    status: ToolStatus::Completed,
                                }),
                            });
                        }
                        crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
                            id: tool_id,
                            call_id,
                            function,
                        }) => {
                            items.push(InputItem {
                                role: None,
                                input: InputContent::FunctionCall(OutputFunctionCall {
                                    arguments: function.arguments,
                                    call_id: call_id.expect("The tool call ID should exist!"),
                                    id: tool_id,
                                    name: function.name,
                                    status: ToolStatus::Completed,
                                }),
                            });
                        }
                        crate::message::AssistantContent::Reasoning(
                            crate::message::Reasoning { id, reasoning, .. },
                        ) => {
                            items.push(InputItem {
                                role: None,
                                input: InputContent::Reasoning(OpenAIReasoning {
                                    id: id
                                        .expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
                                    summary: reasoning.into_iter().map(|x| ReasoningSummary::new(&x)).collect(),
                                    encrypted_content: None,
                                    status: None,
                                }),
                            });
                        }
                        crate::message::AssistantContent::Image(_) => {
                            return Err(CompletionError::ProviderError(
                                "Assistant image content is not supported in the OpenAI Responses API"
                                    .to_string(),
                            ));
                        }
                    }
                }

                Ok(items)
            }
        }
    }
}

impl From<OneOrMany<String>> for Vec<ReasoningSummary> {
    fn from(value: OneOrMany<String>) -> Self {
        value.iter().map(|x| ReasoningSummary::new(x)).collect()
    }
}

/// The definition of a tool, repurposed for OpenAI's Responses API.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ResponsesToolDefinition {
    /// Tool name
    pub name: String,
    /// Parameters - this should be a JSON schema. Tools should additionally ensure an "additionalProperties" field has been added with the value set to false, as this is required when using OpenAI's strict mode (enabled by default).
    pub parameters: serde_json::Value,
    /// Whether to use strict mode. Enabled by default as it allows for improved efficiency.
    pub strict: bool,
    /// The type of tool. This should always be "function".
    #[serde(rename = "type")]
    pub kind: String,
    /// Tool description.
    pub description: String,
}

/// Recursively ensures all object schemas in a JSON schema respect OpenAI's structured output restrictions.
/// Nested arrays, schema `$defs`, object properties and compound schemas (`anyOf`/`oneOf`/`allOf`) are handled recursively.
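/// For example (illustrative), the schema
/// `{"type": "object", "properties": {"name": {"type": "string"}}}`
/// becomes
/// `{"type": "object", "properties": {"name": {"type": "string"}}, "required": ["name"], "additionalProperties": false}`.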
fn sanitize_schema(schema: &mut serde_json::Value) {
    if let Value::Object(obj) = schema {
        let is_object_schema = obj.get("type") == Some(&Value::String("object".to_string()))
            || obj.contains_key("properties");

        // This is required by OpenAI's Responses API when using strict mode.
        // Source: https://platform.openai.com/docs/guides/structured-outputs#additionalproperties-false-must-always-be-set-in-objects
        if is_object_schema && !obj.contains_key("additionalProperties") {
            obj.insert("additionalProperties".to_string(), Value::Bool(false));
        }

        // This is also required by OpenAI's Responses API
        // Source: https://platform.openai.com/docs/guides/structured-outputs#all-fields-must-be-required
        if let Some(Value::Object(properties)) = obj.get("properties") {
            let prop_keys = properties.keys().cloned().map(Value::String).collect();
            obj.insert("required".to_string(), Value::Array(prop_keys));
        }

        if let Some(defs) = obj.get_mut("$defs")
            && let Value::Object(defs_obj) = defs
        {
            for (_, def_schema) in defs_obj.iter_mut() {
                sanitize_schema(def_schema);
            }
        }

        if let Some(properties) = obj.get_mut("properties")
            && let Value::Object(props) = properties
        {
            for (_, prop_value) in props.iter_mut() {
                sanitize_schema(prop_value);
            }
        }

        if let Some(items) = obj.get_mut("items") {
            sanitize_schema(items);
        }

        // Handle enums and compound schemas (anyOf/oneOf/allOf)
        for key in ["anyOf", "oneOf", "allOf"] {
            if let Some(variants) = obj.get_mut(key)
                && let Value::Array(variants_array) = variants
            {
                for variant in variants_array.iter_mut() {
                    sanitize_schema(variant);
                }
            }
        }
    }
}

impl From<completion::ToolDefinition> for ResponsesToolDefinition {
    fn from(value: completion::ToolDefinition) -> Self {
        let completion::ToolDefinition {
            name,
            mut parameters,
            description,
        } = value;

        sanitize_schema(&mut parameters);

        Self {
            name,
            parameters,
            description,
            kind: "function".to_string(),
            strict: true,
        }
    }
}

/// Token usage.
/// Token usage from the OpenAI Responses API reports the input and output tokens (both with more in-depth detail), as well as a total tokens field.
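/// Usages can be summed via the `Add` impls below, e.g. to aggregate usage
/// across multiple responses (a sketch with assumed variables):
/// ```rust,ignore
/// let total = first_response_usage + second_response_usage;
/// ```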
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ResponsesUsage {
    /// Input tokens
    pub input_tokens: u64,
    /// In-depth detail on input tokens (cached tokens)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_tokens_details: Option<InputTokensDetails>,
    /// Output tokens
    pub output_tokens: u64,
    /// In-depth detail on output tokens (reasoning tokens)
    pub output_tokens_details: OutputTokensDetails,
    /// Total tokens used (for a given prompt)
    pub total_tokens: u64,
}

impl ResponsesUsage {
    /// Create a new [`ResponsesUsage`] instance
    pub(crate) fn new() -> Self {
        Self {
            input_tokens: 0,
            input_tokens_details: Some(InputTokensDetails::new()),
            output_tokens: 0,
            output_tokens_details: OutputTokensDetails::new(),
            total_tokens: 0,
        }
    }
}

impl Add for ResponsesUsage {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        let input_tokens = self.input_tokens + rhs.input_tokens;
        let input_tokens_details = self.input_tokens_details.map(|lhs| {
            if let Some(tokens) = rhs.input_tokens_details {
                lhs + tokens
            } else {
                lhs
            }
        });
        let output_tokens = self.output_tokens + rhs.output_tokens;
        let output_tokens_details = self.output_tokens_details + rhs.output_tokens_details;
        let total_tokens = self.total_tokens + rhs.total_tokens;
        Self {
            input_tokens,
            input_tokens_details,
            output_tokens,
            output_tokens_details,
            total_tokens,
        }
    }
}

/// In-depth details on input tokens.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct InputTokensDetails {
    /// Cached tokens from OpenAI
    pub cached_tokens: u64,
}

impl InputTokensDetails {
    pub(crate) fn new() -> Self {
        Self { cached_tokens: 0 }
    }
}

impl Add for InputTokensDetails {
    type Output = Self;
    fn add(self, rhs: Self) -> Self::Output {
        Self {
            cached_tokens: self.cached_tokens + rhs.cached_tokens,
        }
    }
}

/// In-depth details on output tokens.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct OutputTokensDetails {
    /// Reasoning tokens
    pub reasoning_tokens: u64,
}

impl OutputTokensDetails {
    pub(crate) fn new() -> Self {
        Self {
            reasoning_tokens: 0,
        }
    }
}

impl Add for OutputTokensDetails {
    type Output = Self;
    fn add(self, rhs: Self) -> Self::Output {
        Self {
            reasoning_tokens: self.reasoning_tokens + rhs.reasoning_tokens,
        }
    }
}

/// Occasionally, when using OpenAI's Responses API, you may get an incomplete response. This struct holds the reason why it happened.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct IncompleteDetailsReason {
    /// The reason for an incomplete [`CompletionResponse`].
    pub reason: String,
}

/// A response error from OpenAI's Responses API.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ResponseError {
    /// Error code
    pub code: String,
    /// Error message
    pub message: String,
}

/// A response object as an enum (ensures type validation)
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ResponseObject {
    Response,
}

/// The response status as an enum (ensures type validation)
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ResponseStatus {
    InProgress,
    Completed,
    Failed,
    Cancelled,
    Queued,
    Incomplete,
}

/// Attempt to create a [`CompletionRequest`] from a model name and a [`crate::completion::CompletionRequest`]
impl TryFrom<(String, crate::completion::CompletionRequest)> for CompletionRequest {
    type Error = CompletionError;
    fn try_from(
        (model, req): (String, crate::completion::CompletionRequest),
    ) -> Result<Self, Self::Error> {
        let input = {
            let mut partial_history = vec![];
            if let Some(docs) = req.normalized_documents() {
                partial_history.push(docs);
            }
            partial_history.extend(req.chat_history);

            // Convert each message into Responses API input items, propagating
            // any conversion errors rather than panicking
            let mut full_history: Vec<InputItem> = Vec::new();
            for message in partial_history {
                full_history.extend(<Vec<InputItem>>::try_from(message)?);
            }

            full_history
        };

        let input = OneOrMany::many(input)
            .expect("This should never panic - if it does, please file a bug report");

        // Read the `stream` flag (if any) from the additional params
        let stream = req
            .additional_params
            .as_ref()
            .and_then(|params| params.get("stream"))
            .and_then(Value::as_bool);

        let additional_parameters = if let Some(map) = req.additional_params {
            serde_json::from_value::<AdditionalParameters>(map).expect("Converting additional parameters to AdditionalParameters should never fail as every field is optional")
        } else {
            // If there are no additional parameters, initialise an empty object
            AdditionalParameters::default()
        };

        let tool_choice = req.tool_choice.map(ToolChoice::try_from).transpose()?;

        Ok(Self {
            input,
            model,
            instructions: req.preamble,
            max_output_tokens: req.max_tokens,
            stream,
            tool_choice,
            tools: req
                .tools
                .into_iter()
                .map(ResponsesToolDefinition::from)
                .collect(),
            temperature: req.temperature,
            additional_parameters,
        })
    }
}

/// The completion model struct for OpenAI's Responses API.
#[derive(Clone)]
pub struct ResponsesCompletionModel<T = reqwest::Client> {
    /// The OpenAI client
    pub(crate) client: Client<T>,
    /// Name of the model (e.g. `gpt-4o`)
    pub model: String,
}

impl<T> ResponsesCompletionModel<T>
where
    T: HttpClientExt + Clone + Default + std::fmt::Debug + 'static,
{
    /// Creates a new [`ResponsesCompletionModel`].
    pub fn new(client: Client<T>, model: impl Into<String>) -> Self {
        Self {
            client,
            model: model.into(),
        }
    }

    pub fn with_model(client: Client<T>, model: &str) -> Self {
        Self {
            client,
            model: model.to_string(),
        }
    }

    /// Use the Completions API instead of the Responses API.
    pub fn completions_api(self) -> crate::providers::openai::completion::CompletionModel<T> {
        super::completion::CompletionModel::with_model(self.client.completions_api(), &self.model)
    }

    /// Attempt to create a completion request from a [`crate::completion::CompletionRequest`].
    pub(crate) fn create_completion_request(
        &self,
        completion_request: crate::completion::CompletionRequest,
    ) -> Result<CompletionRequest, CompletionError> {
        let req = CompletionRequest::try_from((self.model.clone(), completion_request))?;

        Ok(req)
    }
}

/// The standard response format from OpenAI's Responses API.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompletionResponse {
    /// The ID of a completion response.
    pub id: String,
    /// The type of the object.
    pub object: ResponseObject,
    /// The time at which a given response was created, in seconds since the UNIX epoch (1970-01-01 00:00:00 UTC).
    pub created_at: u64,
    /// The status of the response.
    pub status: ResponseStatus,
    /// Response error (optional)
    pub error: Option<ResponseError>,
    /// Incomplete response details (optional)
    pub incomplete_details: Option<IncompleteDetailsReason>,
    /// System prompt/preamble
    pub instructions: Option<String>,
    /// The maximum number of tokens the model should output
    pub max_output_tokens: Option<u64>,
    /// The model name
    pub model: String,
    /// Token usage
    pub usage: Option<ResponsesUsage>,
    /// The model output (messages, tool calls, reasoning items, etc. go here)
    pub output: Vec<Output>,
    /// Tools
    #[serde(default)]
    pub tools: Vec<ResponsesToolDefinition>,
    /// Additional parameters
    #[serde(flatten)]
    pub additional_parameters: AdditionalParameters,
}

/// Additional parameters for the completion request type for OpenAI's Responses API: <https://platform.openai.com/docs/api-reference/responses/create>
/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
#[derive(Clone, Debug, Deserialize, Serialize, Default)]
pub struct AdditionalParameters {
    /// Whether or not a given model task should run in the background (i.e. as a detached process).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,
    /// The text response format. This is where you would add structured outputs (if you want them).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<TextConfig>,
    /// What types of extra data you would like to include in the response. Note that most of these are currently unsupported, but have been added for completeness.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include: Option<Vec<Include>>,
    /// `top_p` (nucleus sampling). It is generally recommended to set this or `temperature`, but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f64>,
    /// Whether or not the response should be truncated.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<TruncationStrategy>,
    /// A unique identifier representing your end user.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user: Option<String>,
    /// Any additional metadata you'd like to add. This will additionally be returned by the response.
    #[serde(skip_serializing_if = "Map::is_empty", default)]
    pub metadata: serde_json::Map<String, serde_json::Value>,
    /// Whether or not you want tool calls to run in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,
    /// Previous response ID. If you are not sending a full conversation, this can help to track the message flow.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,
    /// Add thinking/reasoning to your response. The result will be emitted as a list member of the `output` field.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,
    /// The service tier you're using.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<OpenAIServiceTier>,
    /// Whether or not to store the response for later retrieval by API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub store: Option<bool>,
}

impl AdditionalParameters {
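    /// Serialize into a `serde_json::Value`, e.g. for passing as the additional
    /// parameters of a rig completion request. A minimal sketch:
    /// ```rust,ignore
    /// let params = AdditionalParameters {
    ///     top_p: Some(0.9),
    ///     store: Some(true),
    ///     ..Default::default()
    /// };
    /// let json = params.to_json();
    /// ```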
    pub fn to_json(self) -> serde_json::Value {
        serde_json::to_value(self).expect("this should never fail since a struct that impls Serialize will always be valid JSON")
    }
}

/// The truncation strategy.
/// When using `Auto`, if the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.
/// Otherwise, does nothing (and is disabled by default).
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum TruncationStrategy {
    Auto,
    #[default]
    Disabled,
}

/// The model output format configuration.
/// You can either have plain text (the default), or attach a JSON schema for the purposes of structured outputs.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TextConfig {
    pub format: TextFormat,
}

impl TextConfig {
    pub(crate) fn structured_output<S>(name: S, schema: serde_json::Value) -> Self
    where
        S: Into<String>,
    {
        Self {
            format: TextFormat::JsonSchema(StructuredOutputsInput {
                name: name.into(),
                schema,
                strict: true,
            }),
        }
    }
}

/// The text format (contained by [`TextConfig`]).
/// You can either have plain text (the default), or attach a JSON schema for the purposes of structured outputs.
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum TextFormat {
    JsonSchema(StructuredOutputsInput),
    #[default]
    Text,
}

/// The inputs required for adding structured outputs.
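///
/// A sketch using the `schemars` crate (an assumed dependency, not required by
/// this module):
/// ```rust,ignore
/// #[derive(schemars::JsonSchema)]
/// struct Person {
///     name: String,
/// }
///
/// let schema = serde_json::to_value(schemars::schema_for!(Person)).unwrap();
/// let input = StructuredOutputsInput {
///     name: "Person".to_string(),
///     schema,
///     strict: true,
/// };
/// ```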
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StructuredOutputsInput {
    /// The name of your schema.
    pub name: String,
    /// Your required output schema. It is recommended that you use the `JsonSchema` derive macro, which you can check out at <https://docs.rs/schemars/latest/schemars/trait.JsonSchema.html>.
    pub schema: serde_json::Value,
    /// Enable strict output. If you are using your AI agent in a data pipeline or another scenario that requires the output to strictly conform to a given schema, it is recommended to set this to true.
    pub strict: bool,
}

/// Add reasoning to a [`CompletionRequest`].
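///
/// Built fluently, for example:
/// ```rust,ignore
/// let reasoning = Reasoning::new()
///     .with_effort(ReasoningEffort::Medium)
///     .with_summary_level(ReasoningSummaryLevel::Detailed);
/// ```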
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Reasoning {
    /// How much effort you want the model to put into thinking/reasoning.
    pub effort: Option<ReasoningEffort>,
    /// How much effort you want the model to put into writing the reasoning summary.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummaryLevel>,
}

impl Reasoning {
    /// Creates a new [`Reasoning`] instance (with empty values).
    pub fn new() -> Self {
        Self {
            effort: None,
            summary: None,
        }
    }

    /// Adds reasoning effort.
    pub fn with_effort(mut self, reasoning_effort: ReasoningEffort) -> Self {
        self.effort = Some(reasoning_effort);

        self
    }

    /// Adds a summary level (how detailed the reasoning summary will be).
    pub fn with_summary_level(mut self, reasoning_summary_level: ReasoningSummaryLevel) -> Self {
        self.summary = Some(reasoning_summary_level);

        self
    }
}

/// The billing service tier that will be used. Defaults to `auto`.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum OpenAIServiceTier {
    #[default]
    Auto,
    Default,
    Flex,
}

/// The amount of reasoning effort that will be used by a given model.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ReasoningEffort {
    Minimal,
    Low,
    #[default]
    Medium,
    High,
}

/// The amount of effort that will go into a reasoning summary by a given model.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ReasoningSummaryLevel {
    #[default]
    Auto,
    Concise,
    Detailed,
}

/// Results to additionally include in the OpenAI Responses API.
/// Note that most of these are currently unsupported, but have been added for completeness.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Include {
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    #[serde(rename = "computer_call.output.image_url")]
    ComputerCallOutputImageUrl,
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
}

/// A currently non-exhaustive list of output types.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum Output {
    Message(OutputMessage),
    #[serde(alias = "function_call")]
    FunctionCall(OutputFunctionCall),
    Reasoning {
        id: String,
        summary: Vec<ReasoningSummary>,
    },
}

impl From<Output> for Vec<completion::AssistantContent> {
    fn from(value: Output) -> Self {
        let res: Vec<completion::AssistantContent> = match value {
            Output::Message(OutputMessage { content, .. }) => content
                .into_iter()
                .map(completion::AssistantContent::from)
                .collect(),
            Output::FunctionCall(OutputFunctionCall {
                id,
                arguments,
                call_id,
                name,
                ..
            }) => vec![completion::AssistantContent::tool_call_with_call_id(
                id, call_id, name, arguments,
            )],
            Output::Reasoning { id, summary } => {
                let summary: Vec<String> = summary.into_iter().map(|x| x.text()).collect();

                vec![completion::AssistantContent::Reasoning(
                    message::Reasoning::multi(summary).with_id(id),
                )]
            }
        };

        res
    }
}

#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputReasoning {
    id: String,
    summary: Vec<ReasoningSummary>,
    status: ToolStatus,
}

/// An OpenAI Responses API tool call. The returned call ID must be used when creating a tool result to send back to OpenAI as a message input, otherwise the API will return an error.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputFunctionCall {
    pub id: String,
    #[serde(with = "json_utils::stringified_json")]
    pub arguments: serde_json::Value,
    pub call_id: String,
    pub name: String,
    pub status: ToolStatus,
}

/// The status of a given tool.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ToolStatus {
    InProgress,
    Completed,
    Incomplete,
}

/// An output message from OpenAI's Responses API.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputMessage {
    /// The message ID. Must be included when sending the message back to OpenAI.
    pub id: String,
    /// The role (currently only `Assistant`, as this struct is only created when receiving an LLM message as a response)
    pub role: OutputRole,
    /// The status of the response
    pub status: ResponseStatus,
    /// The actual message content
    pub content: Vec<AssistantContent>,
}

/// The role of an output message.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OutputRole {
    Assistant,
}

impl<T> completion::CompletionModel for ResponsesCompletionModel<T>
where
    T: HttpClientExt
        + Clone
        + std::fmt::Debug
        + Default
        + WasmCompatSend
        + WasmCompatSync
        + 'static,
{
    type Response = CompletionResponse;
    type StreamingResponse = StreamingCompletionResponse;

    type Client = super::Client<T>;

    fn make(client: &Self::Client, model: impl Into<String>) -> Self {
        Self::new(client.clone(), model)
    }

    async fn completion(
        &self,
        completion_request: crate::completion::CompletionRequest,
    ) -> Result<completion::CompletionResponse<Self::Response>, CompletionError> {
        let span = if tracing::Span::current().is_disabled() {
            info_span!(
                target: "rig::completions",
                "chat",
                gen_ai.operation.name = "chat",
                gen_ai.provider.name = tracing::field::Empty,
                gen_ai.request.model = tracing::field::Empty,
                gen_ai.response.id = tracing::field::Empty,
                gen_ai.response.model = tracing::field::Empty,
                gen_ai.usage.output_tokens = tracing::field::Empty,
                gen_ai.usage.input_tokens = tracing::field::Empty,
                gen_ai.input.messages = tracing::field::Empty,
                gen_ai.output.messages = tracing::field::Empty,
            )
        } else {
            tracing::Span::current()
        };

        span.record("gen_ai.provider.name", "openai");
        span.record("gen_ai.request.model", &self.model);
        let request = self.create_completion_request(completion_request)?;
        span.record(
            "gen_ai.input.messages",
            serde_json::to_string(&request.input)
                .expect("openai request to successfully turn into a JSON value"),
        );
        let body = serde_json::to_vec(&request)?;
        tracing::trace!(
            target: "rig::completions",
            "OpenAI Responses API input: {request}",
            request = serde_json::to_string_pretty(&request).unwrap()
        );

        let req = self
            .client
            .post("/responses")?
            .body(body)
            .map_err(|e| CompletionError::HttpError(e.into()))?;

        async move {
            let response = self.client.send(req).await?;

            if response.status().is_success() {
                let t = http_client::text(response).await?;
                let response = serde_json::from_str::<Self::Response>(&t)?;
                let span = tracing::Span::current();
                span.record(
                    "gen_ai.output.messages",
                    serde_json::to_string(&response.output).unwrap(),
                );
                span.record("gen_ai.response.id", &response.id);
                span.record("gen_ai.response.model", &response.model);
                if let Some(ref usage) = response.usage {
                    span.record("gen_ai.usage.output_tokens", usage.output_tokens);
                    span.record("gen_ai.usage.input_tokens", usage.input_tokens);
                }
                // We need to emit an event here for the span to actually send anything
                tracing::info!("API successfully called");
                response.try_into()
            } else {
                let text = http_client::text(response).await?;
                Err(CompletionError::ProviderError(text))
            }
        }
        .instrument(span)
        .await
    }

    #[cfg_attr(feature = "worker", worker::send)]
    async fn stream(
        &self,
        request: crate::completion::CompletionRequest,
    ) -> Result<
        crate::streaming::StreamingCompletionResponse<Self::StreamingResponse>,
        CompletionError,
    > {
        ResponsesCompletionModel::stream(self, request).await
    }
}

impl TryFrom<CompletionResponse> for completion::CompletionResponse<CompletionResponse> {
    type Error = CompletionError;

    fn try_from(response: CompletionResponse) -> Result<Self, Self::Error> {
        if response.output.is_empty() {
            return Err(CompletionError::ResponseError(
                "Response contained no parts".to_owned(),
            ));
        }

        let content: Vec<completion::AssistantContent> = response
            .output
            .iter()
            .cloned()
            .flat_map(<Vec<completion::AssistantContent>>::from)
            .collect();

        let choice = OneOrMany::many(content).map_err(|_| {
            CompletionError::ResponseError(
                "Response contained no message or tool call (empty)".to_owned(),
            )
        })?;

        let usage = response
            .usage
            .as_ref()
            .map(|usage| completion::Usage {
                input_tokens: usage.input_tokens,
                output_tokens: usage.output_tokens,
                total_tokens: usage.total_tokens,
            })
            .unwrap_or_default();

        Ok(completion::CompletionResponse {
            choice,
            usage,
            raw_response: response,
        })
    }
}

/// An OpenAI Responses API message.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "role", rename_all = "lowercase")]
pub enum Message {
    #[serde(alias = "developer")]
    System {
        #[serde(deserialize_with = "string_or_one_or_many")]
        content: OneOrMany<SystemContent>,
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
    },
    User {
        #[serde(deserialize_with = "string_or_one_or_many")]
        content: OneOrMany<UserContent>,
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
    },
    Assistant {
        content: OneOrMany<AssistantContentType>,
        #[serde(skip_serializing_if = "String::is_empty")]
        id: String,
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
        status: ToolStatus,
    },
    #[serde(rename = "tool")]
    ToolResult {
        tool_call_id: String,
        output: String,
    },
}

/// The type of a tool result content item.
#[derive(Default, Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(rename_all = "lowercase")]
pub enum ToolResultContentType {
    #[default]
    Text,
}

impl Message {
    pub fn system(content: &str) -> Self {
        Message::System {
            content: OneOrMany::one(content.to_owned().into()),
            name: None,
        }
    }
}

/// Text assistant content.
/// Note that, unlike the Completions API, the text variant is tagged `output_text` rather than `text`.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum AssistantContent {
    OutputText(Text),
    Refusal { refusal: String },
}

impl From<AssistantContent> for completion::AssistantContent {
    fn from(value: AssistantContent) -> Self {
        match value {
            AssistantContent::Refusal { refusal } => {
                completion::AssistantContent::Text(Text { text: refusal })
            }
            AssistantContent::OutputText(Text { text }) => {
                completion::AssistantContent::Text(Text { text })
            }
        }
    }
}

/// The type of assistant content.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(untagged)]
pub enum AssistantContentType {
    Text(AssistantContent),
    ToolCall(OutputFunctionCall),
    Reasoning(OpenAIReasoning),
}

/// Different types of user content.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum UserContent {
    InputText {
        text: String,
    },
    InputImage {
        image_url: String,
        #[serde(default)]
        detail: ImageDetail,
    },
    InputFile {
        #[serde(skip_serializing_if = "Option::is_none")]
        file_url: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        file_data: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        filename: Option<String>,
    },
    Audio {
        input_audio: InputAudio,
    },
    #[serde(rename = "tool")]
    ToolResult {
        tool_call_id: String,
        output: String,
    },
}
1314
1315impl TryFrom<message::Message> for Vec<Message> {
1316    type Error = message::MessageError;
1317
1318    fn try_from(message: message::Message) -> Result<Self, Self::Error> {
1319        match message {
1320            message::Message::User { content } => {
1321                let (tool_results, other_content): (Vec<_>, Vec<_>) = content
1322                    .into_iter()
1323                    .partition(|content| matches!(content, message::UserContent::ToolResult(_)));
1324
1325                // If there are messages with both tool results and user content, openai will only
1326                //  handle tool results. It's unlikely that there will be both.
1327                if !tool_results.is_empty() {
1328                    tool_results
1329                        .into_iter()
1330                        .map(|content| match content {
1331                            message::UserContent::ToolResult(message::ToolResult {
1332                                call_id,
1333                                content,
1334                                ..
1335                            }) => Ok::<_, message::MessageError>(Message::ToolResult {
1336                                tool_call_id: call_id.expect("The tool call ID should exist"),
1337                                output: {
1338                                    let res = content.first();
1339                                    match res {
1340                                        completion::message::ToolResultContent::Text(Text {
1341                                            text,
1342                                        }) => text,
1343                                        _ => return  Err(MessageError::ConversionError("This API only currently supports text tool results".into()))
1344                                    }
1345                                },
1346                            }),
1347                            _ => unreachable!(),
1348                        })
1349                        .collect::<Result<Vec<_>, _>>()
1350                } else {
1351                    let other_content = other_content
1352                        .into_iter()
1353                        .map(|content| match content {
1354                            message::UserContent::Text(message::Text { text }) => {
1355                                Ok(UserContent::InputText { text })
1356                            }
1357                            message::UserContent::Image(message::Image {
1358                                data,
1359                                detail,
1360                                media_type,
1361                                ..
1362                            }) => {
                                let url = match data {
                                    DocumentSourceKind::Base64(data) => {
                                        let media_type = media_type
                                            .map(|media_type| media_type.to_mime_type().to_string())
                                            .unwrap_or_default();
                                        format!("data:{media_type};base64,{data}")
                                    }
                                    DocumentSourceKind::Url(url) => url,
                                    DocumentSourceKind::Raw(_) => {
                                        return Err(MessageError::ConversionError(
                                            "Raw files are not supported; encode them as base64 first"
                                                .into(),
                                        ));
                                    }
                                    doc => {
                                        return Err(MessageError::ConversionError(format!(
                                            "Unsupported document type: {doc}"
                                        )));
                                    }
                                };

                                Ok(UserContent::InputImage {
                                    image_url: url,
                                    detail: detail.unwrap_or_default(),
                                })
                            }
                            message::UserContent::Document(message::Document {
                                media_type: Some(DocumentMediaType::PDF),
                                data,
                                ..
                            }) => {
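                                // PDFs become an `input_file`: inline base64 is wrapped in a data
                                // URI and sent as `file_data`, while remote documents are sent as
                                // `file_url`; exactly one of the two ends up populated. A fixed
                                // placeholder filename is attached since no name is carried over.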
                                let (file_data, file_url) = match data {
                                    DocumentSourceKind::Base64(data) => {
                                        (Some(format!("data:application/pdf;base64,{data}")), None)
                                    }
                                    DocumentSourceKind::Url(url) => (None, Some(url)),
                                    DocumentSourceKind::Raw(_) => {
                                        return Err(MessageError::ConversionError(
                                            "Raw files are not supported; encode them as base64 first"
                                                .into(),
                                        ));
                                    }
                                    doc => {
                                        return Err(MessageError::ConversionError(format!(
                                            "Unsupported document type: {doc}"
                                        )));
                                    }
                                };

                                Ok(UserContent::InputFile {
                                    file_url,
                                    file_data,
                                    filename: Some("document.pdf".into()),
                                })
                            }
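                            // Non-PDF documents have no dedicated input type here, so the stored
                            // payload is passed through as plain input text.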
                            message::UserContent::Document(message::Document {
                                data: DocumentSourceKind::Base64(text),
                                ..
                            }) => Ok(UserContent::InputText { text }),
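                            // Audio must already be base64-encoded; MP3 is assumed when no
                            // format is specified.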
                            message::UserContent::Audio(message::Audio {
                                data: DocumentSourceKind::Base64(data),
                                media_type,
                                ..
                            }) => Ok(UserContent::Audio {
                                input_audio: InputAudio {
                                    data,
                                    format: media_type.unwrap_or(AudioMediaType::MP3),
                                },
                            }),
                            message::UserContent::Audio(_) => Err(MessageError::ConversionError(
                                "Audio data must be base64-encoded".into(),
                            )),
                            message::UserContent::Document(_) => {
                                Err(MessageError::ConversionError(
                                    "Only PDF or base64-encoded documents are supported".into(),
                                ))
                            }
                            _ => unreachable!(),
                        })
                        .collect::<Result<Vec<_>, _>>()?;

                    let other_content = OneOrMany::many(other_content).expect(
                        "There must be other content here if there were no tool results",
                    );

                    Ok(vec![Message::User {
                        content: other_content,
                        name: None,
                    }])
                }
            }
            message::Message::Assistant { content, id } => {
                let assistant_message_id = id;

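                // Note: only the first assistant content item is converted; any
                // additional items in the same message are currently dropped.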
                match content.first() {
                    crate::message::AssistantContent::Text(Text { text }) => {
                        Ok(vec![Message::Assistant {
                            id: assistant_message_id
                                .expect("The assistant message ID should exist"),
                            status: ToolStatus::Completed,
                            content: OneOrMany::one(AssistantContentType::Text(
                                AssistantContent::OutputText(Text { text }),
                            )),
                            name: None,
                        }])
                    }
                    crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
                        id,
                        call_id,
                        function,
                    }) => Ok(vec![Message::Assistant {
                        content: OneOrMany::one(AssistantContentType::ToolCall(
                            OutputFunctionCall {
                                call_id: call_id.expect("The call ID should exist"),
                                arguments: function.arguments,
                                id,
                                name: function.name,
                                status: ToolStatus::Completed,
                            },
                        )),
                        id: assistant_message_id.expect("The assistant message ID should exist"),
                        name: None,
                        status: ToolStatus::Completed,
                    }]),
                    crate::message::AssistantContent::Reasoning(crate::message::Reasoning {
                        id,
                        reasoning,
                        ..
                    }) => Ok(vec![Message::Assistant {
                        content: OneOrMany::one(AssistantContentType::Reasoning(OpenAIReasoning {
                            id: id.expect(
                                "An OpenAI-generated ID is required when using OpenAI reasoning items",
                            ),
                            summary: reasoning
                                .into_iter()
                                .map(|text| ReasoningSummary::SummaryText { text })
                                .collect(),
                            encrypted_content: None,
                            status: Some(ToolStatus::Completed),
                        })),
                        id: assistant_message_id.expect("The assistant message ID should exist"),
                        name: None,
                        status: ToolStatus::Completed,
                    }]),
                    crate::message::AssistantContent::Image(_) => {
                        Err(MessageError::ConversionError(
                            "Assistant image content is not supported by the OpenAI Responses API"
                                .into(),
                        ))
                    }
                }
            }
        }
    }
}

impl FromStr for UserContent {
    type Err = Infallible;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(UserContent::InputText {
            text: s.to_string(),
        })
    }
}
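
// A minimal usage sketch (not part of the crate's existing test suite): the
// `FromStr` impl above lets callers build `UserContent` straight from string
// literals via `.parse()`, and the conversion can never fail.
#[cfg(test)]
mod user_content_from_str_tests {
    use super::*;

    #[test]
    fn parses_plain_string_into_input_text() {
        let content: UserContent = "hello".parse().expect("conversion is infallible");
        assert!(matches!(content, UserContent::InputText { text } if text == "hello"));
    }
}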