rig/providers/openai/responses_api/mod.rs

1//! The OpenAI Responses API.
2//!
3//! By default when creating a completion client, this is the API that gets used.
4//!
5//! If you'd like to switch back to the regular Completions API, you can do so by using the `.completions_api()` function - see below for an example:
6//! ```rust
7//! let openai_client = rig::providers::openai::Client::from_env();
8//! let model = openai_client.completion_model("gpt-4o").completions_api();
9//! ```
10use super::completion::ToolChoice;
11use super::{Client, responses_api::streaming::StreamingCompletionResponse};
12use super::{InputAudio, SystemContent};
13use crate::completion::CompletionError;
14use crate::http_client;
15use crate::http_client::HttpClientExt;
16use crate::json_utils;
17use crate::message::{
18    AudioMediaType, Document, DocumentMediaType, DocumentSourceKind, ImageDetail, MessageError,
19    MimeType, Text,
20};
21use crate::one_or_many::string_or_one_or_many;
22
23use crate::{OneOrMany, completion, message};
24use serde::{Deserialize, Serialize};
25use serde_json::{Map, Value};
26use tracing::{Instrument, info_span};
27
28use std::convert::Infallible;
29use std::ops::Add;
30use std::str::FromStr;
31
32pub mod streaming;
33
/// The completion request type for OpenAI's Responses API: <https://platform.openai.com/docs/api-reference/responses/create>
35/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
36#[derive(Debug, Deserialize, Serialize, Clone)]
37pub struct CompletionRequest {
38    /// Message inputs
39    pub input: OneOrMany<InputItem>,
40    /// The model name
41    pub model: String,
    /// Instructions (referred to in Rig as the preamble; other APIs call this the "system prompt")
43    #[serde(skip_serializing_if = "Option::is_none")]
44    pub instructions: Option<String>,
45    /// The maximum number of output tokens.
46    #[serde(skip_serializing_if = "Option::is_none")]
47    pub max_output_tokens: Option<u64>,
48    /// Toggle to true for streaming responses.
49    #[serde(skip_serializing_if = "Option::is_none")]
50    pub stream: Option<bool>,
    /// The temperature. Set higher (up to a maximum of 2.0) for more creative responses.
52    #[serde(skip_serializing_if = "Option::is_none")]
53    pub temperature: Option<f64>,
    /// Whether the LLM should be forced to use a tool before returning a response.
    /// If not provided, the default is "auto".
56    #[serde(skip_serializing_if = "Option::is_none")]
57    tool_choice: Option<ToolChoice>,
    /// The tools you want to use. Currently this is limited to functions, but it will be expanded in the future.
59    #[serde(skip_serializing_if = "Vec::is_empty")]
60    pub tools: Vec<ResponsesToolDefinition>,
61    /// Additional parameters
62    #[serde(flatten)]
63    pub additional_parameters: AdditionalParameters,
64}
65
66impl CompletionRequest {
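    /// Attach a JSON schema to the request so that the model's reply must conform to it
    /// (OpenAI structured outputs).
    ///
    /// A minimal illustrative sketch, where the schema and the pre-built `request` are
    /// hypothetical:
    /// ```rust,ignore
    /// let schema = serde_json::json!({
    ///     "type": "object",
    ///     "properties": { "answer": { "type": "string" } },
    ///     "required": ["answer"],
    ///     "additionalProperties": false
    /// });
    /// let request = request.with_structured_outputs("answer_schema", schema);
    /// ```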
67    pub fn with_structured_outputs<S>(mut self, schema_name: S, schema: serde_json::Value) -> Self
68    where
69        S: Into<String>,
70    {
71        self.additional_parameters.text = Some(TextConfig::structured_output(schema_name, schema));
72
73        self
74    }
75
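    /// Attach a reasoning configuration to the request. See [`Reasoning`] for an
    /// illustrative example of building one.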
76    pub fn with_reasoning(mut self, reasoning: Reasoning) -> Self {
77        self.additional_parameters.reasoning = Some(reasoning);
78
79        self
80    }
81}
82
83/// An input item for [`CompletionRequest`].
84#[derive(Debug, Deserialize, Serialize, Clone)]
85pub struct InputItem {
86    /// The role of an input item/message.
87    /// Input messages should be Some(Role::User), and output messages should be Some(Role::Assistant).
88    /// Everything else should be None.
89    #[serde(skip_serializing_if = "Option::is_none")]
90    role: Option<Role>,
91    /// The input content itself.
92    #[serde(flatten)]
93    input: InputContent,
94}
95
96/// Message roles. Used by OpenAI Responses API to determine who created a given message.
97#[derive(Debug, Deserialize, Serialize, Clone)]
98#[serde(rename_all = "lowercase")]
99pub enum Role {
100    User,
101    Assistant,
102    System,
103}
104
105/// The type of content used in an [`InputItem`]. Additionally holds data for each type of input content.
106#[derive(Debug, Deserialize, Serialize, Clone)]
107#[serde(tag = "type", rename_all = "snake_case")]
108pub enum InputContent {
109    Message(Message),
110    Reasoning(OpenAIReasoning),
111    FunctionCall(OutputFunctionCall),
112    FunctionCallOutput(ToolResult),
113}
114
115#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
116pub struct OpenAIReasoning {
117    id: String,
118    pub summary: Vec<ReasoningSummary>,
119    pub encrypted_content: Option<String>,
120    #[serde(skip_serializing_if = "Option::is_none")]
121    pub status: Option<ToolStatus>,
122}
123
124#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
125#[serde(tag = "type", rename_all = "snake_case")]
126pub enum ReasoningSummary {
127    SummaryText { text: String },
128}
129
130impl ReasoningSummary {
131    fn new(input: &str) -> Self {
132        Self::SummaryText {
133            text: input.to_string(),
134        }
135    }
136
137    pub fn text(&self) -> String {
138        let ReasoningSummary::SummaryText { text } = self;
139        text.clone()
140    }
141}
142
143/// A tool result.
144#[derive(Debug, Deserialize, Serialize, Clone)]
145pub struct ToolResult {
    /// The call ID of the tool call this result responds to. It must match the tool call's call ID, otherwise OpenAI will return an error.
147    call_id: String,
148    /// The result of a tool call.
149    output: String,
150    /// The status of a tool call (if used in a completion request, this should always be Completed)
151    status: ToolStatus,
152}
153
154impl From<Message> for InputItem {
155    fn from(value: Message) -> Self {
156        match value {
157            Message::User { .. } => Self {
158                role: Some(Role::User),
159                input: InputContent::Message(value),
160            },
161            Message::Assistant { ref content, .. } => {
162                let role = if content
163                    .clone()
164                    .iter()
165                    .any(|x| matches!(x, AssistantContentType::Reasoning(_)))
166                {
167                    None
168                } else {
169                    Some(Role::Assistant)
170                };
171                Self {
172                    role,
173                    input: InputContent::Message(value),
174                }
175            }
176            Message::System { .. } => Self {
177                role: Some(Role::System),
178                input: InputContent::Message(value),
179            },
180            Message::ToolResult {
181                tool_call_id,
182                output,
183            } => Self {
184                role: None,
185                input: InputContent::FunctionCallOutput(ToolResult {
186                    call_id: tool_call_id,
187                    output,
188                    status: ToolStatus::Completed,
189                }),
190            },
191        }
192    }
193}
194
195impl TryFrom<crate::completion::Message> for Vec<InputItem> {
196    type Error = CompletionError;
197
198    fn try_from(value: crate::completion::Message) -> Result<Self, Self::Error> {
199        match value {
200            crate::completion::Message::User { content } => {
201                let mut items = Vec::new();
202
203                for user_content in content {
204                    match user_content {
205                        crate::message::UserContent::Text(Text { text }) => {
206                            items.push(InputItem {
207                                role: Some(Role::User),
208                                input: InputContent::Message(Message::User {
209                                    content: OneOrMany::one(UserContent::InputText { text }),
210                                    name: None,
211                                }),
212                            });
213                        }
214                        crate::message::UserContent::ToolResult(
215                            crate::completion::message::ToolResult {
216                                call_id,
217                                content: tool_content,
218                                ..
219                            },
220                        ) => {
221                            for tool_result_content in tool_content {
222                                let crate::completion::message::ToolResultContent::Text(Text {
223                                    text,
224                                }) = tool_result_content
225                                else {
                                    return Err(CompletionError::ProviderError(
                                        "The OpenAI Responses API only supports text tool results"
                                            .to_string(),
                                    ));
229                                };
231                                items.push(InputItem {
232                                    role: None,
233                                    input: InputContent::FunctionCallOutput(ToolResult {
234                                        call_id: call_id
235                                            .clone()
236                                            .expect("The call ID of this tool should exist!"),
237                                        output: text,
238                                        status: ToolStatus::Completed,
239                                    }),
240                                });
241                            }
242                        }
243                        crate::message::UserContent::Document(Document {
244                            data,
245                            media_type: Some(DocumentMediaType::PDF),
246                            ..
247                        }) => {
248                            let (file_data, file_url) = match data {
249                                DocumentSourceKind::Base64(data) => {
250                                    (Some(format!("data:application/pdf;base64,{data}")), None)
251                                }
252                                DocumentSourceKind::Url(url) => (None, Some(url)),
253                                DocumentSourceKind::Raw(_) => {
254                                    return Err(CompletionError::RequestError(
255                                        "Raw file data not supported, encode as base64 first"
256                                            .into(),
257                                    ));
258                                }
259                                doc => {
260                                    return Err(CompletionError::RequestError(
261                                        format!("Unsupported document type: {doc}").into(),
262                                    ));
263                                }
264                            };
265
266                            items.push(InputItem {
267                                role: Some(Role::User),
268                                input: InputContent::Message(Message::User {
269                                    content: OneOrMany::one(UserContent::InputFile {
270                                        file_data,
271                                        file_url,
272                                        filename: Some("document.pdf".to_string()),
273                                    }),
274                                    name: None,
275                                }),
276                            })
277                        }
278                        // todo: should we ensure this takes into account file size?
279                        crate::message::UserContent::Document(Document {
280                            data: DocumentSourceKind::Base64(text),
281                            ..
282                        }) => items.push(InputItem {
283                            role: Some(Role::User),
284                            input: InputContent::Message(Message::User {
285                                content: OneOrMany::one(UserContent::InputText { text }),
286                                name: None,
287                            }),
288                        }),
289                        crate::message::UserContent::Document(Document {
290                            data: DocumentSourceKind::String(text),
291                            ..
292                        }) => items.push(InputItem {
293                            role: Some(Role::User),
294                            input: InputContent::Message(Message::User {
295                                content: OneOrMany::one(UserContent::InputText { text }),
296                                name: None,
297                            }),
298                        }),
299                        crate::message::UserContent::Image(crate::message::Image {
300                            data,
301                            media_type,
302                            detail,
303                            ..
304                        }) => {
305                            let url = match data {
306                                DocumentSourceKind::Base64(data) => {
307                                    let media_type = if let Some(media_type) = media_type {
308                                        media_type.to_mime_type().to_string()
309                                    } else {
310                                        String::new()
311                                    };
312                                    format!("data:{media_type};base64,{data}")
313                                }
314                                DocumentSourceKind::Url(url) => url,
315                                DocumentSourceKind::Raw(_) => {
316                                    return Err(CompletionError::RequestError(
317                                        "Raw file data not supported, encode as base64 first"
318                                            .into(),
319                                    ));
320                                }
321                                doc => {
322                                    return Err(CompletionError::RequestError(
                                        format!("Unsupported image source: {doc}").into(),
324                                    ));
325                                }
326                            };
327                            items.push(InputItem {
328                                role: Some(Role::User),
329                                input: InputContent::Message(Message::User {
330                                    content: OneOrMany::one(UserContent::InputImage {
331                                        image_url: url,
332                                        detail: detail.unwrap_or_default(),
333                                    }),
334                                    name: None,
335                                }),
336                            });
337                        }
338                        message => {
339                            return Err(CompletionError::ProviderError(format!(
340                                "Unsupported message: {message:?}"
341                            )));
342                        }
343                    }
344                }
345
346                Ok(items)
347            }
348            crate::completion::Message::Assistant { id, content } => {
349                let mut items = Vec::new();
350
351                for assistant_content in content {
352                    match assistant_content {
353                        crate::message::AssistantContent::Text(Text { text }) => {
                            let id = id.clone().unwrap_or_default();
355                            items.push(InputItem {
356                                role: Some(Role::Assistant),
357                                input: InputContent::Message(Message::Assistant {
358                                    content: OneOrMany::one(AssistantContentType::Text(
359                                        AssistantContent::OutputText(Text { text }),
360                                    )),
361                                    id,
362                                    name: None,
363                                    status: ToolStatus::Completed,
364                                }),
365                            });
366                        }
367                        crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
368                            id: tool_id,
369                            call_id,
370                            function,
371                        }) => {
372                            items.push(InputItem {
373                                role: None,
374                                input: InputContent::FunctionCall(OutputFunctionCall {
375                                    arguments: function.arguments,
376                                    call_id: call_id.expect("The tool call ID should exist!"),
377                                    id: tool_id,
378                                    name: function.name,
379                                    status: ToolStatus::Completed,
380                                }),
381                            });
382                        }
383                        crate::message::AssistantContent::Reasoning(
384                            crate::message::Reasoning { id, reasoning, .. },
385                        ) => {
386                            items.push(InputItem {
387                                role: None,
388                                input: InputContent::Reasoning(OpenAIReasoning {
389                                    id: id
390                                        .expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
391                                    summary: reasoning.into_iter().map(|x| ReasoningSummary::new(&x)).collect(),
392                                    encrypted_content: None,
393                                    status: None,
394                                }),
395                            });
396                        }
397                    }
398                }
399
400                Ok(items)
401            }
402        }
403    }
404}
405
406impl From<OneOrMany<String>> for Vec<ReasoningSummary> {
407    fn from(value: OneOrMany<String>) -> Self {
408        value.iter().map(|x| ReasoningSummary::new(x)).collect()
409    }
410}
411
/// The definition of a tool, adapted for OpenAI's Responses API.
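///
/// A minimal illustrative sketch of converting a Rig [`completion::ToolDefinition`]
/// (the tool shown here is hypothetical):
/// ```rust,ignore
/// let tool = rig::completion::ToolDefinition {
///     name: "add".to_string(),
///     description: "Add two numbers together".to_string(),
///     parameters: serde_json::json!({
///         "type": "object",
///         "properties": { "a": { "type": "number" }, "b": { "type": "number" } },
///         "required": ["a", "b"]
///     }),
/// };
/// let openai_tool = ResponsesToolDefinition::from(tool);
/// assert_eq!(openai_tool.kind, "function");
/// assert!(openai_tool.strict);
/// ```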
413#[derive(Debug, Deserialize, Serialize, Clone)]
414pub struct ResponsesToolDefinition {
415    /// Tool name
416    pub name: String,
    /// Parameters. This should be a JSON schema. Every object schema must also have an "additionalProperties" field set to false, as this is required when using OpenAI's strict mode (enabled by default).
418    pub parameters: serde_json::Value,
    /// Whether to use strict mode. Enabled by default, as it guarantees that tool arguments conform to the provided schema.
420    pub strict: bool,
421    /// The type of tool. This should always be "function".
422    #[serde(rename = "type")]
423    pub kind: String,
424    /// Tool description.
425    pub description: String,
426}
427
/// Recursively ensures that every object schema in a JSON schema has `additionalProperties: false`.
/// Nested arrays, schema `$defs`, object properties and enum-like variants (`anyOf`/`oneOf`/`allOf`) are all handled recursively.
/// This is required by OpenAI's Responses API when using strict mode.
431fn add_props_false(schema: &mut serde_json::Value) {
432    if let Value::Object(obj) = schema {
433        let is_object_schema = obj.get("type") == Some(&Value::String("object".to_string()))
434            || obj.contains_key("properties");
435
436        if is_object_schema && !obj.contains_key("additionalProperties") {
437            obj.insert("additionalProperties".to_string(), Value::Bool(false));
438        }
439
440        if let Some(defs) = obj.get_mut("$defs")
441            && let Value::Object(defs_obj) = defs
442        {
443            for (_, def_schema) in defs_obj.iter_mut() {
444                add_props_false(def_schema);
445            }
446        }
447
448        if let Some(properties) = obj.get_mut("properties")
449            && let Value::Object(props) = properties
450        {
451            for (_, prop_value) in props.iter_mut() {
452                add_props_false(prop_value);
453            }
454        }
455
456        if let Some(items) = obj.get_mut("items") {
457            add_props_false(items);
458        }
459
        // Handle enum-like and composed schemas (anyOf/oneOf/allOf)
461        for key in ["anyOf", "oneOf", "allOf"] {
462            if let Some(variants) = obj.get_mut(key)
463                && let Value::Array(variants_array) = variants
464            {
465                for variant in variants_array.iter_mut() {
466                    add_props_false(variant);
467                }
468            }
469        }
470    }
471}
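
// A minimal illustrative check of `add_props_false`: every nested object schema picks up
// `additionalProperties: false`, which OpenAI's strict mode expects. This is a sketch of
// the intended behaviour rather than an exhaustive test.
#[cfg(test)]
mod add_props_false_example {
    use super::add_props_false;
    use serde_json::{Value, json};

    #[test]
    fn nested_object_schemas_get_additional_properties_false() {
        let mut schema = json!({
            "type": "object",
            "properties": {
                "tags": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": { "name": { "type": "string" } }
                    }
                }
            }
        });

        add_props_false(&mut schema);

        // The outer object and the nested `items` object are both patched;
        // the intermediate array schema is left untouched.
        assert_eq!(schema["additionalProperties"], Value::Bool(false));
        assert_eq!(
            schema["properties"]["tags"]["items"]["additionalProperties"],
            Value::Bool(false)
        );
        assert!(
            schema["properties"]["tags"]
                .get("additionalProperties")
                .is_none()
        );
    }
}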
472
473impl From<completion::ToolDefinition> for ResponsesToolDefinition {
474    fn from(value: completion::ToolDefinition) -> Self {
475        let completion::ToolDefinition {
476            name,
477            mut parameters,
478            description,
479        } = value;
480
481        add_props_false(&mut parameters);
482
483        Self {
484            name,
485            parameters,
486            description,
487            kind: "function".to_string(),
488            strict: true,
489        }
490    }
491}
492
/// Token usage reported by the OpenAI Responses API.
/// This includes the input and output token counts (each with a more detailed breakdown) as well as a total token count.
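///
/// Usage values implement [`std::ops::Add`], so per-chunk usage can be accumulated while
/// streaming. A minimal illustrative sketch, where `chunk_usages` is a hypothetical
/// iterator of [`ResponsesUsage`] values:
/// ```rust,ignore
/// let mut total = ResponsesUsage::new();
/// for chunk_usage in chunk_usages {
///     total = total + chunk_usage;
/// }
/// ```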
495#[derive(Clone, Debug, Serialize, Deserialize)]
496pub struct ResponsesUsage {
497    /// Input tokens
498    pub input_tokens: u64,
499    /// In-depth detail on input tokens (cached tokens)
500    #[serde(skip_serializing_if = "Option::is_none")]
501    pub input_tokens_details: Option<InputTokensDetails>,
502    /// Output tokens
503    pub output_tokens: u64,
504    /// In-depth detail on output tokens (reasoning tokens)
505    pub output_tokens_details: OutputTokensDetails,
506    /// Total tokens used (for a given prompt)
507    pub total_tokens: u64,
508}
509
510impl ResponsesUsage {
511    /// Create a new ResponsesUsage instance
512    pub(crate) fn new() -> Self {
513        Self {
514            input_tokens: 0,
515            input_tokens_details: Some(InputTokensDetails::new()),
516            output_tokens: 0,
517            output_tokens_details: OutputTokensDetails::new(),
518            total_tokens: 0,
519        }
520    }
521}
522
523impl Add for ResponsesUsage {
524    type Output = Self;
525
526    fn add(self, rhs: Self) -> Self::Output {
527        let input_tokens = self.input_tokens + rhs.input_tokens;
528        let input_tokens_details = self.input_tokens_details.map(|lhs| {
529            if let Some(tokens) = rhs.input_tokens_details {
530                lhs + tokens
531            } else {
532                lhs
533            }
534        });
535        let output_tokens = self.output_tokens + rhs.output_tokens;
536        let output_tokens_details = self.output_tokens_details + rhs.output_tokens_details;
537        let total_tokens = self.total_tokens + rhs.total_tokens;
538        Self {
539            input_tokens,
540            input_tokens_details,
541            output_tokens,
542            output_tokens_details,
543            total_tokens,
544        }
545    }
546}
547
548/// In-depth details on input tokens.
549#[derive(Clone, Debug, Serialize, Deserialize)]
550pub struct InputTokensDetails {
551    /// Cached tokens from OpenAI
552    pub cached_tokens: u64,
553}
554
555impl InputTokensDetails {
556    pub(crate) fn new() -> Self {
557        Self { cached_tokens: 0 }
558    }
559}
560
561impl Add for InputTokensDetails {
562    type Output = Self;
563    fn add(self, rhs: Self) -> Self::Output {
564        Self {
565            cached_tokens: self.cached_tokens + rhs.cached_tokens,
566        }
567    }
568}
569
570/// In-depth details on output tokens.
571#[derive(Clone, Debug, Serialize, Deserialize)]
572pub struct OutputTokensDetails {
573    /// Reasoning tokens
574    pub reasoning_tokens: u64,
575}
576
577impl OutputTokensDetails {
578    pub(crate) fn new() -> Self {
579        Self {
580            reasoning_tokens: 0,
581        }
582    }
583}
584
585impl Add for OutputTokensDetails {
586    type Output = Self;
587    fn add(self, rhs: Self) -> Self::Output {
588        Self {
589            reasoning_tokens: self.reasoning_tokens + rhs.reasoning_tokens,
590        }
591    }
592}
593
/// Occasionally, OpenAI's Responses API may return an incomplete response. This struct holds the reason why.
595#[derive(Clone, Debug, Default, Serialize, Deserialize)]
596pub struct IncompleteDetailsReason {
597    /// The reason for an incomplete [`CompletionResponse`].
598    pub reason: String,
599}
600
/// A response error from OpenAI's Responses API.
602#[derive(Clone, Debug, Default, Serialize, Deserialize)]
603pub struct ResponseError {
604    /// Error code
605    pub code: String,
606    /// Error message
607    pub message: String,
608}
609
610/// A response object as an enum (ensures type validation)
611#[derive(Clone, Debug, Deserialize, Serialize)]
612#[serde(rename_all = "snake_case")]
613pub enum ResponseObject {
614    Response,
615}
616
617/// The response status as an enum (ensures type validation)
618#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
619#[serde(rename_all = "snake_case")]
620pub enum ResponseStatus {
621    InProgress,
622    Completed,
623    Failed,
624    Cancelled,
625    Queued,
626    Incomplete,
627}
628
/// Attempt to create a [`CompletionRequest`] from a model name and a [`crate::completion::CompletionRequest`].
630impl TryFrom<(String, crate::completion::CompletionRequest)> for CompletionRequest {
631    type Error = CompletionError;
632    fn try_from(
633        (model, req): (String, crate::completion::CompletionRequest),
634    ) -> Result<Self, Self::Error> {
        let input = {
            let mut partial_history = vec![];
            if let Some(docs) = req.normalized_documents() {
                partial_history.push(docs);
            }
            partial_history.extend(req.chat_history);

            // Convert each message into one or more Responses API input items,
            // propagating conversion errors instead of panicking.
            // (The preamble is not included here; it is sent as `instructions` below.)
            partial_history
                .into_iter()
                .map(|msg| <Vec<InputItem>>::try_from(msg))
                .collect::<Result<Vec<_>, _>>()?
                .into_iter()
                .flatten()
                .collect::<Vec<InputItem>>()
        };
658
659        let input = OneOrMany::many(input)
660            .expect("This should never panic - if it does, please file a bug report");
661
        // A `stream: true` flag may be passed through the additional parameters.
        let stream = req
            .additional_params
            .as_ref()
            .and_then(|params| params.get("stream"))
            .and_then(Value::as_bool);
667
        let additional_parameters = if let Some(map) = req.additional_params {
            // Propagate deserialization errors instead of panicking on malformed parameters
            serde_json::from_value::<AdditionalParameters>(map)?
        } else {
            // If there are no additional parameters, initialise an empty set
            AdditionalParameters::default()
        };
674
675        let tool_choice = req.tool_choice.map(ToolChoice::try_from).transpose()?;
676
677        Ok(Self {
678            input,
679            model,
680            instructions: req.preamble,
681            max_output_tokens: req.max_tokens,
682            stream,
683            tool_choice,
684            tools: req
685                .tools
686                .into_iter()
687                .map(ResponsesToolDefinition::from)
688                .collect(),
689            temperature: req.temperature,
690            additional_parameters,
691        })
692    }
693}
694
/// The completion model struct for OpenAI's Responses API.
696#[derive(Clone)]
697pub struct ResponsesCompletionModel<T = reqwest::Client> {
698    /// The OpenAI client
699    pub(crate) client: Client<T>,
    /// Name of the model (e.g. `gpt-4o`)
701    pub model: String,
702}
703
704impl<T> ResponsesCompletionModel<T>
705where
706    T: HttpClientExt + Clone + Default + std::fmt::Debug + 'static,
707{
708    /// Creates a new [`ResponsesCompletionModel`].
709    pub fn new(client: Client<T>, model: &str) -> Self {
710        Self {
711            client,
712            model: model.to_string(),
713        }
714    }
715
    /// Use the Completions API instead of the Responses API.
717    pub fn completions_api(self) -> crate::providers::openai::completion::CompletionModel<T> {
718        crate::providers::openai::completion::CompletionModel::new(self.client, &self.model)
719    }
720
721    /// Attempt to create a completion request from [`crate::completion::CompletionRequest`].
722    pub(crate) fn create_completion_request(
723        &self,
724        completion_request: crate::completion::CompletionRequest,
725    ) -> Result<CompletionRequest, CompletionError> {
726        let req = CompletionRequest::try_from((self.model.clone(), completion_request))?;
727
728        Ok(req)
729    }
730}
731
732/// The standard response format from OpenAI's Responses API.
733#[derive(Clone, Debug, Serialize, Deserialize)]
734pub struct CompletionResponse {
735    /// The ID of a completion response.
736    pub id: String,
737    /// The type of the object.
738    pub object: ResponseObject,
    /// The time at which the response was created, in seconds since the UNIX epoch.
740    pub created_at: u64,
741    /// The status of the response.
742    pub status: ResponseStatus,
743    /// Response error (optional)
744    pub error: Option<ResponseError>,
745    /// Incomplete response details (optional)
746    pub incomplete_details: Option<IncompleteDetailsReason>,
747    /// System prompt/preamble
748    pub instructions: Option<String>,
749    /// The maximum number of tokens the model should output
750    pub max_output_tokens: Option<u64>,
751    /// The model name
752    pub model: String,
753    /// Token usage
754    pub usage: Option<ResponsesUsage>,
755    /// The model output (messages, etc will go here)
756    pub output: Vec<Output>,
757    /// Tools
758    pub tools: Vec<ResponsesToolDefinition>,
759    /// Additional parameters
760    #[serde(flatten)]
761    pub additional_parameters: AdditionalParameters,
762}
763
/// Additional parameters for the completion request type for OpenAI's Responses API: <https://platform.openai.com/docs/api-reference/responses/create>
765/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
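///
/// A minimal illustrative sketch: build the extra parameters and serialize them to JSON so
/// they can be passed as a request's additional parameters (e.g. through a request
/// builder's `additional_params` option, assumed here):
/// ```rust,ignore
/// let params = AdditionalParameters {
///     parallel_tool_calls: Some(false),
///     store: Some(true),
///     ..Default::default()
/// };
/// let params_json = params.to_json();
/// ```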
766#[derive(Clone, Debug, Deserialize, Serialize, Default)]
767pub struct AdditionalParameters {
    /// Whether or not a given model task should run in the background (i.e. as a detached process).
769    #[serde(skip_serializing_if = "Option::is_none")]
770    pub background: Option<bool>,
771    /// The text response format. This is where you would add structured outputs (if you want them).
772    #[serde(skip_serializing_if = "Option::is_none")]
773    pub text: Option<TextConfig>,
    /// What types of extra data to include in the response. Most of these are not yet supported by Rig, but support is planned.
775    #[serde(skip_serializing_if = "Option::is_none")]
776    pub include: Option<Vec<Include>>,
    /// Nucleus sampling (`top_p`). OpenAI recommends adjusting this or `temperature`, but not both.
778    #[serde(skip_serializing_if = "Option::is_none")]
779    pub top_p: Option<f64>,
    /// The truncation strategy to use when the conversation exceeds the model's context window.
781    #[serde(skip_serializing_if = "Option::is_none")]
782    pub truncation: Option<TruncationStrategy>,
    /// A unique identifier representing your end user. This can help OpenAI monitor and detect abuse.
784    #[serde(skip_serializing_if = "Option::is_none")]
785    pub user: Option<String>,
    /// Any additional metadata you'd like to attach. This will also be returned in the response.
787    #[serde(skip_serializing_if = "Map::is_empty", default)]
788    pub metadata: serde_json::Map<String, serde_json::Value>,
789    /// Whether or not you want tool calls to run in parallel.
790    #[serde(skip_serializing_if = "Option::is_none")]
791    pub parallel_tool_calls: Option<bool>,
    /// The ID of the previous response. If you are not sending the full conversation history, this helps OpenAI track the message flow.
793    #[serde(skip_serializing_if = "Option::is_none")]
794    pub previous_response_id: Option<String>,
    /// Add thinking/reasoning to the request. The reasoning will be emitted as an item in the response's `output` field.
796    #[serde(skip_serializing_if = "Option::is_none")]
797    pub reasoning: Option<Reasoning>,
798    /// The service tier you're using.
799    #[serde(skip_serializing_if = "Option::is_none")]
800    pub service_tier: Option<OpenAIServiceTier>,
    /// Whether or not to store the response so it can be retrieved later via the API.
802    #[serde(skip_serializing_if = "Option::is_none")]
803    pub store: Option<bool>,
804}
805
806impl AdditionalParameters {
807    pub fn to_json(self) -> serde_json::Value {
        serde_json::to_value(self).expect("serializing AdditionalParameters to JSON should never fail since all of its fields are plain JSON values")
809    }
810}
811
/// The truncation strategy.
/// With `Auto`, if the context of this response and previous ones exceeds the model's context window, the model truncates the input by dropping items from the middle of the conversation.
/// With `Disabled` (the default), no truncation is applied.
815#[derive(Clone, Debug, Default, Serialize, Deserialize)]
816#[serde(rename_all = "snake_case")]
817pub enum TruncationStrategy {
818    Auto,
819    #[default]
820    Disabled,
821}
822
823/// The model output format configuration.
824/// You can either have plain text by default, or attach a JSON schema for the purposes of structured outputs.
825#[derive(Clone, Debug, Serialize, Deserialize)]
826pub struct TextConfig {
827    pub format: TextFormat,
828}
829
830impl TextConfig {
831    pub(crate) fn structured_output<S>(name: S, schema: serde_json::Value) -> Self
832    where
833        S: Into<String>,
834    {
835        Self {
836            format: TextFormat::JsonSchema(StructuredOutputsInput {
837                name: name.into(),
838                schema,
839                strict: true,
840            }),
841        }
842    }
843}
844
845/// The text format (contained by [`TextConfig`]).
846/// You can either have plain text by default, or attach a JSON schema for the purposes of structured outputs.
847#[derive(Clone, Debug, Serialize, Deserialize, Default)]
848#[serde(tag = "type")]
849#[serde(rename_all = "snake_case")]
850pub enum TextFormat {
851    JsonSchema(StructuredOutputsInput),
852    #[default]
853    Text,
854}
855
856/// The inputs required for adding structured outputs.
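///
/// A minimal illustrative sketch using the `schemars` crate mentioned below (assuming it is
/// available as a dependency):
/// ```rust,ignore
/// #[derive(schemars::JsonSchema)]
/// struct Answer {
///     answer: String,
/// }
///
/// let schema = serde_json::to_value(schemars::schema_for!(Answer)).unwrap();
/// let input = StructuredOutputsInput {
///     name: "answer".to_string(),
///     schema,
///     strict: true,
/// };
/// ```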
857#[derive(Clone, Debug, Serialize, Deserialize)]
858pub struct StructuredOutputsInput {
859    /// The name of your schema.
860    pub name: String,
861    /// Your required output schema. It is recommended that you use the JsonSchema macro, which you can check out at <https://docs.rs/schemars/latest/schemars/trait.JsonSchema.html>.
862    pub schema: serde_json::Value,
863    /// Enable strict output. If you are using your AI agent in a data pipeline or another scenario that requires the data to be absolutely fixed to a given schema, it is recommended to set this to true.
864    pub strict: bool,
865}
866
867/// Add reasoning to a [`CompletionRequest`].
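///
/// A minimal illustrative sketch of building a reasoning configuration and attaching it to
/// a hypothetical pre-built [`CompletionRequest`]:
/// ```rust,ignore
/// let reasoning = Reasoning::new()
///     .with_effort(ReasoningEffort::High)
///     .with_summary_level(ReasoningSummaryLevel::Detailed);
/// let request = request.with_reasoning(reasoning);
/// ```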
868#[derive(Clone, Debug, Default, Serialize, Deserialize)]
869pub struct Reasoning {
870    /// How much effort you want the model to put into thinking/reasoning.
871    pub effort: Option<ReasoningEffort>,
872    /// How much effort you want the model to put into writing the reasoning summary.
873    #[serde(skip_serializing_if = "Option::is_none")]
874    pub summary: Option<ReasoningSummaryLevel>,
875}
876
877impl Reasoning {
    /// Creates a new [`Reasoning`] instance with empty values.
879    pub fn new() -> Self {
880        Self {
881            effort: None,
882            summary: None,
883        }
884    }
885
886    /// Adds reasoning effort.
887    pub fn with_effort(mut self, reasoning_effort: ReasoningEffort) -> Self {
888        self.effort = Some(reasoning_effort);
889
890        self
891    }
892
893    /// Adds summary level (how detailed the reasoning summary will be).
894    pub fn with_summary_level(mut self, reasoning_summary_level: ReasoningSummaryLevel) -> Self {
895        self.summary = Some(reasoning_summary_level);
896
897        self
898    }
899}
900
/// The billing service tier that will be used. Defaults to `Auto`.
902#[derive(Clone, Debug, Default, Serialize, Deserialize)]
903#[serde(rename_all = "snake_case")]
904pub enum OpenAIServiceTier {
905    #[default]
906    Auto,
907    Default,
908    Flex,
909}
910
911/// The amount of reasoning effort that will be used by a given model.
912#[derive(Clone, Debug, Default, Serialize, Deserialize)]
913#[serde(rename_all = "snake_case")]
914pub enum ReasoningEffort {
915    Minimal,
916    Low,
917    #[default]
918    Medium,
919    High,
920}
921
922/// The amount of effort that will go into a reasoning summary by a given model.
923#[derive(Clone, Debug, Default, Serialize, Deserialize)]
924#[serde(rename_all = "snake_case")]
925pub enum ReasoningSummaryLevel {
926    #[default]
927    Auto,
928    Concise,
929    Detailed,
930}
931
932/// Results to additionally include in the OpenAI Responses API.
933/// Note that most of these are currently unsupported, but have been added for completeness.
934#[derive(Clone, Debug, Deserialize, Serialize)]
935pub enum Include {
936    #[serde(rename = "file_search_call.results")]
937    FileSearchCallResults,
938    #[serde(rename = "message.input_image.image_url")]
939    MessageInputImageImageUrl,
940    #[serde(rename = "computer_call.output.image_url")]
941    ComputerCallOutputOutputImageUrl,
942    #[serde(rename = "reasoning.encrypted_content")]
943    ReasoningEncryptedContent,
944    #[serde(rename = "code_interpreter_call.outputs")]
945    CodeInterpreterCallOutputs,
946}
947
948/// A currently non-exhaustive list of output types.
949#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
950#[serde(tag = "type")]
951#[serde(rename_all = "snake_case")]
952pub enum Output {
953    Message(OutputMessage),
954    #[serde(alias = "function_call")]
955    FunctionCall(OutputFunctionCall),
956    Reasoning {
957        id: String,
958        summary: Vec<ReasoningSummary>,
959    },
960}
961
962impl From<Output> for Vec<completion::AssistantContent> {
963    fn from(value: Output) -> Self {
964        let res: Vec<completion::AssistantContent> = match value {
965            Output::Message(OutputMessage { content, .. }) => content
966                .into_iter()
967                .map(completion::AssistantContent::from)
968                .collect(),
969            Output::FunctionCall(OutputFunctionCall {
970                id,
971                arguments,
972                call_id,
973                name,
974                ..
975            }) => vec![completion::AssistantContent::tool_call_with_call_id(
976                id, call_id, name, arguments,
977            )],
978            Output::Reasoning { id, summary } => {
979                let summary: Vec<String> = summary.into_iter().map(|x| x.text()).collect();
980
981                vec![completion::AssistantContent::Reasoning(
982                    message::Reasoning::multi(summary).with_id(id),
983                )]
984            }
985        };
986
987        res
988    }
989}
990
991#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
992pub struct OutputReasoning {
993    id: String,
994    summary: Vec<ReasoningSummary>,
995    status: ToolStatus,
996}
997
/// An OpenAI Responses API tool call. The returned `call_id` must be echoed back in the corresponding tool result input item, otherwise OpenAI will return an error.
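///
/// Illustrative sketch: echo the `call_id` back when constructing the tool result input
/// item (here `call` is a hypothetical [`OutputFunctionCall`] received from the model):
/// ```rust,ignore
/// let tool_result = InputItem::from(Message::ToolResult {
///     tool_call_id: call.call_id.clone(),
///     output: "{\"ok\": true}".to_string(),
/// });
/// ```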
999#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
1000pub struct OutputFunctionCall {
1001    pub id: String,
1002    #[serde(with = "json_utils::stringified_json")]
1003    pub arguments: serde_json::Value,
1004    pub call_id: String,
1005    pub name: String,
1006    pub status: ToolStatus,
1007}
1008
1009/// The status of a given tool.
1010#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
1011#[serde(rename_all = "snake_case")]
1012pub enum ToolStatus {
1013    InProgress,
1014    Completed,
1015    Incomplete,
1016}
1017
1018/// An output message from OpenAI's Responses API.
1019#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
1020pub struct OutputMessage {
    /// The message ID. Must be included when sending the message back to OpenAI.
1022    pub id: String,
1023    /// The role (currently only Assistant is available as this struct is only created when receiving an LLM message as a response)
1024    pub role: OutputRole,
1025    /// The status of the response
1026    pub status: ResponseStatus,
1027    /// The actual message content
1028    pub content: Vec<AssistantContent>,
1029}
1030
1031/// The role of an output message.
1032#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
1033#[serde(rename_all = "snake_case")]
1034pub enum OutputRole {
1035    Assistant,
1036}
1037
1038impl<T> completion::CompletionModel for ResponsesCompletionModel<T>
1039where
1040    T: HttpClientExt + Clone + std::fmt::Debug + Default + Send + 'static,
1041{
1042    type Response = CompletionResponse;
1043    type StreamingResponse = StreamingCompletionResponse;
1044
1045    #[cfg_attr(feature = "worker", worker::send)]
1046    async fn completion(
1047        &self,
1048        completion_request: crate::completion::CompletionRequest,
1049    ) -> Result<completion::CompletionResponse<Self::Response>, CompletionError> {
1050        let span = if tracing::Span::current().is_disabled() {
1051            info_span!(
1052                target: "rig::completions",
1053                "chat",
1054                gen_ai.operation.name = "chat",
1055                gen_ai.provider.name = tracing::field::Empty,
1056                gen_ai.request.model = tracing::field::Empty,
1057                gen_ai.response.id = tracing::field::Empty,
1058                gen_ai.response.model = tracing::field::Empty,
1059                gen_ai.usage.output_tokens = tracing::field::Empty,
1060                gen_ai.usage.input_tokens = tracing::field::Empty,
1061                gen_ai.input.messages = tracing::field::Empty,
1062                gen_ai.output.messages = tracing::field::Empty,
1063            )
1064        } else {
1065            tracing::Span::current()
1066        };
1067
1068        span.record("gen_ai.provider.name", "openai");
1069        span.record("gen_ai.request.model", &self.model);
1070        let request = self.create_completion_request(completion_request)?;
1071        span.record(
1072            "gen_ai.input.messages",
1073            serde_json::to_string(&request.input)
1074                .expect("openai request to successfully turn into a JSON value"),
1075        );
1076        let body = serde_json::to_vec(&request)?;
1077        tracing::debug!(
1078            "OpenAI Responses API input: {request}",
1079            request = serde_json::to_string_pretty(&request).unwrap()
1080        );
1081
1082        let req = self
1083            .client
1084            .post("/responses")?
1085            .header("Content-Type", "application/json")
1086            .body(body)
1087            .map_err(|e| CompletionError::HttpError(e.into()))?;
1088
1089        async move {
1090            let response = self.client.send(req).await?;
1091
1092            if response.status().is_success() {
1093                let t = http_client::text(response).await?;
1094                let response = serde_json::from_str::<Self::Response>(&t)?;
1095                let span = tracing::Span::current();
1096                span.record(
1097                    "gen_ai.output.messages",
1098                    serde_json::to_string(&response.output).unwrap(),
1099                );
1100                span.record("gen_ai.response.id", &response.id);
1101                span.record("gen_ai.response.model", &response.model);
1102                if let Some(ref usage) = response.usage {
1103                    span.record("gen_ai.usage.output_tokens", usage.output_tokens);
1104                    span.record("gen_ai.usage.input_tokens", usage.input_tokens);
1105                }
                // We need to emit an event here so that the span actually exports its data
1107                tracing::info!("API successfully called");
1108                response.try_into()
1109            } else {
1110                let text = http_client::text(response).await?;
1111                Err(CompletionError::ProviderError(text))
1112            }
1113        }
1114        .instrument(span)
1115        .await
1116    }
1117
1118    #[cfg_attr(feature = "worker", worker::send)]
1119    async fn stream(
1120        &self,
1121        request: crate::completion::CompletionRequest,
1122    ) -> Result<
1123        crate::streaming::StreamingCompletionResponse<Self::StreamingResponse>,
1124        CompletionError,
1125    > {
1126        ResponsesCompletionModel::stream(self, request).await
1127    }
1128}
1129
1130impl TryFrom<CompletionResponse> for completion::CompletionResponse<CompletionResponse> {
1131    type Error = CompletionError;
1132
1133    fn try_from(response: CompletionResponse) -> Result<Self, Self::Error> {
1134        if response.output.is_empty() {
1135            return Err(CompletionError::ResponseError(
1136                "Response contained no parts".to_owned(),
1137            ));
1138        }
1139
1140        let content: Vec<completion::AssistantContent> = response
1141            .output
1142            .iter()
1143            .cloned()
1144            .flat_map(<Vec<completion::AssistantContent>>::from)
1145            .collect();
1146
1147        let choice = OneOrMany::many(content).map_err(|_| {
1148            CompletionError::ResponseError(
1149                "Response contained no message or tool call (empty)".to_owned(),
1150            )
1151        })?;
1152
1153        let usage = response
1154            .usage
1155            .as_ref()
1156            .map(|usage| completion::Usage {
1157                input_tokens: usage.input_tokens,
1158                output_tokens: usage.output_tokens,
1159                total_tokens: usage.total_tokens,
1160            })
1161            .unwrap_or_default();
1162
1163        Ok(completion::CompletionResponse {
1164            choice,
1165            usage,
1166            raw_response: response,
1167        })
1168    }
1169}
1170
1171/// An OpenAI Responses API message.
1172#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
1173#[serde(tag = "role", rename_all = "lowercase")]
1174pub enum Message {
1175    #[serde(alias = "developer")]
1176    System {
1177        #[serde(deserialize_with = "string_or_one_or_many")]
1178        content: OneOrMany<SystemContent>,
1179        #[serde(skip_serializing_if = "Option::is_none")]
1180        name: Option<String>,
1181    },
1182    User {
1183        #[serde(deserialize_with = "string_or_one_or_many")]
1184        content: OneOrMany<UserContent>,
1185        #[serde(skip_serializing_if = "Option::is_none")]
1186        name: Option<String>,
1187    },
1188    Assistant {
1189        content: OneOrMany<AssistantContentType>,
1190        #[serde(skip_serializing_if = "String::is_empty")]
1191        id: String,
1192        #[serde(skip_serializing_if = "Option::is_none")]
1193        name: Option<String>,
1194        status: ToolStatus,
1195    },
1196    #[serde(rename = "tool")]
1197    ToolResult {
1198        tool_call_id: String,
1199        output: String,
1200    },
1201}
1202
1203/// The type of a tool result content item.
1204#[derive(Default, Debug, Serialize, Deserialize, PartialEq, Clone)]
1205#[serde(rename_all = "lowercase")]
1206pub enum ToolResultContentType {
1207    #[default]
1208    Text,
1209}
1210
1211impl Message {
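    /// Creates a system message from the given text.
    ///
    /// Illustrative sketch:
    /// ```rust,ignore
    /// let msg = Message::system("You are a helpful assistant.");
    /// ```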
1212    pub fn system(content: &str) -> Self {
1213        Message::System {
1214            content: OneOrMany::one(content.to_owned().into()),
1215            name: None,
1216        }
1217    }
1218}
1219
/// Text assistant content.
/// Note that, unlike the Completions API, the text variant here is tagged `output_text` rather than `text`.
1222#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
1223#[serde(tag = "type", rename_all = "snake_case")]
1224pub enum AssistantContent {
1225    OutputText(Text),
1226    Refusal { refusal: String },
1227}
1228
1229impl From<AssistantContent> for completion::AssistantContent {
1230    fn from(value: AssistantContent) -> Self {
1231        match value {
1232            AssistantContent::Refusal { refusal } => {
1233                completion::AssistantContent::Text(Text { text: refusal })
1234            }
1235            AssistantContent::OutputText(Text { text }) => {
1236                completion::AssistantContent::Text(Text { text })
1237            }
1238        }
1239    }
1240}
1241
1242/// The type of assistant content.
1243#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
1244#[serde(untagged)]
1245pub enum AssistantContentType {
1246    Text(AssistantContent),
1247    ToolCall(OutputFunctionCall),
1248    Reasoning(OpenAIReasoning),
1249}
1250
1251/// Different types of user content.
1252#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
1253#[serde(tag = "type", rename_all = "snake_case")]
1254pub enum UserContent {
1255    InputText {
1256        text: String,
1257    },
1258    InputImage {
1259        image_url: String,
1260        #[serde(default)]
1261        detail: ImageDetail,
1262    },
1263    InputFile {
1264        #[serde(skip_serializing_if = "Option::is_none")]
1265        file_url: Option<String>,
1266        #[serde(skip_serializing_if = "Option::is_none")]
1267        file_data: Option<String>,
1268        #[serde(skip_serializing_if = "Option::is_none")]
1269        filename: Option<String>,
1270    },
1271    Audio {
1272        input_audio: InputAudio,
1273    },
1274    #[serde(rename = "tool")]
1275    ToolResult {
1276        tool_call_id: String,
1277        output: String,
1278    },
1279}
1280
1281impl TryFrom<message::Message> for Vec<Message> {
1282    type Error = message::MessageError;
1283
1284    fn try_from(message: message::Message) -> Result<Self, Self::Error> {
1285        match message {
1286            message::Message::User { content } => {
1287                let (tool_results, other_content): (Vec<_>, Vec<_>) = content
1288                    .into_iter()
1289                    .partition(|content| matches!(content, message::UserContent::ToolResult(_)));
1290
                // If a message contains both tool results and other user content, OpenAI will
                // only handle the tool results. It's unlikely that both will be present.
1293                if !tool_results.is_empty() {
1294                    tool_results
1295                        .into_iter()
1296                        .map(|content| match content {
1297                            message::UserContent::ToolResult(message::ToolResult {
1298                                call_id,
1299                                content,
1300                                ..
1301                            }) => Ok::<_, message::MessageError>(Message::ToolResult {
1302                                tool_call_id: call_id.expect("The tool call ID should exist"),
1303                                output: {
1304                                    let res = content.first();
1305                                    match res {
1306                                        completion::message::ToolResultContent::Text(Text {
1307                                            text,
1308                                        }) => text,
                                        _ => return Err(MessageError::ConversionError("This API currently only supports text tool results".into()))
1310                                    }
1311                                },
1312                            }),
1313                            _ => unreachable!(),
1314                        })
1315                        .collect::<Result<Vec<_>, _>>()
1316                } else {
1317                    let other_content = other_content
1318                        .into_iter()
1319                        .map(|content| match content {
1320                            message::UserContent::Text(message::Text { text }) => {
1321                                Ok(UserContent::InputText { text })
1322                            }
1323                            message::UserContent::Image(message::Image {
1324                                data,
1325                                detail,
1326                                media_type,
1327                                ..
1328                            }) => {
1329                                let url = match data {
1330                                    DocumentSourceKind::Base64(data) => {
1331                                        let media_type = if let Some(media_type) = media_type {
1332                                            media_type.to_mime_type().to_string()
1333                                        } else {
1334                                            String::new()
1335                                        };
1336                                        format!("data:{media_type};base64,{data}")
1337                                    }
1338                                    DocumentSourceKind::Url(url) => url,
1339                                    DocumentSourceKind::Raw(_) => {
1340                                        return Err(MessageError::ConversionError(
1341                                            "Raw files not supported, encode as base64 first"
1342                                                .into(),
1343                                        ));
1344                                    }
1345                                    doc => {
1346                                        return Err(MessageError::ConversionError(format!(
                                            "Unsupported image source: {doc}"
1348                                        )));
1349                                    }
1350                                };
1351
1352                                Ok(UserContent::InputImage {
1353                                    image_url: url,
1354                                    detail: detail.unwrap_or_default(),
1355                                })
1356                            }
1357                            message::UserContent::Document(message::Document {
1358                                media_type: Some(DocumentMediaType::PDF),
1359                                data,
1360                                ..
1361                            }) => {
1362                                let (file_data, file_url) = match data {
1363                                    DocumentSourceKind::Base64(data) => {
1364                                        (Some(format!("data:application/pdf;base64,{data}")), None)
1365                                    }
1366                                    DocumentSourceKind::Url(url) => (None, Some(url)),
1367                                    DocumentSourceKind::Raw(_) => {
1368                                        return Err(MessageError::ConversionError(
1369                                            "Raw files not supported, encode as base64 first"
1370                                                .into(),
1371                                        ));
1372                                    }
1373                                    doc => {
1374                                        return Err(MessageError::ConversionError(format!(
1375                                            "Unsupported document type: {doc}"
1376                                        )));
1377                                    }
1378                                };
1379
1380                                Ok(UserContent::InputFile {
1381                                    file_url,
1382                                    file_data,
1383                                    filename: Some("document.pdf".into()),
1384                                })
1385                            }
1386                            message::UserContent::Document(message::Document {
1387                                data: DocumentSourceKind::Base64(text),
1388                                ..
1389                            }) => Ok(UserContent::InputText { text }),
1390                            message::UserContent::Audio(message::Audio {
1391                                data: DocumentSourceKind::Base64(data),
1392                                media_type,
1393                                ..
1394                            }) => Ok(UserContent::Audio {
1395                                input_audio: InputAudio {
1396                                    data,
1397                                    format: match media_type {
1398                                        Some(media_type) => media_type,
1399                                        None => AudioMediaType::MP3,
1400                                    },
1401                                },
1402                            }),
1403                            message::UserContent::Audio(_) => Err(MessageError::ConversionError(
1404                                "Audio must be base64 encoded data".into(),
1405                            )),
1406                            _ => unreachable!(),
1407                        })
1408                        .collect::<Result<Vec<_>, _>>()?;
1409
1410                    let other_content = OneOrMany::many(other_content).expect(
1411                        "There must be other content here if there were no tool result content",
1412                    );
1413
1414                    Ok(vec![Message::User {
1415                        content: other_content,
1416                        name: None,
1417                    }])
1418                }
1419            }
1420            message::Message::Assistant { content, id } => {
1421                let assistant_message_id = id;
1422
1423                match content.first() {
1424                    crate::message::AssistantContent::Text(Text { text }) => {
1425                        Ok(vec![Message::Assistant {
1426                            id: assistant_message_id
1427                                .expect("The assistant message ID should exist"),
1428                            status: ToolStatus::Completed,
1429                            content: OneOrMany::one(AssistantContentType::Text(
1430                                AssistantContent::OutputText(Text { text }),
1431                            )),
1432                            name: None,
1433                        }])
1434                    }
1435                    crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
1436                        id,
1437                        call_id,
1438                        function,
1439                    }) => Ok(vec![Message::Assistant {
1440                        content: OneOrMany::one(AssistantContentType::ToolCall(
1441                            OutputFunctionCall {
1442                                call_id: call_id.expect("The call ID should exist"),
1443                                arguments: function.arguments,
1444                                id,
1445                                name: function.name,
1446                                status: ToolStatus::Completed,
1447                            },
1448                        )),
1449                        id: assistant_message_id.expect("The assistant message ID should exist!"),
1450                        name: None,
1451                        status: ToolStatus::Completed,
1452                    }]),
1453                    crate::message::AssistantContent::Reasoning(crate::message::Reasoning {
1454                        id,
1455                        reasoning,
1456                        ..
1457                    }) => Ok(vec![Message::Assistant {
1458                        content: OneOrMany::one(AssistantContentType::Reasoning(OpenAIReasoning {
1459                            id: id.expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
1460                            summary: reasoning.into_iter().map(|x| ReasoningSummary::SummaryText { text: x }).collect(),
1461                            encrypted_content: None,
1462                            status: Some(ToolStatus::Completed),
1463                        })),
1464                        id: assistant_message_id.expect("The assistant message ID should exist!"),
1465                        name: None,
1466                        status: (ToolStatus::Completed),
1467                    }]),
1468                }
1469            }
1470        }
1471    }
1472}
1473
1474impl FromStr for UserContent {
1475    type Err = Infallible;
1476
1477    fn from_str(s: &str) -> Result<Self, Self::Err> {
1478        Ok(UserContent::InputText {
1479            text: s.to_string(),
1480        })
1481    }
1482}