rig/providers/openai/responses_api/mod.rs

//! The OpenAI Responses API.
//!
//! By default when creating a completion client, this is the API that gets used.
//!
//! If you'd like to switch back to the regular Completions API, you can do so by using the `.completions_api()` function - see below for an example:
//! ```rust
//! let openai_client = rig::providers::openai::Client::from_env();
//! let model = openai_client.completion_model("gpt-4o").completions_api();
//! ```
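//!
//! Responses-specific options (reasoning, structured outputs, etc.) can be set through
//! [`AdditionalParameters`] and passed along as a request's additional parameters. A minimal
//! sketch (where exactly you attach the resulting JSON depends on how you build your requests
//! or agents):
//! ```rust
//! use rig::providers::openai::responses_api::{AdditionalParameters, Reasoning, ReasoningEffort};
//!
//! let params = AdditionalParameters {
//!     reasoning: Some(Reasoning::new().with_effort(ReasoningEffort::High)),
//!     ..Default::default()
//! };
//! let _json = params.to_json();
//! ```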
use super::completion::ToolChoice;
use super::{Client, responses_api::streaming::StreamingCompletionResponse};
use super::{InputAudio, SystemContent};
use crate::completion::CompletionError;
use crate::http_client;
use crate::http_client::HttpClientExt;
use crate::json_utils;
use crate::message::{
    AudioMediaType, Document, DocumentMediaType, DocumentSourceKind, ImageDetail, MessageError,
    MimeType, Text,
};
use crate::one_or_many::string_or_one_or_many;

use crate::{OneOrMany, completion, message};
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use tracing::{Instrument, info_span};

use std::convert::Infallible;
use std::ops::Add;
use std::str::FromStr;

pub mod streaming;

/// The completion request type for OpenAI's Responses API: <https://platform.openai.com/docs/api-reference/responses/create>
/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct CompletionRequest {
    /// Message inputs
    pub input: OneOrMany<InputItem>,
    /// The model name
    pub model: String,
    /// Instructions (also referred to as the preamble; in other APIs this would be the "system prompt")
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,
    /// The maximum number of output tokens.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u64>,
    /// Toggle to true for streaming responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,
    /// The temperature. Set higher for more creative responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// Whether the LLM should be forced to use a tool before returning a response.
    /// If not provided, the default is "auto".
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_choice: Option<ToolChoice>,
    /// The tools you want to use. Currently this is limited to functions, but will be expanded on in the future.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub tools: Vec<ResponsesToolDefinition>,
    /// Additional parameters
    #[serde(flatten)]
    pub additional_parameters: AdditionalParameters,
}

impl CompletionRequest {
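    /// Enable structured outputs on this request by attaching a JSON schema (wrapped in a
    /// [`TextConfig`]). `schema_name` is an arbitrary label and `schema` should be a JSON Schema object.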
    pub fn with_structured_outputs<S>(mut self, schema_name: S, schema: serde_json::Value) -> Self
    where
        S: Into<String>,
    {
        self.additional_parameters.text = Some(TextConfig::structured_output(schema_name, schema));

        self
    }
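
    /// Add a reasoning configuration (effort and/or summary level) to this request.
    /// See [`Reasoning`] for the available options.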
    pub fn with_reasoning(mut self, reasoning: Reasoning) -> Self {
        self.additional_parameters.reasoning = Some(reasoning);

        self
    }
}

/// An input item for [`CompletionRequest`].
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct InputItem {
    /// The role of an input item/message.
    /// Input messages should be Some(Role::User), and output messages should be Some(Role::Assistant).
    /// Everything else should be None.
    #[serde(skip_serializing_if = "Option::is_none")]
    role: Option<Role>,
    /// The input content itself.
    #[serde(flatten)]
    input: InputContent,
}

/// Message roles. Used by the OpenAI Responses API to determine who created a given message.
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    User,
    Assistant,
    System,
}

/// The type of content used in an [`InputItem`]. Additionally holds data for each type of input content.
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    Message(Message),
    Reasoning(OpenAIReasoning),
    FunctionCall(OutputFunctionCall),
    FunctionCallOutput(ToolResult),
}
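
/// A reasoning item from the OpenAI Responses API. When replaying conversation history that
/// contains reasoning, the OpenAI-generated `id` (and `encrypted_content`, if present) should be
/// sent back unchanged.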
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct OpenAIReasoning {
    id: String,
    pub summary: Vec<ReasoningSummary>,
    pub encrypted_content: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<ToolStatus>,
}
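
/// A single reasoning summary entry, as emitted by the OpenAI Responses API.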
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ReasoningSummary {
    SummaryText { text: String },
}

impl ReasoningSummary {
    fn new(input: &str) -> Self {
        Self::SummaryText {
            text: input.to_string(),
        }
    }

    pub fn text(&self) -> String {
        let ReasoningSummary::SummaryText { text } = self;
        text.clone()
    }
}

/// A tool result.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ToolResult {
    /// The call ID of a tool. This must match the call ID of the corresponding tool call, otherwise an error will be returned.
    call_id: String,
    /// The result of a tool call.
    output: String,
    /// The status of a tool call (if used in a completion request, this should always be Completed)
    status: ToolStatus,
}

impl From<Message> for InputItem {
    fn from(value: Message) -> Self {
        match value {
            Message::User { .. } => Self {
                role: Some(Role::User),
                input: InputContent::Message(value),
            },
            Message::Assistant { ref content, .. } => {
                let role = if content
                    .clone()
                    .iter()
                    .any(|x| matches!(x, AssistantContentType::Reasoning(_)))
                {
                    None
                } else {
                    Some(Role::Assistant)
                };
                Self {
                    role,
                    input: InputContent::Message(value),
                }
            }
            Message::System { .. } => Self {
                role: Some(Role::System),
                input: InputContent::Message(value),
            },
            Message::ToolResult {
                tool_call_id,
                output,
            } => Self {
                role: None,
                input: InputContent::FunctionCallOutput(ToolResult {
                    call_id: tool_call_id,
                    output,
                    status: ToolStatus::Completed,
                }),
            },
        }
    }
}

impl TryFrom<crate::completion::Message> for Vec<InputItem> {
    type Error = CompletionError;

    fn try_from(value: crate::completion::Message) -> Result<Self, Self::Error> {
        match value {
            crate::completion::Message::User { content } => {
                let mut items = Vec::new();

                for user_content in content {
                    match user_content {
                        crate::message::UserContent::Text(Text { text }) => {
                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputText { text }),
                                    name: None,
                                }),
                            });
                        }
                        crate::message::UserContent::ToolResult(
                            crate::completion::message::ToolResult {
                                call_id,
                                content: tool_content,
                                ..
                            },
                        ) => {
                            for tool_result_content in tool_content {
                                let crate::completion::message::ToolResultContent::Text(Text {
                                    text,
                                }) = tool_result_content
                                else {
                                    return Err(CompletionError::ProviderError(
                                        "Tool results currently only support text content"
                                            .to_string(),
                                    ));
                                };
                                // let output = serde_json::from_str(&text)?;
                                items.push(InputItem {
                                    role: None,
                                    input: InputContent::FunctionCallOutput(ToolResult {
                                        call_id: call_id
                                            .clone()
                                            .expect("The call ID of this tool should exist!"),
                                        output: text,
                                        status: ToolStatus::Completed,
                                    }),
                                });
                            }
                        }
                        crate::message::UserContent::Document(Document {
                            data,
                            media_type: Some(DocumentMediaType::PDF),
                            ..
                        }) => {
                            let (file_data, file_url) = match data {
                                DocumentSourceKind::Base64(data) => {
                                    (Some(format!("data:application/pdf;base64,{data}")), None)
                                }
                                DocumentSourceKind::Url(url) => (None, Some(url)),
                                DocumentSourceKind::Raw(_) => {
                                    return Err(CompletionError::RequestError(
                                        "Raw file data not supported, encode as base64 first"
                                            .into(),
                                    ));
                                }
                                doc => {
                                    return Err(CompletionError::RequestError(
                                        format!("Unsupported document type: {doc}").into(),
                                    ));
                                }
                            };

                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputFile {
                                        file_data,
                                        file_url,
                                        filename: Some("document.pdf".to_string()),
                                    }),
                                    name: None,
                                }),
                            })
                        }
                        // todo: should we ensure this takes into account file size?
                        crate::message::UserContent::Document(Document {
                            data: DocumentSourceKind::Base64(text),
                            ..
                        }) => items.push(InputItem {
                            role: Some(Role::User),
                            input: InputContent::Message(Message::User {
                                content: OneOrMany::one(UserContent::InputText { text }),
                                name: None,
                            }),
                        }),
                        crate::message::UserContent::Document(Document {
                            data: DocumentSourceKind::String(text),
                            ..
                        }) => items.push(InputItem {
                            role: Some(Role::User),
                            input: InputContent::Message(Message::User {
                                content: OneOrMany::one(UserContent::InputText { text }),
                                name: None,
                            }),
                        }),
                        crate::message::UserContent::Image(crate::message::Image {
                            data,
                            media_type,
                            detail,
                            ..
                        }) => {
                            let url = match data {
                                DocumentSourceKind::Base64(data) => {
                                    let media_type = if let Some(media_type) = media_type {
                                        media_type.to_mime_type().to_string()
                                    } else {
                                        String::new()
                                    };
                                    format!("data:{media_type};base64,{data}")
                                }
                                DocumentSourceKind::Url(url) => url,
                                DocumentSourceKind::Raw(_) => {
                                    return Err(CompletionError::RequestError(
                                        "Raw file data not supported, encode as base64 first"
                                            .into(),
                                    ));
                                }
                                doc => {
                                    return Err(CompletionError::RequestError(
                                        format!("Unsupported document type: {doc}").into(),
                                    ));
                                }
                            };
                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputImage {
                                        image_url: url,
                                        detail: detail.unwrap_or_default(),
                                    }),
                                    name: None,
                                }),
                            });
                        }
                        message => {
                            return Err(CompletionError::ProviderError(format!(
                                "Unsupported message: {message:?}"
                            )));
                        }
                    }
                }

                Ok(items)
            }
            crate::completion::Message::Assistant { id, content } => {
                let mut items = Vec::new();

                for assistant_content in content {
                    match assistant_content {
                        crate::message::AssistantContent::Text(Text { text }) => {
                            let id = id.as_ref().unwrap_or(&String::default()).clone();
                            items.push(InputItem {
                                role: Some(Role::Assistant),
                                input: InputContent::Message(Message::Assistant {
                                    content: OneOrMany::one(AssistantContentType::Text(
                                        AssistantContent::OutputText(Text { text }),
                                    )),
                                    id,
                                    name: None,
                                    status: ToolStatus::Completed,
                                }),
                            });
                        }
                        crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
                            id: tool_id,
                            call_id,
                            function,
                        }) => {
                            items.push(InputItem {
                                role: None,
                                input: InputContent::FunctionCall(OutputFunctionCall {
                                    arguments: function.arguments,
                                    call_id: call_id.expect("The tool call ID should exist!"),
                                    id: tool_id,
                                    name: function.name,
                                    status: ToolStatus::Completed,
                                }),
                            });
                        }
                        crate::message::AssistantContent::Reasoning(
                            crate::message::Reasoning { id, reasoning, .. },
                        ) => {
                            items.push(InputItem {
                                role: None,
                                input: InputContent::Reasoning(OpenAIReasoning {
                                    id: id
                                        .expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
                                    summary: reasoning.into_iter().map(|x| ReasoningSummary::new(&x)).collect(),
                                    encrypted_content: None,
                                    status: None,
                                }),
                            });
                        }
                    }
                }

                Ok(items)
            }
        }
    }
}

impl From<OneOrMany<String>> for Vec<ReasoningSummary> {
    fn from(value: OneOrMany<String>) -> Self {
        value.iter().map(|x| ReasoningSummary::new(x)).collect()
    }
}

/// The definition of a tool, repurposed for OpenAI's Responses API.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ResponsesToolDefinition {
    /// Tool name
    pub name: String,
    /// Parameters - this should be a JSON schema. Object schemas should additionally have an "additionalProperties" field set to false, as this is required when using OpenAI's strict mode (enabled by default).
    pub parameters: serde_json::Value,
    /// Whether to use strict mode. Enabled by default as it allows for improved efficiency.
    pub strict: bool,
    /// The type of tool. This should always be "function".
    #[serde(rename = "type")]
    pub kind: String,
    /// Tool description.
    pub description: String,
}

/// Recursively ensures all object schemas in a JSON schema have `additionalProperties: false`.
/// Nested arrays, schema $defs, object properties and enums are handled recursively.
/// This is required by OpenAI's Responses API when using strict mode.
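/// For example, a sketch of the intended transformation:
/// ```json
/// // before
/// { "type": "object", "properties": { "name": { "type": "string" } } }
/// // after
/// { "type": "object", "properties": { "name": { "type": "string" } }, "additionalProperties": false }
/// ```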
fn add_props_false(schema: &mut serde_json::Value) {
    if let Value::Object(obj) = schema {
        let is_object_schema = obj.get("type") == Some(&Value::String("object".to_string()))
            || obj.contains_key("properties");

        if is_object_schema && !obj.contains_key("additionalProperties") {
            obj.insert("additionalProperties".to_string(), Value::Bool(false));
        }

        if let Some(defs) = obj.get_mut("$defs")
            && let Value::Object(defs_obj) = defs
        {
            for (_, def_schema) in defs_obj.iter_mut() {
                add_props_false(def_schema);
            }
        }

        if let Some(properties) = obj.get_mut("properties")
            && let Value::Object(props) = properties
        {
            for (_, prop_value) in props.iter_mut() {
                add_props_false(prop_value);
            }
        }

        if let Some(items) = obj.get_mut("items") {
            add_props_false(items);
        }

        // Handle enums and composite schemas (anyOf/oneOf/allOf)
        for key in ["anyOf", "oneOf", "allOf"] {
            if let Some(variants) = obj.get_mut(key)
                && let Value::Array(variants_array) = variants
            {
                for variant in variants_array.iter_mut() {
                    add_props_false(variant);
                }
            }
        }
    }
}

impl From<completion::ToolDefinition> for ResponsesToolDefinition {
    fn from(value: completion::ToolDefinition) -> Self {
        let completion::ToolDefinition {
            name,
            mut parameters,
            description,
        } = value;

        add_props_false(&mut parameters);

        Self {
            name,
            parameters,
            description,
            kind: "function".to_string(),
            strict: true,
        }
    }
}

/// Token usage.
/// Token usage from the OpenAI Responses API generally shows the input tokens and output tokens (both with more in-depth details) as well as a total tokens field.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ResponsesUsage {
    /// Input tokens
    pub input_tokens: u64,
    /// In-depth detail on input tokens (cached tokens)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_tokens_details: Option<InputTokensDetails>,
    /// Output tokens
    pub output_tokens: u64,
    /// In-depth detail on output tokens (reasoning tokens)
    pub output_tokens_details: OutputTokensDetails,
    /// Total tokens used (for a given prompt)
    pub total_tokens: u64,
}

impl ResponsesUsage {
    /// Create a new ResponsesUsage instance
    pub(crate) fn new() -> Self {
        Self {
            input_tokens: 0,
            input_tokens_details: Some(InputTokensDetails::new()),
            output_tokens: 0,
            output_tokens_details: OutputTokensDetails::new(),
            total_tokens: 0,
        }
    }
}

impl Add for ResponsesUsage {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        let input_tokens = self.input_tokens + rhs.input_tokens;
        let input_tokens_details = self.input_tokens_details.map(|lhs| {
            if let Some(tokens) = rhs.input_tokens_details {
                lhs + tokens
            } else {
                lhs
            }
        });
        let output_tokens = self.output_tokens + rhs.output_tokens;
        let output_tokens_details = self.output_tokens_details + rhs.output_tokens_details;
        let total_tokens = self.total_tokens + rhs.total_tokens;
        Self {
            input_tokens,
            input_tokens_details,
            output_tokens,
            output_tokens_details,
            total_tokens,
        }
    }
}

/// In-depth details on input tokens.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct InputTokensDetails {
    /// Cached tokens from OpenAI
    pub cached_tokens: u64,
}

impl InputTokensDetails {
    pub(crate) fn new() -> Self {
        Self { cached_tokens: 0 }
    }
}

impl Add for InputTokensDetails {
    type Output = Self;
    fn add(self, rhs: Self) -> Self::Output {
        Self {
            cached_tokens: self.cached_tokens + rhs.cached_tokens,
        }
    }
}

/// In-depth details on output tokens.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct OutputTokensDetails {
    /// Reasoning tokens
    pub reasoning_tokens: u64,
}

impl OutputTokensDetails {
    pub(crate) fn new() -> Self {
        Self {
            reasoning_tokens: 0,
        }
    }
}

impl Add for OutputTokensDetails {
    type Output = Self;
    fn add(self, rhs: Self) -> Self::Output {
        Self {
            reasoning_tokens: self.reasoning_tokens + rhs.reasoning_tokens,
        }
    }
}

/// Occasionally, when using OpenAI's Responses API, you may get an incomplete response. This struct holds the reason why it happened.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct IncompleteDetailsReason {
    /// The reason for an incomplete [`CompletionResponse`].
    pub reason: String,
}

/// A response error from OpenAI's Responses API.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ResponseError {
    /// Error code
    pub code: String,
    /// Error message
    pub message: String,
}

/// A response object as an enum (ensures type validation)
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ResponseObject {
    Response,
}

/// The response status as an enum (ensures type validation)
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ResponseStatus {
    InProgress,
    Completed,
    Failed,
    Cancelled,
    Queued,
    Incomplete,
}

/// Attempt to create a [`CompletionRequest`] from a model name and a [`crate::completion::CompletionRequest`]
impl TryFrom<(String, crate::completion::CompletionRequest)> for CompletionRequest {
    type Error = CompletionError;
    fn try_from(
        (model, req): (String, crate::completion::CompletionRequest),
    ) -> Result<Self, Self::Error> {
        let input = {
            let mut partial_history = vec![];
            if let Some(docs) = req.normalized_documents() {
                partial_history.push(docs);
            }
            partial_history.extend(req.chat_history);

            // Build up the full history of input items
            let mut full_history: Vec<InputItem> = Vec::new();

            // Convert and extend the rest of the history, propagating any conversion errors
            full_history.extend(
                partial_history
                    .into_iter()
                    .map(|x| <Vec<InputItem>>::try_from(x))
                    .collect::<Result<Vec<Vec<InputItem>>, _>>()?
                    .into_iter()
                    .flatten()
                    .collect::<Vec<InputItem>>(),
            );

            full_history
        };

        let input = OneOrMany::many(input)
            .expect("This should never panic - if it does, please file a bug report");

        let stream = req
            .additional_params
            .clone()
            .unwrap_or(Value::Null)
            .as_bool();

        let additional_parameters = if let Some(map) = req.additional_params {
            serde_json::from_value::<AdditionalParameters>(map).expect("Converting additional parameters to AdditionalParameters should never fail as every field is an Option")
        } else {
            // If there are no additional parameters, initialise an empty object
            AdditionalParameters::default()
        };

        let tool_choice = req.tool_choice.map(ToolChoice::try_from).transpose()?;

        Ok(Self {
            input,
            model,
            instructions: req.preamble,
            max_output_tokens: req.max_tokens,
            stream,
            tool_choice,
            tools: req
                .tools
                .into_iter()
                .map(ResponsesToolDefinition::from)
                .collect(),
            temperature: req.temperature,
            additional_parameters,
        })
    }
}

/// The completion model struct for OpenAI's Responses API.
#[derive(Clone)]
pub struct ResponsesCompletionModel<T = reqwest::Client> {
    /// The OpenAI client
    pub(crate) client: Client<T>,
    /// Name of the model (e.g. `gpt-4o`)
    pub model: String,
}

impl<T> ResponsesCompletionModel<T>
where
    T: HttpClientExt + Clone + Default + std::fmt::Debug + 'static,
{
    /// Creates a new [`ResponsesCompletionModel`].
    pub fn new(client: Client<T>, model: &str) -> Self {
        Self {
            client,
            model: model.to_string(),
        }
    }

    /// Use the Completions API instead of the Responses API.
    pub fn completions_api(self) -> crate::providers::openai::completion::CompletionModel<T> {
        crate::providers::openai::completion::CompletionModel::new(self.client, &self.model)
    }

    /// Attempt to create a completion request from [`crate::completion::CompletionRequest`].
    pub(crate) fn create_completion_request(
        &self,
        completion_request: crate::completion::CompletionRequest,
    ) -> Result<CompletionRequest, CompletionError> {
        let req = CompletionRequest::try_from((self.model.clone(), completion_request))?;

        Ok(req)
    }
}

/// The standard response format from OpenAI's Responses API.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompletionResponse {
    /// The ID of a completion response.
    pub id: String,
    /// The type of the object.
    pub object: ResponseObject,
    /// The time at which a given response has been created, in seconds from the UNIX epoch (01/01/1970 00:00:00).
    pub created_at: u64,
    /// The status of the response.
    pub status: ResponseStatus,
    /// Response error (optional)
    pub error: Option<ResponseError>,
    /// Incomplete response details (optional)
    pub incomplete_details: Option<IncompleteDetailsReason>,
    /// System prompt/preamble
    pub instructions: Option<String>,
    /// The maximum number of tokens the model should output
    pub max_output_tokens: Option<u64>,
    /// The model name
    pub model: String,
    /// Token usage
    pub usage: Option<ResponsesUsage>,
    /// The model output (messages, etc will go here)
    pub output: Vec<Output>,
    /// Tools
    #[serde(default)]
    pub tools: Vec<ResponsesToolDefinition>,
    /// Additional parameters
    #[serde(flatten)]
    pub additional_parameters: AdditionalParameters,
}

/// Additional parameters for the completion request type for OpenAI's Responses API: <https://platform.openai.com/docs/api-reference/responses/create>
/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
#[derive(Clone, Debug, Deserialize, Serialize, Default)]
pub struct AdditionalParameters {
    /// Whether or not a given model task should run in the background (i.e. as a detached process).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,
    /// The text response format. This is where you would add structured outputs (if you want them).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<TextConfig>,
    /// What types of extra data you would like to include in the response. Note that most of these are currently unsupported, but have been added for completeness.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include: Option<Vec<Include>>,
    /// `top_p`. Mutually exclusive with the `temperature` argument.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f64>,
    /// The truncation strategy to apply when the conversation exceeds the model's context window.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<TruncationStrategy>,
    /// An identifier representing the end user of the request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user: Option<String>,
    /// Any additional metadata you'd like to add. This will also be returned in the response.
    #[serde(skip_serializing_if = "Map::is_empty", default)]
    pub metadata: serde_json::Map<String, serde_json::Value>,
    /// Whether or not you want tool calls to run in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,
    /// Previous response ID. If you are not sending a full conversation, this can help to track the message flow.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,
    /// Add thinking/reasoning to your response. The reasoning will be emitted as an item in the `output` field.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,
    /// The service tier you're using.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<OpenAIServiceTier>,
    /// Whether or not to store the response for later retrieval via the API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub store: Option<bool>,
}

impl AdditionalParameters {
    pub fn to_json(self) -> serde_json::Value {
        serde_json::to_value(self).expect("this should never fail since a struct that impls Serialize will always produce valid JSON")
    }
}

/// The truncation strategy.
/// When using auto, if the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.
/// Otherwise, does nothing (and is disabled by default).
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum TruncationStrategy {
    Auto,
    #[default]
    Disabled,
}

/// The model output format configuration.
/// You can either have plain text by default, or attach a JSON schema for the purposes of structured outputs.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TextConfig {
    pub format: TextFormat,
}

impl TextConfig {
    pub(crate) fn structured_output<S>(name: S, schema: serde_json::Value) -> Self
    where
        S: Into<String>,
    {
        Self {
            format: TextFormat::JsonSchema(StructuredOutputsInput {
                name: name.into(),
                schema,
                strict: true,
            }),
        }
    }
}

/// The text format (contained by [`TextConfig`]).
/// You can either have plain text by default, or attach a JSON schema for the purposes of structured outputs.
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum TextFormat {
    JsonSchema(StructuredOutputsInput),
    #[default]
    Text,
}

/// The inputs required for adding structured outputs.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StructuredOutputsInput {
    /// The name of your schema.
    pub name: String,
    /// Your required output schema. It is recommended that you use the JsonSchema macro, which you can check out at <https://docs.rs/schemars/latest/schemars/trait.JsonSchema.html>.
    pub schema: serde_json::Value,
    /// Enable strict output. If you are using your AI agent in a data pipeline or another scenario that requires the data to be absolutely fixed to a given schema, it is recommended to set this to true.
    pub strict: bool,
}

/// Add reasoning to a [`CompletionRequest`].
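/// A minimal usage sketch:
/// ```rust
/// use rig::providers::openai::responses_api::{Reasoning, ReasoningEffort, ReasoningSummaryLevel};
///
/// let _reasoning = Reasoning::new()
///     .with_effort(ReasoningEffort::Medium)
///     .with_summary_level(ReasoningSummaryLevel::Detailed);
/// ```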
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Reasoning {
    /// How much effort you want the model to put into thinking/reasoning.
    pub effort: Option<ReasoningEffort>,
    /// How much effort you want the model to put into writing the reasoning summary.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummaryLevel>,
}

impl Reasoning {
    /// Creates a new [`Reasoning`] instance (with empty values).
    pub fn new() -> Self {
        Self {
            effort: None,
            summary: None,
        }
    }

    /// Adds reasoning effort.
    pub fn with_effort(mut self, reasoning_effort: ReasoningEffort) -> Self {
        self.effort = Some(reasoning_effort);

        self
    }

    /// Sets the summary level (how detailed the reasoning summary will be).
    pub fn with_summary_level(mut self, reasoning_summary_level: ReasoningSummaryLevel) -> Self {
        self.summary = Some(reasoning_summary_level);

        self
    }
}

/// The billing service tier that will be used. Defaults to `Auto`.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum OpenAIServiceTier {
    #[default]
    Auto,
    Default,
    Flex,
}

/// The amount of reasoning effort that will be used by a given model.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ReasoningEffort {
    Minimal,
    Low,
    #[default]
    Medium,
    High,
}

/// The level of detail of the reasoning summary produced by a given model.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ReasoningSummaryLevel {
    #[default]
    Auto,
    Concise,
    Detailed,
}

/// Results to additionally include in the OpenAI Responses API.
/// Note that most of these are currently unsupported, but have been added for completeness.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Include {
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    #[serde(rename = "computer_call.output.image_url")]
    ComputerCallOutputOutputImageUrl,
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
}

/// A currently non-exhaustive list of output types.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum Output {
    Message(OutputMessage),
    #[serde(alias = "function_call")]
    FunctionCall(OutputFunctionCall),
    Reasoning {
        id: String,
        summary: Vec<ReasoningSummary>,
    },
}

impl From<Output> for Vec<completion::AssistantContent> {
    fn from(value: Output) -> Self {
        let res: Vec<completion::AssistantContent> = match value {
            Output::Message(OutputMessage { content, .. }) => content
                .into_iter()
                .map(completion::AssistantContent::from)
                .collect(),
            Output::FunctionCall(OutputFunctionCall {
                id,
                arguments,
                call_id,
                name,
                ..
            }) => vec![completion::AssistantContent::tool_call_with_call_id(
                id, call_id, name, arguments,
            )],
            Output::Reasoning { id, summary } => {
                let summary: Vec<String> = summary.into_iter().map(|x| x.text()).collect();

                vec![completion::AssistantContent::Reasoning(
                    message::Reasoning::multi(summary).with_id(id),
                )]
            }
        };

        res
    }
}

#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputReasoning {
    id: String,
    summary: Vec<ReasoningSummary>,
    status: ToolStatus,
}

/// An OpenAI Responses API tool call. A call ID will be returned that must be used when creating a tool result to send back to OpenAI as a message input, otherwise an error will be received.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputFunctionCall {
    pub id: String,
    #[serde(with = "json_utils::stringified_json")]
    pub arguments: serde_json::Value,
    pub call_id: String,
    pub name: String,
    pub status: ToolStatus,
}

/// The status of a given tool.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ToolStatus {
    InProgress,
    Completed,
    Incomplete,
}

/// An output message from OpenAI's Responses API.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputMessage {
    /// The message ID. Must be included when sending the message back to OpenAI
    pub id: String,
    /// The role (currently only Assistant is available as this struct is only created when receiving an LLM message as a response)
    pub role: OutputRole,
    /// The status of the response
    pub status: ResponseStatus,
    /// The actual message content
    pub content: Vec<AssistantContent>,
}

/// The role of an output message.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OutputRole {
    Assistant,
}

impl<T> completion::CompletionModel for ResponsesCompletionModel<T>
where
    T: HttpClientExt + Clone + std::fmt::Debug + Default + Send + 'static,
{
    type Response = CompletionResponse;
    type StreamingResponse = StreamingCompletionResponse;

    #[cfg_attr(feature = "worker", worker::send)]
    async fn completion(
        &self,
        completion_request: crate::completion::CompletionRequest,
    ) -> Result<completion::CompletionResponse<Self::Response>, CompletionError> {
        let span = if tracing::Span::current().is_disabled() {
            info_span!(
                target: "rig::completions",
                "chat",
                gen_ai.operation.name = "chat",
                gen_ai.provider.name = tracing::field::Empty,
                gen_ai.request.model = tracing::field::Empty,
                gen_ai.response.id = tracing::field::Empty,
                gen_ai.response.model = tracing::field::Empty,
                gen_ai.usage.output_tokens = tracing::field::Empty,
                gen_ai.usage.input_tokens = tracing::field::Empty,
                gen_ai.input.messages = tracing::field::Empty,
                gen_ai.output.messages = tracing::field::Empty,
            )
        } else {
            tracing::Span::current()
        };

        span.record("gen_ai.provider.name", "openai");
        span.record("gen_ai.request.model", &self.model);
        let request = self.create_completion_request(completion_request)?;
        span.record(
            "gen_ai.input.messages",
            serde_json::to_string(&request.input)
                .expect("openai request to successfully turn into a JSON value"),
        );
        let body = serde_json::to_vec(&request)?;
        tracing::debug!(
            "OpenAI Responses API input: {request}",
            request = serde_json::to_string_pretty(&request).unwrap()
        );

        let req = self
            .client
            .post("/responses")?
            .header("Content-Type", "application/json")
            .body(body)
            .map_err(|e| CompletionError::HttpError(e.into()))?;

        async move {
            let response = self.client.send(req).await?;

            if response.status().is_success() {
                let t = http_client::text(response).await?;
                let response = serde_json::from_str::<Self::Response>(&t)?;
                let span = tracing::Span::current();
                span.record(
                    "gen_ai.output.messages",
                    serde_json::to_string(&response.output).unwrap(),
                );
                span.record("gen_ai.response.id", &response.id);
                span.record("gen_ai.response.model", &response.model);
                if let Some(ref usage) = response.usage {
                    span.record("gen_ai.usage.output_tokens", usage.output_tokens);
                    span.record("gen_ai.usage.input_tokens", usage.input_tokens);
                }
                // We need to call the event here to get the span to actually send anything
                tracing::info!("API successfully called");
                response.try_into()
            } else {
                let text = http_client::text(response).await?;
                Err(CompletionError::ProviderError(text))
            }
        }
        .instrument(span)
        .await
    }

    #[cfg_attr(feature = "worker", worker::send)]
    async fn stream(
        &self,
        request: crate::completion::CompletionRequest,
    ) -> Result<
        crate::streaming::StreamingCompletionResponse<Self::StreamingResponse>,
        CompletionError,
    > {
        ResponsesCompletionModel::stream(self, request).await
    }
}

impl TryFrom<CompletionResponse> for completion::CompletionResponse<CompletionResponse> {
    type Error = CompletionError;

    fn try_from(response: CompletionResponse) -> Result<Self, Self::Error> {
        if response.output.is_empty() {
            return Err(CompletionError::ResponseError(
                "Response contained no parts".to_owned(),
            ));
        }

        let content: Vec<completion::AssistantContent> = response
            .output
            .iter()
            .cloned()
            .flat_map(<Vec<completion::AssistantContent>>::from)
            .collect();

        let choice = OneOrMany::many(content).map_err(|_| {
            CompletionError::ResponseError(
                "Response contained no message or tool call (empty)".to_owned(),
            )
        })?;

        let usage = response
            .usage
            .as_ref()
            .map(|usage| completion::Usage {
                input_tokens: usage.input_tokens,
                output_tokens: usage.output_tokens,
                total_tokens: usage.total_tokens,
            })
            .unwrap_or_default();

        Ok(completion::CompletionResponse {
            choice,
            usage,
            raw_response: response,
        })
    }
}

/// An OpenAI Responses API message.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "role", rename_all = "lowercase")]
pub enum Message {
    #[serde(alias = "developer")]
    System {
        #[serde(deserialize_with = "string_or_one_or_many")]
        content: OneOrMany<SystemContent>,
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
    },
    User {
        #[serde(deserialize_with = "string_or_one_or_many")]
        content: OneOrMany<UserContent>,
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
    },
    Assistant {
        content: OneOrMany<AssistantContentType>,
        #[serde(skip_serializing_if = "String::is_empty")]
        id: String,
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
        status: ToolStatus,
    },
    #[serde(rename = "tool")]
    ToolResult {
        tool_call_id: String,
        output: String,
    },
}

/// The type of a tool result content item.
#[derive(Default, Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(rename_all = "lowercase")]
pub enum ToolResultContentType {
    #[default]
    Text,
}

impl Message {
    pub fn system(content: &str) -> Self {
        Message::System {
            content: OneOrMany::one(content.to_owned().into()),
            name: None,
        }
    }
}

/// Text assistant content.
/// Note that, unlike the Completions API, the text content type is `output_text` rather than `text`.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum AssistantContent {
    OutputText(Text),
    Refusal { refusal: String },
}

impl From<AssistantContent> for completion::AssistantContent {
    fn from(value: AssistantContent) -> Self {
        match value {
            AssistantContent::Refusal { refusal } => {
                completion::AssistantContent::Text(Text { text: refusal })
            }
            AssistantContent::OutputText(Text { text }) => {
                completion::AssistantContent::Text(Text { text })
            }
        }
    }
}

/// The type of assistant content.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(untagged)]
pub enum AssistantContentType {
    Text(AssistantContent),
    ToolCall(OutputFunctionCall),
    Reasoning(OpenAIReasoning),
}

/// Different types of user content.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum UserContent {
    InputText {
        text: String,
    },
    InputImage {
        image_url: String,
        #[serde(default)]
        detail: ImageDetail,
    },
    InputFile {
        #[serde(skip_serializing_if = "Option::is_none")]
        file_url: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        file_data: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        filename: Option<String>,
    },
    Audio {
        input_audio: InputAudio,
    },
    #[serde(rename = "tool")]
    ToolResult {
        tool_call_id: String,
        output: String,
    },
}

impl TryFrom<message::Message> for Vec<Message> {
    type Error = message::MessageError;

    fn try_from(message: message::Message) -> Result<Self, Self::Error> {
        match message {
            message::Message::User { content } => {
                let (tool_results, other_content): (Vec<_>, Vec<_>) = content
                    .into_iter()
                    .partition(|content| matches!(content, message::UserContent::ToolResult(_)));

                // If there are messages with both tool results and user content, OpenAI will only
                // handle tool results. It's unlikely that there will be both.
                if !tool_results.is_empty() {
                    tool_results
                        .into_iter()
                        .map(|content| match content {
                            message::UserContent::ToolResult(message::ToolResult {
                                call_id,
                                content,
                                ..
                            }) => Ok::<_, message::MessageError>(Message::ToolResult {
                                tool_call_id: call_id.expect("The tool call ID should exist"),
                                output: {
                                    let res = content.first();
                                    match res {
                                        completion::message::ToolResultContent::Text(Text {
                                            text,
                                        }) => text,
                                        _ => return Err(MessageError::ConversionError("This API only currently supports text tool results".into()))
                                    }
                                },
                            }),
                            _ => unreachable!(),
                        })
                        .collect::<Result<Vec<_>, _>>()
                } else {
                    let other_content = other_content
                        .into_iter()
                        .map(|content| match content {
                            message::UserContent::Text(message::Text { text }) => {
                                Ok(UserContent::InputText { text })
                            }
                            message::UserContent::Image(message::Image {
                                data,
                                detail,
                                media_type,
                                ..
                            }) => {
                                let url = match data {
                                    DocumentSourceKind::Base64(data) => {
                                        let media_type = if let Some(media_type) = media_type {
                                            media_type.to_mime_type().to_string()
                                        } else {
                                            String::new()
                                        };
                                        format!("data:{media_type};base64,{data}")
                                    }
                                    DocumentSourceKind::Url(url) => url,
                                    DocumentSourceKind::Raw(_) => {
                                        return Err(MessageError::ConversionError(
                                            "Raw files not supported, encode as base64 first"
                                                .into(),
                                        ));
                                    }
                                    doc => {
                                        return Err(MessageError::ConversionError(format!(
                                            "Unsupported document type: {doc}"
                                        )));
                                    }
                                };

                                Ok(UserContent::InputImage {
                                    image_url: url,
                                    detail: detail.unwrap_or_default(),
                                })
                            }
                            message::UserContent::Document(message::Document {
                                media_type: Some(DocumentMediaType::PDF),
                                data,
                                ..
                            }) => {
                                let (file_data, file_url) = match data {
                                    DocumentSourceKind::Base64(data) => {
                                        (Some(format!("data:application/pdf;base64,{data}")), None)
                                    }
                                    DocumentSourceKind::Url(url) => (None, Some(url)),
                                    DocumentSourceKind::Raw(_) => {
                                        return Err(MessageError::ConversionError(
                                            "Raw files not supported, encode as base64 first"
                                                .into(),
                                        ));
                                    }
                                    doc => {
                                        return Err(MessageError::ConversionError(format!(
                                            "Unsupported document type: {doc}"
                                        )));
                                    }
                                };

                                Ok(UserContent::InputFile {
                                    file_url,
                                    file_data,
                                    filename: Some("document.pdf".into()),
                                })
                            }
                            message::UserContent::Document(message::Document {
                                data: DocumentSourceKind::Base64(text),
                                ..
                            }) => Ok(UserContent::InputText { text }),
                            message::UserContent::Audio(message::Audio {
                                data: DocumentSourceKind::Base64(data),
                                media_type,
                                ..
                            }) => Ok(UserContent::Audio {
                                input_audio: InputAudio {
                                    data,
                                    format: match media_type {
                                        Some(media_type) => media_type,
                                        None => AudioMediaType::MP3,
                                    },
                                },
                            }),
                            message::UserContent::Audio(_) => Err(MessageError::ConversionError(
                                "Audio must be base64 encoded data".into(),
                            )),
                            _ => unreachable!(),
                        })
                        .collect::<Result<Vec<_>, _>>()?;

                    let other_content = OneOrMany::many(other_content).expect(
                        "There must be other content here if there were no tool result content",
                    );

                    Ok(vec![Message::User {
                        content: other_content,
                        name: None,
                    }])
                }
            }
            message::Message::Assistant { content, id } => {
                let assistant_message_id = id;

                match content.first() {
                    crate::message::AssistantContent::Text(Text { text }) => {
                        Ok(vec![Message::Assistant {
                            id: assistant_message_id
                                .expect("The assistant message ID should exist"),
                            status: ToolStatus::Completed,
                            content: OneOrMany::one(AssistantContentType::Text(
                                AssistantContent::OutputText(Text { text }),
                            )),
                            name: None,
                        }])
                    }
                    crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
                        id,
                        call_id,
                        function,
                    }) => Ok(vec![Message::Assistant {
                        content: OneOrMany::one(AssistantContentType::ToolCall(
                            OutputFunctionCall {
                                call_id: call_id.expect("The call ID should exist"),
                                arguments: function.arguments,
                                id,
                                name: function.name,
                                status: ToolStatus::Completed,
                            },
                        )),
                        id: assistant_message_id.expect("The assistant message ID should exist!"),
                        name: None,
                        status: ToolStatus::Completed,
                    }]),
                    crate::message::AssistantContent::Reasoning(crate::message::Reasoning {
                        id,
                        reasoning,
                        ..
                    }) => Ok(vec![Message::Assistant {
                        content: OneOrMany::one(AssistantContentType::Reasoning(OpenAIReasoning {
                            id: id.expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
                            summary: reasoning.into_iter().map(|x| ReasoningSummary::SummaryText { text: x }).collect(),
                            encrypted_content: None,
                            status: Some(ToolStatus::Completed),
                        })),
                        id: assistant_message_id.expect("The assistant message ID should exist!"),
                        name: None,
                        status: ToolStatus::Completed,
                    }]),
                }
            }
        }
    }
}

impl FromStr for UserContent {
    type Err = Infallible;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(UserContent::InputText {
            text: s.to_string(),
        })
    }
}
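
#[cfg(test)]
mod tests {
    // A few sketch tests (not exhaustive) for the strict-mode schema handling above. The exact
    // field layout of `completion::ToolDefinition` is assumed from its usage in this module.
    use super::*;
    use serde_json::json;

    // Every object schema, including nested ones, should end up with `additionalProperties: false`.
    #[test]
    fn add_props_false_handles_nested_objects() {
        let mut schema = json!({
            "type": "object",
            "properties": {
                "user": {
                    "type": "object",
                    "properties": { "name": { "type": "string" } }
                },
                "tags": { "type": "array", "items": { "type": "object", "properties": {} } }
            }
        });

        add_props_false(&mut schema);

        assert_eq!(schema["additionalProperties"], json!(false));
        assert_eq!(
            schema["properties"]["user"]["additionalProperties"],
            json!(false)
        );
        assert_eq!(
            schema["properties"]["tags"]["items"]["additionalProperties"],
            json!(false)
        );
    }

    // Converting a Rig tool definition should produce a strict "function" tool with a
    // normalised schema.
    #[test]
    fn tool_definition_conversion_sets_strict_function() {
        let tool = completion::ToolDefinition {
            name: "add".to_string(),
            description: "Add two numbers".to_string(),
            parameters: json!({
                "type": "object",
                "properties": { "a": { "type": "number" }, "b": { "type": "number" } }
            }),
        };

        let converted = ResponsesToolDefinition::from(tool);

        assert_eq!(converted.kind, "function");
        assert!(converted.strict);
        assert_eq!(converted.parameters["additionalProperties"], json!(false));
    }
}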