rig/providers/openai/responses_api/mod.rs

//! The OpenAI Responses API.
//!
//! By default, when creating a completion client, this is the API that gets used.
//!
//! If you'd like to switch back to the regular Completions API, you can do so using the `.completions_api()` method - see below for an example:
//! ```rust
//! let openai_client = rig::providers::openai::Client::from_env();
//! let model = openai_client.completion_model("gpt-4o").completions_api();
//! ```
use super::completion::ToolChoice;
use super::{Client, responses_api::streaming::StreamingCompletionResponse};
use super::{InputAudio, SystemContent};
use crate::completion::CompletionError;
use crate::http_client;
use crate::http_client::HttpClientExt;
use crate::json_utils;
use crate::message::{
    AudioMediaType, Document, DocumentMediaType, DocumentSourceKind, ImageDetail, MessageError,
    MimeType, Text,
};
use crate::one_or_many::string_or_one_or_many;

use crate::wasm_compat::{WasmCompatSend, WasmCompatSync};
use crate::{OneOrMany, completion, message};
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use tracing::{Instrument, Level, enabled, info_span};

use std::convert::Infallible;
use std::ops::Add;
use std::str::FromStr;

pub mod streaming;

/// The completion request type for OpenAI's Responses API: <https://platform.openai.com/docs/api-reference/responses/create>
/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct CompletionRequest {
    /// Message inputs
    pub input: OneOrMany<InputItem>,
    /// The model name
    pub model: String,
    /// Instructions (also referred to as the preamble; in other APIs this would be the "system prompt")
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,
    /// The maximum number of output tokens.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u64>,
    /// Set to true for streaming responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,
    /// The temperature. Set higher (up to a maximum of 1.0) for more creative responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// Whether the LLM should be forced to use a tool before returning a response.
    /// If none is provided, the default option is "auto".
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_choice: Option<ToolChoice>,
    /// The tools you want to use. Currently this is limited to functions, but will be expanded in the future.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub tools: Vec<ResponsesToolDefinition>,
    /// Additional parameters
    #[serde(flatten)]
    pub additional_parameters: AdditionalParameters,
}

impl CompletionRequest {
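    /// Attach a structured-outputs JSON schema to this request.
    ///
    /// A minimal usage sketch; the schema and variable names here are illustrative assumptions:
    /// ```rust,ignore
    /// let schema = serde_json::json!({
    ///     "type": "object",
    ///     "properties": { "answer": { "type": "string" } },
    ///     "required": ["answer"],
    ///     "additionalProperties": false
    /// });
    /// let request = request.with_structured_outputs("answer_schema", schema);
    /// ```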
    pub fn with_structured_outputs<S>(mut self, schema_name: S, schema: serde_json::Value) -> Self
    where
        S: Into<String>,
    {
        self.additional_parameters.text = Some(TextConfig::structured_output(schema_name, schema));

        self
    }

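    /// Attach a reasoning configuration to this request.
    ///
    /// A minimal usage sketch (variable names are illustrative):
    /// ```rust,ignore
    /// let reasoning = Reasoning::new().with_effort(ReasoningEffort::High);
    /// let request = request.with_reasoning(reasoning);
    /// ```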
    pub fn with_reasoning(mut self, reasoning: Reasoning) -> Self {
        self.additional_parameters.reasoning = Some(reasoning);

        self
    }
}

/// An input item for [`CompletionRequest`].
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct InputItem {
    /// The role of an input item/message.
    /// Input messages should be Some(Role::User), and output messages should be Some(Role::Assistant).
    /// Everything else should be None.
    #[serde(skip_serializing_if = "Option::is_none")]
    role: Option<Role>,
    /// The input content itself.
    #[serde(flatten)]
    input: InputContent,
}

/// Message roles. Used by the OpenAI Responses API to determine who created a given message.
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    User,
    Assistant,
    System,
}

/// The type of content used in an [`InputItem`]. Additionally holds the data for each type of input content.
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    Message(Message),
    Reasoning(OpenAIReasoning),
    FunctionCall(OutputFunctionCall),
    FunctionCallOutput(ToolResult),
}

#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct OpenAIReasoning {
    id: String,
    pub summary: Vec<ReasoningSummary>,
    pub encrypted_content: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<ToolStatus>,
}

#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ReasoningSummary {
    SummaryText { text: String },
}

impl ReasoningSummary {
    fn new(input: &str) -> Self {
        Self::SummaryText {
            text: input.to_string(),
        }
    }

    pub fn text(&self) -> String {
        let ReasoningSummary::SummaryText { text } = self;
        text.clone()
    }
}

/// A tool result.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ToolResult {
    /// The call ID of a tool call. This must match the call ID of the originating tool call, otherwise the API will return an error.
    call_id: String,
    /// The result of a tool call.
    output: String,
    /// The status of a tool call (when used in a completion request, this should always be Completed).
    status: ToolStatus,
}

impl From<Message> for InputItem {
    fn from(value: Message) -> Self {
        match value {
            Message::User { .. } => Self {
                role: Some(Role::User),
                input: InputContent::Message(value),
            },
            Message::Assistant { ref content, .. } => {
                let role = if content
                    .iter()
                    .any(|x| matches!(x, AssistantContentType::Reasoning(_)))
                {
                    None
                } else {
                    Some(Role::Assistant)
                };
                Self {
                    role,
                    input: InputContent::Message(value),
                }
            }
            Message::System { .. } => Self {
                role: Some(Role::System),
                input: InputContent::Message(value),
            },
            Message::ToolResult {
                tool_call_id,
                output,
            } => Self {
                role: None,
                input: InputContent::FunctionCallOutput(ToolResult {
                    call_id: tool_call_id,
                    output,
                    status: ToolStatus::Completed,
                }),
            },
        }
    }
}

impl TryFrom<crate::completion::Message> for Vec<InputItem> {
    type Error = CompletionError;

    fn try_from(value: crate::completion::Message) -> Result<Self, Self::Error> {
        match value {
            crate::completion::Message::User { content } => {
                let mut items = Vec::new();

                for user_content in content {
                    match user_content {
                        crate::message::UserContent::Text(Text { text }) => {
                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputText { text }),
                                    name: None,
                                }),
                            });
                        }
                        crate::message::UserContent::ToolResult(
                            crate::completion::message::ToolResult {
                                call_id,
                                content: tool_content,
                                ..
                            },
                        ) => {
                            for tool_result_content in tool_content {
                                let crate::completion::message::ToolResultContent::Text(Text {
                                    text,
                                }) = tool_result_content
                                else {
                                    return Err(CompletionError::ProviderError(
                                        "The OpenAI Responses API only supports text tool results"
                                            .to_string(),
                                    ));
                                };
                                items.push(InputItem {
                                    role: None,
                                    input: InputContent::FunctionCallOutput(ToolResult {
                                        call_id: call_id
                                            .clone()
                                            .expect("The call ID of this tool should exist!"),
                                        output: text,
                                        status: ToolStatus::Completed,
                                    }),
                                });
                            }
                        }
                        crate::message::UserContent::Document(Document {
                            data,
                            media_type: Some(DocumentMediaType::PDF),
                            ..
                        }) => {
                            let (file_data, file_url) = match data {
                                DocumentSourceKind::Base64(data) => {
                                    (Some(format!("data:application/pdf;base64,{data}")), None)
                                }
                                DocumentSourceKind::Url(url) => (None, Some(url)),
                                DocumentSourceKind::Raw(_) => {
                                    return Err(CompletionError::RequestError(
                                        "Raw file data not supported, encode as base64 first"
                                            .into(),
                                    ));
                                }
                                doc => {
                                    return Err(CompletionError::RequestError(
                                        format!("Unsupported document type: {doc}").into(),
                                    ));
                                }
                            };

                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputFile {
                                        file_data,
                                        file_url,
                                        filename: Some("document.pdf".to_string()),
                                    }),
                                    name: None,
                                }),
                            })
                        }
                        // todo: should we ensure this takes into account file size?
                        crate::message::UserContent::Document(Document {
                            data: DocumentSourceKind::Base64(text),
                            ..
                        }) => items.push(InputItem {
                            role: Some(Role::User),
                            input: InputContent::Message(Message::User {
                                content: OneOrMany::one(UserContent::InputText { text }),
                                name: None,
                            }),
                        }),
                        crate::message::UserContent::Document(Document {
                            data: DocumentSourceKind::String(text),
                            ..
                        }) => items.push(InputItem {
                            role: Some(Role::User),
                            input: InputContent::Message(Message::User {
                                content: OneOrMany::one(UserContent::InputText { text }),
                                name: None,
                            }),
                        }),
                        crate::message::UserContent::Image(crate::message::Image {
                            data,
                            media_type,
                            detail,
                            ..
                        }) => {
                            let url = match data {
                                DocumentSourceKind::Base64(data) => {
                                    let media_type = if let Some(media_type) = media_type {
                                        media_type.to_mime_type().to_string()
                                    } else {
                                        String::new()
                                    };
                                    format!("data:{media_type};base64,{data}")
                                }
                                DocumentSourceKind::Url(url) => url,
                                DocumentSourceKind::Raw(_) => {
                                    return Err(CompletionError::RequestError(
                                        "Raw file data not supported, encode as base64 first"
                                            .into(),
                                    ));
                                }
                                doc => {
                                    return Err(CompletionError::RequestError(
                                        format!("Unsupported document type: {doc}").into(),
                                    ));
                                }
                            };
                            items.push(InputItem {
                                role: Some(Role::User),
                                input: InputContent::Message(Message::User {
                                    content: OneOrMany::one(UserContent::InputImage {
                                        image_url: url,
                                        detail: detail.unwrap_or_default(),
                                    }),
                                    name: None,
                                }),
                            });
                        }
                        message => {
                            return Err(CompletionError::ProviderError(format!(
                                "Unsupported message: {message:?}"
                            )));
                        }
                    }
                }

                Ok(items)
            }
            crate::completion::Message::Assistant { id, content } => {
                let mut items = Vec::new();

                for assistant_content in content {
                    match assistant_content {
                        crate::message::AssistantContent::Text(Text { text }) => {
                            let id = id.clone().unwrap_or_default();
                            items.push(InputItem {
                                role: Some(Role::Assistant),
                                input: InputContent::Message(Message::Assistant {
                                    content: OneOrMany::one(AssistantContentType::Text(
                                        AssistantContent::OutputText(Text { text }),
                                    )),
                                    id,
                                    name: None,
                                    status: ToolStatus::Completed,
                                }),
                            });
                        }
                        crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
                            id: tool_id,
                            call_id,
                            function,
                            ..
                        }) => {
                            items.push(InputItem {
                                role: None,
                                input: InputContent::FunctionCall(OutputFunctionCall {
                                    arguments: function.arguments,
                                    call_id: call_id.expect("The tool call ID should exist!"),
                                    id: tool_id,
                                    name: function.name,
                                    status: ToolStatus::Completed,
                                }),
                            });
                        }
                        crate::message::AssistantContent::Reasoning(
                            crate::message::Reasoning { id, reasoning, .. },
                        ) => {
                            items.push(InputItem {
                                role: None,
                                input: InputContent::Reasoning(OpenAIReasoning {
                                    id: id
                                        .expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
                                    summary: reasoning.into_iter().map(|x| ReasoningSummary::new(&x)).collect(),
                                    encrypted_content: None,
                                    status: None,
                                }),
                            });
                        }
                        crate::message::AssistantContent::Image(_) => {
                            return Err(CompletionError::ProviderError(
                                "Assistant image content is not supported in OpenAI Responses API"
                                    .to_string(),
                            ));
                        }
                    }
                }

                Ok(items)
            }
        }
    }
}

impl From<OneOrMany<String>> for Vec<ReasoningSummary> {
    fn from(value: OneOrMany<String>) -> Self {
        value.iter().map(|x| ReasoningSummary::new(x)).collect()
    }
}

/// The definition of a tool, adapted for OpenAI's Responses API.
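///
/// For illustration only, a function tool is expected to serialize to roughly this shape (the field values below are assumptions, not output from this crate):
/// ```json
/// {
///     "type": "function",
///     "name": "add",
///     "description": "Add two numbers together",
///     "strict": true,
///     "parameters": { "type": "object", "additionalProperties": false }
/// }
/// ```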
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ResponsesToolDefinition {
    /// Tool name
    pub name: String,
    /// Parameters - this should be a JSON schema. Tools should additionally ensure an "additionalProperties" field has been added with the value set to false, as this is required when using OpenAI's strict mode (enabled by default).
    pub parameters: serde_json::Value,
    /// Whether to use strict mode. Enabled by default as it allows for improved efficiency.
    pub strict: bool,
    /// The type of tool. This should always be "function".
    #[serde(rename = "type")]
    pub kind: String,
    /// Tool description.
    pub description: String,
}

impl From<completion::ToolDefinition> for ResponsesToolDefinition {
    fn from(value: completion::ToolDefinition) -> Self {
        let completion::ToolDefinition {
            name,
            mut parameters,
            description,
        } = value;

        super::sanitize_schema(&mut parameters);

        Self {
            name,
            parameters,
            description,
            kind: "function".to_string(),
            strict: true,
        }
    }
}

/// Token usage.
/// Token usage from the OpenAI Responses API reports input tokens and output tokens (each with more in-depth details), as well as a total token count.
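///
/// Usage values can be aggregated with `+` via the `Add` impl below; a minimal sketch, where `usage_a` and `usage_b` are assumed to be existing `ResponsesUsage` values:
/// ```rust,ignore
/// let total = usage_a.clone() + usage_b.clone();
/// assert_eq!(total.total_tokens, usage_a.total_tokens + usage_b.total_tokens);
/// ```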
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ResponsesUsage {
    /// Input tokens
    pub input_tokens: u64,
    /// In-depth detail on input tokens (cached tokens)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_tokens_details: Option<InputTokensDetails>,
    /// Output tokens
    pub output_tokens: u64,
    /// In-depth detail on output tokens (reasoning tokens)
    pub output_tokens_details: OutputTokensDetails,
    /// Total tokens used (for a given prompt)
    pub total_tokens: u64,
}

impl ResponsesUsage {
    /// Create a new ResponsesUsage instance
    pub(crate) fn new() -> Self {
        Self {
            input_tokens: 0,
            input_tokens_details: Some(InputTokensDetails::new()),
            output_tokens: 0,
            output_tokens_details: OutputTokensDetails::new(),
            total_tokens: 0,
        }
    }
}

impl Add for ResponsesUsage {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        let input_tokens = self.input_tokens + rhs.input_tokens;
        let input_tokens_details = self.input_tokens_details.map(|lhs| {
            if let Some(tokens) = rhs.input_tokens_details {
                lhs + tokens
            } else {
                lhs
            }
        });
        let output_tokens = self.output_tokens + rhs.output_tokens;
        let output_tokens_details = self.output_tokens_details + rhs.output_tokens_details;
        let total_tokens = self.total_tokens + rhs.total_tokens;
        Self {
            input_tokens,
            input_tokens_details,
            output_tokens,
            output_tokens_details,
            total_tokens,
        }
    }
}

/// In-depth details on input tokens.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct InputTokensDetails {
    /// Cached tokens from OpenAI
    pub cached_tokens: u64,
}

impl InputTokensDetails {
    pub(crate) fn new() -> Self {
        Self { cached_tokens: 0 }
    }
}

impl Add for InputTokensDetails {
    type Output = Self;
    fn add(self, rhs: Self) -> Self::Output {
        Self {
            cached_tokens: self.cached_tokens + rhs.cached_tokens,
        }
    }
}

/// In-depth details on output tokens.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct OutputTokensDetails {
    /// Reasoning tokens
    pub reasoning_tokens: u64,
}

impl OutputTokensDetails {
    pub(crate) fn new() -> Self {
        Self {
            reasoning_tokens: 0,
        }
    }
}

impl Add for OutputTokensDetails {
    type Output = Self;
    fn add(self, rhs: Self) -> Self::Output {
        Self {
            reasoning_tokens: self.reasoning_tokens + rhs.reasoning_tokens,
        }
    }
}

/// Occasionally, when using OpenAI's Responses API, you may get an incomplete response. This struct holds the reason why that happened.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct IncompleteDetailsReason {
    /// The reason for an incomplete [`CompletionResponse`].
    pub reason: String,
}

/// A response error from OpenAI's Responses API.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ResponseError {
    /// Error code
    pub code: String,
    /// Error message
    pub message: String,
}

/// The response object type as an enum (ensures type validation).
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ResponseObject {
    Response,
}

/// The response status as an enum (ensures type validation).
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ResponseStatus {
    InProgress,
    Completed,
    Failed,
    Cancelled,
    Queued,
    Incomplete,
}

/// Attempt to create a [`CompletionRequest`] from a model name and a [`crate::completion::CompletionRequest`].
impl TryFrom<(String, crate::completion::CompletionRequest)> for CompletionRequest {
    type Error = CompletionError;
    fn try_from(
        (model, req): (String, crate::completion::CompletionRequest),
    ) -> Result<Self, Self::Error> {
        let input = {
            let mut partial_history = vec![];
            if let Some(docs) = req.normalized_documents() {
                partial_history.push(docs);
            }
            partial_history.extend(req.chat_history);

            // The preamble is passed separately via `instructions`, so the input history
            // starts out empty and is filled from the (document-prefixed) chat history.
            let mut full_history: Vec<InputItem> = Vec::new();

            // Convert and extend the rest of the history, propagating any conversion errors
            for message in partial_history {
                full_history.extend(<Vec<InputItem>>::try_from(message)?);
            }

            full_history
        };

        let input = OneOrMany::many(input)
            .map_err(|_| CompletionError::RequestError("Chat history must not be empty".into()))?;

        // Pull an explicit "stream" flag out of the additional params, if one was provided
        let stream = req
            .additional_params
            .as_ref()
            .and_then(|params| params.get("stream"))
            .and_then(Value::as_bool);

        let additional_parameters = if let Some(map) = req.additional_params {
            serde_json::from_value::<AdditionalParameters>(map)?
        } else {
            // If there are no additional parameters, initialise an empty object
            AdditionalParameters::default()
        };

        let tool_choice = req.tool_choice.map(ToolChoice::try_from).transpose()?;

        Ok(Self {
            input,
            model,
            instructions: req.preamble,
            max_output_tokens: req.max_tokens,
            stream,
            tool_choice,
            tools: req
                .tools
                .into_iter()
                .map(ResponsesToolDefinition::from)
                .collect(),
            temperature: req.temperature,
            additional_parameters,
        })
    }
}

/// The completion model struct for OpenAI's Responses API.
#[derive(Clone)]
pub struct ResponsesCompletionModel<T = reqwest::Client> {
    /// The OpenAI client
    pub(crate) client: Client<T>,
    /// Name of the model (e.g. gpt-3.5-turbo-1106)
    pub model: String,
}

impl<T> ResponsesCompletionModel<T>
where
    T: HttpClientExt + Clone + Default + std::fmt::Debug + 'static,
{
    /// Creates a new [`ResponsesCompletionModel`].
    pub fn new(client: Client<T>, model: impl Into<String>) -> Self {
        Self {
            client,
            model: model.into(),
        }
    }

    pub fn with_model(client: Client<T>, model: &str) -> Self {
        Self {
            client,
            model: model.to_string(),
        }
    }

    /// Use the Completions API instead of the Responses API.
    pub fn completions_api(self) -> crate::providers::openai::completion::CompletionModel<T> {
        super::completion::CompletionModel::with_model(self.client.completions_api(), &self.model)
    }

    /// Attempt to create a completion request from a [`crate::completion::CompletionRequest`].
    pub(crate) fn create_completion_request(
        &self,
        completion_request: crate::completion::CompletionRequest,
    ) -> Result<CompletionRequest, CompletionError> {
        let req = CompletionRequest::try_from((self.model.clone(), completion_request))?;

        Ok(req)
    }
}

/// The standard response format from OpenAI's Responses API.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompletionResponse {
    /// The ID of a completion response.
    pub id: String,
    /// The type of the object.
    pub object: ResponseObject,
    /// The time at which a given response was created, in seconds from the UNIX epoch (01/01/1970 00:00:00).
    pub created_at: u64,
    /// The status of the response.
    pub status: ResponseStatus,
    /// Response error (optional)
    pub error: Option<ResponseError>,
    /// Incomplete response details (optional)
    pub incomplete_details: Option<IncompleteDetailsReason>,
    /// System prompt/preamble
    pub instructions: Option<String>,
    /// The maximum number of tokens the model should output
    pub max_output_tokens: Option<u64>,
    /// The model name
    pub model: String,
    /// Token usage
    pub usage: Option<ResponsesUsage>,
    /// The model output (messages, etc. will go here)
    pub output: Vec<Output>,
    /// Tools
    #[serde(default)]
    pub tools: Vec<ResponsesToolDefinition>,
    /// Additional parameters
    #[serde(flatten)]
    pub additional_parameters: AdditionalParameters,
}

/// Additional parameters for the completion request type for OpenAI's Responses API: <https://platform.openai.com/docs/api-reference/responses/create>
/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
#[derive(Clone, Debug, Deserialize, Serialize, Default)]
pub struct AdditionalParameters {
    /// Whether or not a given model task should run in the background (i.e. as a detached process).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,
    /// The text response format. This is where you would add structured outputs (if you want them).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<TextConfig>,
    /// What types of extra data you would like to include. Note that most of the include options are not yet supported, but support is planned.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include: Option<Vec<Include>>,
    /// `top_p`. It is recommended to set either this or `temperature`, but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f64>,
    /// Whether or not the response should be truncated.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<TruncationStrategy>,
    /// A unique identifier for your end user.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user: Option<String>,
    /// Any additional metadata you'd like to add. This will also be returned in the response.
    #[serde(skip_serializing_if = "Map::is_empty", default)]
    pub metadata: serde_json::Map<String, serde_json::Value>,
    /// Whether or not you want tool calls to run in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,
    /// The previous response ID. If you are not sending a full conversation, this can help to track the message flow.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,
    /// Add thinking/reasoning to your response. The reasoning will be emitted as a list member of the `output` field.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,
    /// The service tier you're using.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<OpenAIServiceTier>,
    /// Whether or not to store the response for later retrieval by the API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub store: Option<bool>,
}

impl AdditionalParameters {
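    /// Serialize these parameters into a `serde_json::Value`.
    ///
    /// A minimal sketch of round-tripping the parameters through JSON (the field values are illustrative):
    /// ```rust,ignore
    /// let params = AdditionalParameters {
    ///     top_p: Some(0.9),
    ///     ..Default::default()
    /// };
    /// let value = params.to_json();
    /// assert_eq!(value["top_p"], serde_json::json!(0.9));
    /// ```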
    pub fn to_json(self) -> serde_json::Value {
        serde_json::to_value(self).expect("this should never fail since a struct that impls Serialize will always be valid JSON")
    }
}

/// The truncation strategy.
/// When using auto, if the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items from the middle of the conversation.
/// Otherwise, nothing is truncated (and truncation is disabled by default).
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum TruncationStrategy {
    Auto,
    #[default]
    Disabled,
}

/// The model output format configuration.
/// You can either have plain text by default, or attach a JSON schema for the purposes of structured outputs.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TextConfig {
    pub format: TextFormat,
}

impl TextConfig {
    pub(crate) fn structured_output<S>(name: S, schema: serde_json::Value) -> Self
    where
        S: Into<String>,
    {
        Self {
            format: TextFormat::JsonSchema(StructuredOutputsInput {
                name: name.into(),
                schema,
                strict: true,
            }),
        }
    }
}

/// The text format (contained by [`TextConfig`]).
/// You can either have plain text by default, or attach a JSON schema for the purposes of structured outputs.
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum TextFormat {
    JsonSchema(StructuredOutputsInput),
    #[default]
    Text,
}

/// The inputs required for adding structured outputs.
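///
/// For illustration only: when wrapped in a [`TextConfig`], this is assumed to serialize to roughly the following shape (the name and schema are placeholders):
/// ```json
/// {
///     "format": {
///         "type": "json_schema",
///         "name": "my_schema",
///         "strict": true,
///         "schema": { "type": "object" }
///     }
/// }
/// ```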
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StructuredOutputsInput {
    /// The name of your schema.
    pub name: String,
    /// Your required output schema. It is recommended that you use the JsonSchema derive macro, which you can check out at <https://docs.rs/schemars/latest/schemars/trait.JsonSchema.html>.
    pub schema: serde_json::Value,
    /// Enable strict output. If you are using your AI agent in a data pipeline or another scenario that requires the data to be absolutely fixed to a given schema, it is recommended to set this to true.
    pub strict: bool,
}

/// Add reasoning to a [`CompletionRequest`].
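///
/// A minimal sketch of building a configuration with the methods below:
/// ```rust,ignore
/// let reasoning = Reasoning::new()
///     .with_effort(ReasoningEffort::Medium)
///     .with_summary_level(ReasoningSummaryLevel::Detailed);
/// ```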
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Reasoning {
    /// How much effort you want the model to put into thinking/reasoning.
    pub effort: Option<ReasoningEffort>,
    /// How much effort you want the model to put into writing the reasoning summary.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummaryLevel>,
}

impl Reasoning {
    /// Creates a new `Reasoning` instance with empty values.
    pub fn new() -> Self {
        Self {
            effort: None,
            summary: None,
        }
    }

    /// Adds reasoning effort.
    pub fn with_effort(mut self, reasoning_effort: ReasoningEffort) -> Self {
        self.effort = Some(reasoning_effort);

        self
    }

    /// Adds the summary level (how detailed the reasoning summary will be).
    pub fn with_summary_level(mut self, reasoning_summary_level: ReasoningSummaryLevel) -> Self {
        self.summary = Some(reasoning_summary_level);

        self
    }
}

/// The billing service tier that will be used. Defaults to `Auto`.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum OpenAIServiceTier {
    #[default]
    Auto,
    Default,
    Flex,
}

/// The amount of reasoning effort that will be used by a given model.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ReasoningEffort {
    None,
    Minimal,
    Low,
    #[default]
    Medium,
    High,
}

/// The amount of effort that will go into a reasoning summary by a given model.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ReasoningSummaryLevel {
    #[default]
    Auto,
    Concise,
    Detailed,
}

/// Results to additionally include in the OpenAI Responses API response.
/// Note that most of these are currently unsupported, but have been added for completeness.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Include {
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    #[serde(rename = "computer_call.output.image_url")]
    ComputerCallOutputOutputImageUrl,
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
}

/// A currently non-exhaustive list of output types.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum Output {
    Message(OutputMessage),
    #[serde(alias = "function_call")]
    FunctionCall(OutputFunctionCall),
    Reasoning {
        id: String,
        summary: Vec<ReasoningSummary>,
    },
}

impl From<Output> for Vec<completion::AssistantContent> {
    fn from(value: Output) -> Self {
        match value {
            Output::Message(OutputMessage { content, .. }) => content
                .into_iter()
                .map(completion::AssistantContent::from)
                .collect(),
            Output::FunctionCall(OutputFunctionCall {
                id,
                arguments,
                call_id,
                name,
                ..
            }) => vec![completion::AssistantContent::tool_call_with_call_id(
                id, call_id, name, arguments,
            )],
            Output::Reasoning { id, summary } => {
                let summary: Vec<String> = summary.into_iter().map(|x| x.text()).collect();

                vec![completion::AssistantContent::Reasoning(
                    message::Reasoning::multi(summary).with_id(id),
                )]
            }
        }
    }
}

#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputReasoning {
    id: String,
    summary: Vec<ReasoningSummary>,
    status: ToolStatus,
}

/// An OpenAI Responses API tool call. The returned call ID must be used when creating a tool result to send back to OpenAI as a message input, otherwise the API will return an error.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputFunctionCall {
    pub id: String,
    #[serde(with = "json_utils::stringified_json")]
    pub arguments: serde_json::Value,
    pub call_id: String,
    pub name: String,
    pub status: ToolStatus,
}

/// The status of a given tool.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ToolStatus {
    InProgress,
    Completed,
    Incomplete,
}

/// An output message from OpenAI's Responses API.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct OutputMessage {
    /// The message ID. Must be included when sending the message back to OpenAI.
    pub id: String,
    /// The role (currently only Assistant, as this struct is only created when receiving an LLM message as a response).
    pub role: OutputRole,
    /// The status of the response.
    pub status: ResponseStatus,
    /// The actual message content.
    pub content: Vec<AssistantContent>,
}

/// The role of an output message.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OutputRole {
    Assistant,
}

impl<T> completion::CompletionModel for ResponsesCompletionModel<T>
where
    T: HttpClientExt
        + Clone
        + std::fmt::Debug
        + Default
        + WasmCompatSend
        + WasmCompatSync
        + 'static,
{
    type Response = CompletionResponse;
    type StreamingResponse = StreamingCompletionResponse;

    type Client = super::Client<T>;

    fn make(client: &Self::Client, model: impl Into<String>) -> Self {
        Self::new(client.clone(), model)
    }

    async fn completion(
        &self,
        completion_request: crate::completion::CompletionRequest,
    ) -> Result<completion::CompletionResponse<Self::Response>, CompletionError> {
        let span = if tracing::Span::current().is_disabled() {
            info_span!(
                target: "rig::completions",
                "chat",
                gen_ai.operation.name = "chat",
                gen_ai.provider.name = tracing::field::Empty,
                gen_ai.request.model = tracing::field::Empty,
                gen_ai.response.id = tracing::field::Empty,
                gen_ai.response.model = tracing::field::Empty,
                gen_ai.usage.output_tokens = tracing::field::Empty,
                gen_ai.usage.input_tokens = tracing::field::Empty,
                gen_ai.input.messages = tracing::field::Empty,
                gen_ai.output.messages = tracing::field::Empty,
            )
        } else {
            tracing::Span::current()
        };

        span.record("gen_ai.provider.name", "openai");
        span.record("gen_ai.request.model", &self.model);
        let request = self.create_completion_request(completion_request)?;
        let body = serde_json::to_vec(&request)?;

        if enabled!(Level::TRACE) {
            tracing::trace!(
                target: "rig::completions",
                "OpenAI Responses completion request: {request}",
                request = serde_json::to_string_pretty(&request)?
            );
        }

        let req = self
            .client
            .post("/responses")?
            .body(body)
            .map_err(|e| CompletionError::HttpError(e.into()))?;

        async move {
            let response = self.client.send(req).await?;

            if response.status().is_success() {
                let t = http_client::text(response).await?;
                let response = serde_json::from_str::<Self::Response>(&t)?;
                let span = tracing::Span::current();
                span.record("gen_ai.response.id", &response.id);
                span.record("gen_ai.response.model", &response.model);
                if let Some(ref usage) = response.usage {
                    span.record("gen_ai.usage.output_tokens", usage.output_tokens);
                    span.record("gen_ai.usage.input_tokens", usage.input_tokens);
                }
                if enabled!(Level::TRACE) {
                    tracing::trace!(
                        target: "rig::completions",
                        "OpenAI Responses completion response: {response}",
                        response = serde_json::to_string_pretty(&response)?
                    );
                }
                response.try_into()
            } else {
                let text = http_client::text(response).await?;
                Err(CompletionError::ProviderError(text))
            }
        }
        .instrument(span)
        .await
    }

    async fn stream(
        &self,
        request: crate::completion::CompletionRequest,
    ) -> Result<
        crate::streaming::StreamingCompletionResponse<Self::StreamingResponse>,
        CompletionError,
    > {
        ResponsesCompletionModel::stream(self, request).await
    }
}

impl TryFrom<CompletionResponse> for completion::CompletionResponse<CompletionResponse> {
    type Error = CompletionError;

    fn try_from(response: CompletionResponse) -> Result<Self, Self::Error> {
        if response.output.is_empty() {
            return Err(CompletionError::ResponseError(
                "Response contained no parts".to_owned(),
            ));
        }

        let content: Vec<completion::AssistantContent> = response
            .output
            .iter()
            .cloned()
            .flat_map(<Vec<completion::AssistantContent>>::from)
            .collect();

        let choice = OneOrMany::many(content).map_err(|_| {
            CompletionError::ResponseError(
                "Response contained no message or tool call (empty)".to_owned(),
            )
        })?;

        let usage = response
            .usage
            .as_ref()
            .map(|usage| completion::Usage {
                input_tokens: usage.input_tokens,
                output_tokens: usage.output_tokens,
                total_tokens: usage.total_tokens,
            })
            .unwrap_or_default();

        Ok(completion::CompletionResponse {
            choice,
            usage,
            raw_response: response,
        })
    }
}

/// An OpenAI Responses API message.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "role", rename_all = "lowercase")]
pub enum Message {
    #[serde(alias = "developer")]
    System {
        #[serde(deserialize_with = "string_or_one_or_many")]
        content: OneOrMany<SystemContent>,
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
    },
    User {
        #[serde(deserialize_with = "string_or_one_or_many")]
        content: OneOrMany<UserContent>,
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
    },
    Assistant {
        content: OneOrMany<AssistantContentType>,
        #[serde(skip_serializing_if = "String::is_empty")]
        id: String,
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<String>,
        status: ToolStatus,
    },
    #[serde(rename = "tool")]
    ToolResult {
        tool_call_id: String,
        output: String,
    },
}

/// The type of a tool result content item.
#[derive(Default, Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(rename_all = "lowercase")]
pub enum ToolResultContentType {
    #[default]
    Text,
}

impl Message {
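    /// Build a system message from a string slice; a minimal sketch:
    /// ```rust,ignore
    /// let msg = Message::system("You are a helpful assistant.");
    /// ```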
    pub fn system(content: &str) -> Self {
        Message::System {
            content: OneOrMany::one(content.to_owned().into()),
            name: None,
        }
    }
}

/// Text assistant content.
/// Note that, unlike the Completions API, the text content type here is `output_text` rather than `text`.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum AssistantContent {
    OutputText(Text),
    Refusal { refusal: String },
}

impl From<AssistantContent> for completion::AssistantContent {
    fn from(value: AssistantContent) -> Self {
        match value {
            AssistantContent::Refusal { refusal } => {
                completion::AssistantContent::Text(Text { text: refusal })
            }
            AssistantContent::OutputText(Text { text }) => {
                completion::AssistantContent::Text(Text { text })
            }
        }
    }
}

/// The type of assistant content.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(untagged)]
pub enum AssistantContentType {
    Text(AssistantContent),
    ToolCall(OutputFunctionCall),
    Reasoning(OpenAIReasoning),
}

/// Different types of user content.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum UserContent {
    InputText {
        text: String,
    },
    InputImage {
        image_url: String,
        #[serde(default)]
        detail: ImageDetail,
    },
    InputFile {
        #[serde(skip_serializing_if = "Option::is_none")]
        file_url: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        file_data: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        filename: Option<String>,
    },
    Audio {
        input_audio: InputAudio,
    },
    #[serde(rename = "tool")]
    ToolResult {
        tool_call_id: String,
        output: String,
    },
}

impl TryFrom<message::Message> for Vec<Message> {
    type Error = message::MessageError;

    fn try_from(message: message::Message) -> Result<Self, Self::Error> {
        match message {
            message::Message::User { content } => {
                let (tool_results, other_content): (Vec<_>, Vec<_>) = content
                    .into_iter()
                    .partition(|content| matches!(content, message::UserContent::ToolResult(_)));

                // If a message contains both tool results and other user content, OpenAI will only
                // handle the tool results. It's unlikely that there will be both.
                if !tool_results.is_empty() {
                    tool_results
                        .into_iter()
                        .map(|content| match content {
                            message::UserContent::ToolResult(message::ToolResult {
                                call_id,
                                content,
                                ..
                            }) => Ok::<_, message::MessageError>(Message::ToolResult {
                                tool_call_id: call_id.expect("The tool call ID should exist"),
                                output: {
                                    let res = content.first();
                                    match res {
                                        completion::message::ToolResultContent::Text(Text {
                                            text,
                                        }) => text,
                                        _ => return Err(MessageError::ConversionError(
                                            "This API currently only supports text tool results".into(),
                                        )),
                                    }
                                },
                            }),
                            _ => unreachable!(),
                        })
                        .collect::<Result<Vec<_>, _>>()
                } else {
                    let other_content = other_content
                        .into_iter()
                        .map(|content| match content {
                            message::UserContent::Text(message::Text { text }) => {
                                Ok(UserContent::InputText { text })
                            }
                            message::UserContent::Image(message::Image {
                                data,
                                detail,
                                media_type,
                                ..
                            }) => {
                                let url = match data {
                                    DocumentSourceKind::Base64(data) => {
                                        let media_type = if let Some(media_type) = media_type {
                                            media_type.to_mime_type().to_string()
                                        } else {
                                            String::new()
                                        };
                                        format!("data:{media_type};base64,{data}")
                                    }
                                    DocumentSourceKind::Url(url) => url,
                                    DocumentSourceKind::Raw(_) => {
                                        return Err(MessageError::ConversionError(
                                            "Raw files not supported, encode as base64 first"
                                                .into(),
                                        ));
                                    }
                                    doc => {
                                        return Err(MessageError::ConversionError(format!(
                                            "Unsupported document type: {doc}"
                                        )));
                                    }
                                };

                                Ok(UserContent::InputImage {
                                    image_url: url,
                                    detail: detail.unwrap_or_default(),
                                })
                            }
                            message::UserContent::Document(message::Document {
                                media_type: Some(DocumentMediaType::PDF),
                                data,
                                ..
                            }) => {
                                let (file_data, file_url) = match data {
                                    DocumentSourceKind::Base64(data) => {
                                        (Some(format!("data:application/pdf;base64,{data}")), None)
                                    }
                                    DocumentSourceKind::Url(url) => (None, Some(url)),
                                    DocumentSourceKind::Raw(_) => {
                                        return Err(MessageError::ConversionError(
                                            "Raw files not supported, encode as base64 first"
                                                .into(),
                                        ));
                                    }
                                    doc => {
                                        return Err(MessageError::ConversionError(format!(
                                            "Unsupported document type: {doc}"
                                        )));
                                    }
                                };

                                Ok(UserContent::InputFile {
                                    file_url,
                                    file_data,
                                    filename: Some("document.pdf".into()),
                                })
                            }
                            message::UserContent::Document(message::Document {
                                data: DocumentSourceKind::Base64(text),
                                ..
                            }) => Ok(UserContent::InputText { text }),
                            message::UserContent::Audio(message::Audio {
                                data: DocumentSourceKind::Base64(data),
                                media_type,
                                ..
                            }) => Ok(UserContent::Audio {
                                input_audio: InputAudio {
                                    data,
                                    format: match media_type {
                                        Some(media_type) => media_type,
                                        None => AudioMediaType::MP3,
                                    },
                                },
                            }),
                            message::UserContent::Audio(_) => Err(MessageError::ConversionError(
                                "Audio must be base64 encoded data".into(),
                            )),
                            _ => unreachable!(),
                        })
                        .collect::<Result<Vec<_>, _>>()?;

                    let other_content = OneOrMany::many(other_content).expect(
                        "There must be other content here if there was no tool result content",
                    );

                    Ok(vec![Message::User {
                        content: other_content,
                        name: None,
                    }])
                }
            }
            message::Message::Assistant { content, id } => {
                let assistant_message_id = id;

                match content.first() {
                    crate::message::AssistantContent::Text(Text { text }) => {
                        Ok(vec![Message::Assistant {
                            id: assistant_message_id
                                .expect("The assistant message ID should exist"),
                            status: ToolStatus::Completed,
                            content: OneOrMany::one(AssistantContentType::Text(
                                AssistantContent::OutputText(Text { text }),
                            )),
                            name: None,
                        }])
                    }
                    crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
                        id,
                        call_id,
                        function,
                        ..
                    }) => Ok(vec![Message::Assistant {
                        content: OneOrMany::one(AssistantContentType::ToolCall(
                            OutputFunctionCall {
                                call_id: call_id.expect("The call ID should exist"),
                                arguments: function.arguments,
                                id,
                                name: function.name,
                                status: ToolStatus::Completed,
                            },
                        )),
                        id: assistant_message_id.expect("The assistant message ID should exist!"),
                        name: None,
                        status: ToolStatus::Completed,
                    }]),
                    crate::message::AssistantContent::Reasoning(crate::message::Reasoning {
                        id,
                        reasoning,
                        ..
                    }) => Ok(vec![Message::Assistant {
                        content: OneOrMany::one(AssistantContentType::Reasoning(OpenAIReasoning {
                            id: id.expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
                            summary: reasoning.into_iter().map(|x| ReasoningSummary::SummaryText { text: x }).collect(),
                            encrypted_content: None,
                            status: Some(ToolStatus::Completed),
                        })),
                        id: assistant_message_id.expect("The assistant message ID should exist!"),
                        name: None,
                        status: ToolStatus::Completed,
                    }]),
                    crate::message::AssistantContent::Image(_) => {
                        Err(MessageError::ConversionError(
                            "Assistant image content is not supported in OpenAI Responses API".into(),
                        ))
                    }
                }
            }
        }
    }
}

impl FromStr for UserContent {
    type Err = Infallible;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(UserContent::InputText {
            text: s.to_string(),
        })
    }
}