rig/providers/openai/responses_api/mod.rs

1//! The OpenAI Responses API.
2//!
3//! By default when creating a completion client, this is the API that gets used.
4//!
//! If you'd like to switch back to the regular Completions API, you can do so with the `.completions_api()` method - see below for an example:
6//! ```rust
7//! let openai_client = rig::providers::openai::Client::from_env();
8//! let model = openai_client.completion_model("gpt-4o").completions_api();
9//! ```
10use super::completion::ToolChoice;
11use super::{Client, responses_api::streaming::StreamingCompletionResponse};
12use super::{InputAudio, SystemContent};
13use crate::completion::CompletionError;
14use crate::http_client;
15use crate::http_client::HttpClientExt;
16use crate::json_utils;
17use crate::message::{
18    AudioMediaType, Document, DocumentMediaType, DocumentSourceKind, ImageDetail, MessageError,
19    MimeType, Text,
20};
21use crate::one_or_many::string_or_one_or_many;
22
23use crate::{OneOrMany, completion, message};
24use serde::{Deserialize, Serialize};
25use serde_json::{Map, Value};
26use tracing::{Instrument, info_span};
27
28use std::convert::Infallible;
29use std::ops::Add;
30use std::str::FromStr;
31
32pub mod streaming;
33
34/// The completion request type for OpenAI's Response API: <https://platform.openai.com/docs/api-reference/responses/create>
35/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
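///
/// A rough sketch of the conversion, assuming you already have a generic
/// [`crate::completion::CompletionRequest`] in hand (for example, one built by an agent pipeline):
///
/// ```rust,ignore
/// use rig::providers::openai::responses_api::CompletionRequest;
///
/// // `generic_request` is a `rig::completion::CompletionRequest` built elsewhere.
/// let request = CompletionRequest::try_from(("gpt-4o".to_string(), generic_request))?;
/// let body = serde_json::to_vec(&request)?;
/// ```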
36#[derive(Debug, Deserialize, Serialize, Clone)]
37pub struct CompletionRequest {
38    /// Message inputs
39    pub input: OneOrMany<InputItem>,
40    /// The model name
41    pub model: String,
42    /// Instructions (also referred to as preamble, although in other APIs this would be the "system prompt")
43    #[serde(skip_serializing_if = "Option::is_none")]
44    pub instructions: Option<String>,
45    /// The maximum number of output tokens.
46    #[serde(skip_serializing_if = "Option::is_none")]
47    pub max_output_tokens: Option<u64>,
48    /// Toggle to true for streaming responses.
49    #[serde(skip_serializing_if = "Option::is_none")]
50    pub stream: Option<bool>,
    /// The temperature. Set higher (up to a maximum of 2.0) for more creative responses.
52    #[serde(skip_serializing_if = "Option::is_none")]
53    pub temperature: Option<f64>,
54    /// Whether the LLM should be forced to use a tool before returning a response.
55    /// If none provided, the default option is "auto".
56    #[serde(skip_serializing_if = "Option::is_none")]
57    tool_choice: Option<ToolChoice>,
    /// The tools you want to use. Currently this is limited to functions, but this will be expanded in the future.
59    #[serde(skip_serializing_if = "Vec::is_empty")]
60    pub tools: Vec<ResponsesToolDefinition>,
61    /// Additional parameters
62    #[serde(flatten)]
63    pub additional_parameters: AdditionalParameters,
64}
65
66impl CompletionRequest {
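    /// Attach a structured-outputs (JSON schema) configuration to this request.
    ///
    /// A minimal sketch, assuming the schema is written by hand (a `schemars`-generated
    /// schema converted to `serde_json::Value` would work the same way):
    ///
    /// ```rust,ignore
    /// let schema = serde_json::json!({
    ///     "type": "object",
    ///     "properties": { "answer": { "type": "string" } },
    ///     "required": ["answer"],
    ///     "additionalProperties": false
    /// });
    /// let request = request.with_structured_outputs("answer_schema", schema);
    /// ```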
67    pub fn with_structured_outputs<S>(mut self, schema_name: S, schema: serde_json::Value) -> Self
68    where
69        S: Into<String>,
70    {
71        self.additional_parameters.text = Some(TextConfig::structured_output(schema_name, schema));
72
73        self
74    }
75
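    /// Attach a [`Reasoning`] configuration (effort and summary level) to this request.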
76    pub fn with_reasoning(mut self, reasoning: Reasoning) -> Self {
77        self.additional_parameters.reasoning = Some(reasoning);
78
79        self
80    }
81}
82
83/// An input item for [`CompletionRequest`].
84#[derive(Debug, Deserialize, Serialize, Clone)]
85pub struct InputItem {
86    /// The role of an input item/message.
87    /// Input messages should be Some(Role::User), and output messages should be Some(Role::Assistant).
88    /// Everything else should be None.
89    #[serde(skip_serializing_if = "Option::is_none")]
90    role: Option<Role>,
91    /// The input content itself.
92    #[serde(flatten)]
93    input: InputContent,
94}
95
96/// Message roles. Used by OpenAI Responses API to determine who created a given message.
97#[derive(Debug, Deserialize, Serialize, Clone)]
98#[serde(rename_all = "lowercase")]
99pub enum Role {
100    User,
101    Assistant,
102    System,
103}
104
105/// The type of content used in an [`InputItem`]. Additionally holds data for each type of input content.
106#[derive(Debug, Deserialize, Serialize, Clone)]
107#[serde(tag = "type", rename_all = "snake_case")]
108pub enum InputContent {
109    Message(Message),
110    Reasoning(OpenAIReasoning),
111    FunctionCall(OutputFunctionCall),
112    FunctionCallOutput(ToolResult),
113}
114
115#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
116pub struct OpenAIReasoning {
117    id: String,
118    pub summary: Vec<ReasoningSummary>,
119    pub encrypted_content: Option<String>,
120    #[serde(skip_serializing_if = "Option::is_none")]
121    pub status: Option<ToolStatus>,
122}
123
124#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
125#[serde(tag = "type", rename_all = "snake_case")]
126pub enum ReasoningSummary {
127    SummaryText { text: String },
128}
129
130impl ReasoningSummary {
131    fn new(input: &str) -> Self {
132        Self::SummaryText {
133            text: input.to_string(),
134        }
135    }
136
137    pub fn text(&self) -> String {
138        let ReasoningSummary::SummaryText { text } = self;
139        text.clone()
140    }
141}
142
143/// A tool result.
144#[derive(Debug, Deserialize, Serialize, Clone)]
145pub struct ToolResult {
    /// The call ID of a tool call. This must match the call ID of the corresponding tool call, otherwise an error will be received.
147    call_id: String,
148    /// The result of a tool call.
149    output: String,
150    /// The status of a tool call (if used in a completion request, this should always be Completed)
151    status: ToolStatus,
152}
153
154impl From<Message> for InputItem {
155    fn from(value: Message) -> Self {
156        match value {
157            Message::User { .. } => Self {
158                role: Some(Role::User),
159                input: InputContent::Message(value),
160            },
161            Message::Assistant { ref content, .. } => {
162                let role = if content
163                    .clone()
164                    .iter()
165                    .any(|x| matches!(x, AssistantContentType::Reasoning(_)))
166                {
167                    None
168                } else {
169                    Some(Role::Assistant)
170                };
171                Self {
172                    role,
173                    input: InputContent::Message(value),
174                }
175            }
176            Message::System { .. } => Self {
177                role: Some(Role::System),
178                input: InputContent::Message(value),
179            },
180            Message::ToolResult {
181                tool_call_id,
182                output,
183            } => Self {
184                role: None,
185                input: InputContent::FunctionCallOutput(ToolResult {
186                    call_id: tool_call_id,
187                    output,
188                    status: ToolStatus::Completed,
189                }),
190            },
191        }
192    }
193}
194
195impl TryFrom<crate::completion::Message> for Vec<InputItem> {
196    type Error = CompletionError;
197
198    fn try_from(value: crate::completion::Message) -> Result<Self, Self::Error> {
199        match value {
200            crate::completion::Message::User { content } => {
201                let mut items = Vec::new();
202
203                for user_content in content {
204                    match user_content {
205                        crate::message::UserContent::Text(Text { text }) => {
206                            items.push(InputItem {
207                                role: Some(Role::User),
208                                input: InputContent::Message(Message::User {
209                                    content: OneOrMany::one(UserContent::InputText { text }),
210                                    name: None,
211                                }),
212                            });
213                        }
214                        crate::message::UserContent::ToolResult(
215                            crate::completion::message::ToolResult {
216                                call_id,
217                                content: tool_content,
218                                ..
219                            },
220                        ) => {
221                            for tool_result_content in tool_content {
222                                let crate::completion::message::ToolResultContent::Text(Text {
223                                    text,
224                                }) = tool_result_content
225                                else {
226                                    return Err(CompletionError::ProviderError(
                                        "Tool results in this API currently only support text".to_string(),
228                                    ));
229                                };
230                                // let output = serde_json::from_str(&text)?;
231                                items.push(InputItem {
232                                    role: None,
233                                    input: InputContent::FunctionCallOutput(ToolResult {
234                                        call_id: call_id
235                                            .clone()
236                                            .expect("The call ID of this tool should exist!"),
237                                        output: text,
238                                        status: ToolStatus::Completed,
239                                    }),
240                                });
241                            }
242                        }
243                        crate::message::UserContent::Document(Document {
244                            data,
245                            media_type: Some(DocumentMediaType::PDF),
246                            ..
247                        }) => {
248                            let (file_data, file_url) = match data {
249                                DocumentSourceKind::Base64(data) => {
250                                    (Some(format!("data:application/pdf;base64,{data}")), None)
251                                }
252                                DocumentSourceKind::Url(url) => (None, Some(url)),
253                                DocumentSourceKind::Raw(_) => {
254                                    return Err(CompletionError::RequestError(
255                                        "Raw file data not supported, encode as base64 first"
256                                            .into(),
257                                    ));
258                                }
259                                doc => {
260                                    return Err(CompletionError::RequestError(
261                                        format!("Unsupported document type: {doc}").into(),
262                                    ));
263                                }
264                            };
265
266                            items.push(InputItem {
267                                role: Some(Role::User),
268                                input: InputContent::Message(Message::User {
269                                    content: OneOrMany::one(UserContent::InputFile {
270                                        file_data,
271                                        file_url,
272                                        filename: Some("document.pdf".to_string()),
273                                    }),
274                                    name: None,
275                                }),
276                            })
277                        }
278                        // todo: should we ensure this takes into account file size?
279                        crate::message::UserContent::Document(Document {
280                            data: DocumentSourceKind::Base64(text),
281                            ..
282                        }) => items.push(InputItem {
283                            role: Some(Role::User),
284                            input: InputContent::Message(Message::User {
285                                content: OneOrMany::one(UserContent::InputText { text }),
286                                name: None,
287                            }),
288                        }),
289                        crate::message::UserContent::Document(Document {
290                            data: DocumentSourceKind::String(text),
291                            ..
292                        }) => items.push(InputItem {
293                            role: Some(Role::User),
294                            input: InputContent::Message(Message::User {
295                                content: OneOrMany::one(UserContent::InputText { text }),
296                                name: None,
297                            }),
298                        }),
299                        crate::message::UserContent::Image(crate::message::Image {
300                            data,
301                            media_type,
302                            detail,
303                            ..
304                        }) => {
305                            let url = match data {
306                                DocumentSourceKind::Base64(data) => {
307                                    let media_type = if let Some(media_type) = media_type {
308                                        media_type.to_mime_type().to_string()
309                                    } else {
310                                        String::new()
311                                    };
312                                    format!("data:{media_type};base64,{data}")
313                                }
314                                DocumentSourceKind::Url(url) => url,
315                                DocumentSourceKind::Raw(_) => {
316                                    return Err(CompletionError::RequestError(
317                                        "Raw file data not supported, encode as base64 first"
318                                            .into(),
319                                    ));
320                                }
321                                doc => {
322                                    return Err(CompletionError::RequestError(
323                                        format!("Unsupported document type: {doc}").into(),
324                                    ));
325                                }
326                            };
327                            items.push(InputItem {
328                                role: Some(Role::User),
329                                input: InputContent::Message(Message::User {
330                                    content: OneOrMany::one(UserContent::InputImage {
331                                        image_url: url,
332                                        detail: detail.unwrap_or_default(),
333                                    }),
334                                    name: None,
335                                }),
336                            });
337                        }
338                        message => {
339                            return Err(CompletionError::ProviderError(format!(
340                                "Unsupported message: {message:?}"
341                            )));
342                        }
343                    }
344                }
345
346                Ok(items)
347            }
348            crate::completion::Message::Assistant { id, content } => {
349                let mut items = Vec::new();
350
351                for assistant_content in content {
352                    match assistant_content {
353                        crate::message::AssistantContent::Text(Text { text }) => {
354                            let id = id.as_ref().unwrap_or(&String::default()).clone();
355                            items.push(InputItem {
356                                role: Some(Role::Assistant),
357                                input: InputContent::Message(Message::Assistant {
358                                    content: OneOrMany::one(AssistantContentType::Text(
359                                        AssistantContent::OutputText(Text { text }),
360                                    )),
361                                    id,
362                                    name: None,
363                                    status: ToolStatus::Completed,
364                                }),
365                            });
366                        }
367                        crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
368                            id: tool_id,
369                            call_id,
370                            function,
371                        }) => {
372                            items.push(InputItem {
373                                role: None,
374                                input: InputContent::FunctionCall(OutputFunctionCall {
375                                    arguments: function.arguments,
376                                    call_id: call_id.expect("The tool call ID should exist!"),
377                                    id: tool_id,
378                                    name: function.name,
379                                    status: ToolStatus::Completed,
380                                }),
381                            });
382                        }
383                        crate::message::AssistantContent::Reasoning(
384                            crate::message::Reasoning { id, reasoning },
385                        ) => {
386                            items.push(InputItem {
387                                role: None,
388                                input: InputContent::Reasoning(OpenAIReasoning {
389                                    id: id
390                                        .expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
391                                    summary: reasoning.into_iter().map(|x| ReasoningSummary::new(&x)).collect(),
392                                    encrypted_content: None,
393                                    status: None,
394                                }),
395                            });
396                        }
397                    }
398                }
399
400                Ok(items)
401            }
402        }
403    }
404}
405
406impl From<OneOrMany<String>> for Vec<ReasoningSummary> {
407    fn from(value: OneOrMany<String>) -> Self {
408        value.iter().map(|x| ReasoningSummary::new(x)).collect()
409    }
410}
411
/// The definition of a tool, repurposed for OpenAI's Responses API.
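///
/// A hedged sketch of converting a generic [`completion::ToolDefinition`] into this type
/// (the conversion also enforces `additionalProperties: false` on object schemas):
///
/// ```rust,ignore
/// let generic = rig::completion::ToolDefinition {
///     name: "add".to_string(),
///     description: "Add two numbers together".to_string(),
///     parameters: serde_json::json!({
///         "type": "object",
///         "properties": { "a": { "type": "number" }, "b": { "type": "number" } },
///         "required": ["a", "b"]
///     }),
/// };
/// let tool = ResponsesToolDefinition::from(generic);
/// assert_eq!(tool.kind, "function");
/// ```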
413#[derive(Debug, Deserialize, Serialize, Clone)]
414pub struct ResponsesToolDefinition {
415    /// Tool name
416    pub name: String,
    /// Parameters - this should be a JSON schema. Object schemas should additionally include an "additionalProperties" field set to false, as this is required when using OpenAI's strict mode (enabled by default).
418    pub parameters: serde_json::Value,
419    /// Whether to use strict mode. Enabled by default as it allows for improved efficiency.
420    pub strict: bool,
421    /// The type of tool. This should always be "function".
422    #[serde(rename = "type")]
423    pub kind: String,
424    /// Tool description.
425    pub description: String,
426}
427
/// Recursively ensures all object schemas in a JSON schema have `additionalProperties: false`.
/// Nested arrays, schema `$defs`, object properties and enum-like compositions (`anyOf`/`oneOf`/`allOf`) are all handled.
/// OpenAI's Responses API requires this when using strict mode.
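///
/// A small illustration of the intended transformation (a sketch, not a doctest):
///
/// ```rust,ignore
/// let mut schema = serde_json::json!({
///     "type": "object",
///     "properties": {
///         "tags": { "type": "array", "items": { "type": "object", "properties": {} } }
///     }
/// });
/// add_props_false(&mut schema);
/// // Both the outer object schema and the nested `items` object schema now carry
/// // `"additionalProperties": false`.
/// ```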
431fn add_props_false(schema: &mut serde_json::Value) {
432    if let Value::Object(obj) = schema {
433        let is_object_schema = obj.get("type") == Some(&Value::String("object".to_string()))
434            || obj.contains_key("properties");
435
436        if is_object_schema && !obj.contains_key("additionalProperties") {
437            obj.insert("additionalProperties".to_string(), Value::Bool(false));
438        }
439
440        if let Some(defs) = obj.get_mut("$defs")
441            && let Value::Object(defs_obj) = defs
442        {
443            for (_, def_schema) in defs_obj.iter_mut() {
444                add_props_false(def_schema);
445            }
446        }
447
448        if let Some(properties) = obj.get_mut("properties")
449            && let Value::Object(props) = properties
450        {
451            for (_, prop_value) in props.iter_mut() {
452                add_props_false(prop_value);
453            }
454        }
455
456        if let Some(items) = obj.get_mut("items") {
457            add_props_false(items);
458        }
459
        // Handle schema compositions (anyOf/oneOf/allOf), which is how enums are typically represented
461        for key in ["anyOf", "oneOf", "allOf"] {
462            if let Some(variants) = obj.get_mut(key)
463                && let Value::Array(variants_array) = variants
464            {
465                for variant in variants_array.iter_mut() {
466                    add_props_false(variant);
467                }
468            }
469        }
470    }
471}
472
473impl From<completion::ToolDefinition> for ResponsesToolDefinition {
474    fn from(value: completion::ToolDefinition) -> Self {
475        let completion::ToolDefinition {
476            name,
477            mut parameters,
478            description,
479        } = value;
480
481        add_props_false(&mut parameters);
482
483        Self {
484            name,
485            parameters,
486            description,
487            kind: "function".to_string(),
488            strict: true,
489        }
490    }
491}
492
/// Token usage.
/// The OpenAI Responses API reports input tokens and output tokens (each with more in-depth details) as well as a total token count.
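///
/// Usage values implement [`std::ops::Add`], which makes aggregating usage across
/// streamed chunks straightforward; a rough sketch:
///
/// ```rust,ignore
/// // `a` and `b` are `ResponsesUsage` values taken from two responses.
/// let combined = a + b;
/// println!("total tokens so far: {}", combined.total_tokens);
/// ```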
495#[derive(Clone, Debug, Serialize, Deserialize)]
496pub struct ResponsesUsage {
497    /// Input tokens
498    pub input_tokens: u64,
499    /// In-depth detail on input tokens (cached tokens)
500    #[serde(skip_serializing_if = "Option::is_none")]
501    pub input_tokens_details: Option<InputTokensDetails>,
502    /// Output tokens
503    pub output_tokens: u64,
504    /// In-depth detail on output tokens (reasoning tokens)
505    pub output_tokens_details: OutputTokensDetails,
506    /// Total tokens used (for a given prompt)
507    pub total_tokens: u64,
508}
509
510impl ResponsesUsage {
511    /// Create a new ResponsesUsage instance
512    pub(crate) fn new() -> Self {
513        Self {
514            input_tokens: 0,
515            input_tokens_details: Some(InputTokensDetails::new()),
516            output_tokens: 0,
517            output_tokens_details: OutputTokensDetails::new(),
518            total_tokens: 0,
519        }
520    }
521}
522
523impl Add for ResponsesUsage {
524    type Output = Self;
525
526    fn add(self, rhs: Self) -> Self::Output {
527        let input_tokens = self.input_tokens + rhs.input_tokens;
528        let input_tokens_details = self.input_tokens_details.map(|lhs| {
529            if let Some(tokens) = rhs.input_tokens_details {
530                lhs + tokens
531            } else {
532                lhs
533            }
534        });
535        let output_tokens = self.output_tokens + rhs.output_tokens;
536        let output_tokens_details = self.output_tokens_details + rhs.output_tokens_details;
537        let total_tokens = self.total_tokens + rhs.total_tokens;
538        Self {
539            input_tokens,
540            input_tokens_details,
541            output_tokens,
542            output_tokens_details,
543            total_tokens,
544        }
545    }
546}
547
548/// In-depth details on input tokens.
549#[derive(Clone, Debug, Serialize, Deserialize)]
550pub struct InputTokensDetails {
551    /// Cached tokens from OpenAI
552    pub cached_tokens: u64,
553}
554
555impl InputTokensDetails {
556    pub(crate) fn new() -> Self {
557        Self { cached_tokens: 0 }
558    }
559}
560
561impl Add for InputTokensDetails {
562    type Output = Self;
563    fn add(self, rhs: Self) -> Self::Output {
564        Self {
565            cached_tokens: self.cached_tokens + rhs.cached_tokens,
566        }
567    }
568}
569
570/// In-depth details on output tokens.
571#[derive(Clone, Debug, Serialize, Deserialize)]
572pub struct OutputTokensDetails {
573    /// Reasoning tokens
574    pub reasoning_tokens: u64,
575}
576
577impl OutputTokensDetails {
578    pub(crate) fn new() -> Self {
579        Self {
580            reasoning_tokens: 0,
581        }
582    }
583}
584
585impl Add for OutputTokensDetails {
586    type Output = Self;
587    fn add(self, rhs: Self) -> Self::Output {
588        Self {
589            reasoning_tokens: self.reasoning_tokens + rhs.reasoning_tokens,
590        }
591    }
592}
593
/// Occasionally, the OpenAI Responses API may return an incomplete response. This struct holds the reason why.
595#[derive(Clone, Debug, Default, Serialize, Deserialize)]
596pub struct IncompleteDetailsReason {
597    /// The reason for an incomplete [`CompletionResponse`].
598    pub reason: String,
599}
600
601/// A response error from OpenAI's Response API.
602#[derive(Clone, Debug, Default, Serialize, Deserialize)]
603pub struct ResponseError {
604    /// Error code
605    pub code: String,
606    /// Error message
607    pub message: String,
608}
609
610/// A response object as an enum (ensures type validation)
611#[derive(Clone, Debug, Deserialize, Serialize)]
612#[serde(rename_all = "snake_case")]
613pub enum ResponseObject {
614    Response,
615}
616
617/// The response status as an enum (ensures type validation)
618#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
619#[serde(rename_all = "snake_case")]
620pub enum ResponseStatus {
621    InProgress,
622    Completed,
623    Failed,
624    Cancelled,
625    Queued,
626    Incomplete,
627}
628
/// Attempt to create a [`CompletionRequest`] from a model name and a [`crate::completion::CompletionRequest`].
630impl TryFrom<(String, crate::completion::CompletionRequest)> for CompletionRequest {
631    type Error = CompletionError;
632    fn try_from(
633        (model, req): (String, crate::completion::CompletionRequest),
634    ) -> Result<Self, Self::Error> {
635        let input = {
636            let mut partial_history = vec![];
637            if let Some(docs) = req.normalized_documents() {
638                partial_history.push(docs);
639            }
640            partial_history.extend(req.chat_history);
641
            // Initialize the full history (the preamble is sent separately via the `instructions` field)
643            let mut full_history: Vec<InputItem> = Vec::new();
644
645            // Convert and extend the rest of the history
            full_history.extend(
                partial_history
                    .into_iter()
                    .map(<Vec<InputItem>>::try_from)
                    .collect::<Result<Vec<Vec<InputItem>>, _>>()?
                    .into_iter()
                    .flatten(),
            );
655
656            full_history
657        };
658
659        let input = OneOrMany::many(input)
660            .expect("This should never panic - if it does, please file a bug report");
661
        // Pull the `stream` flag (if any) out of the additional parameters
        let stream = req
            .additional_params
            .as_ref()
            .and_then(|params| params.get("stream"))
            .and_then(Value::as_bool);
667
668        let additional_parameters = if let Some(map) = req.additional_params {
669            serde_json::from_value::<AdditionalParameters>(map).expect("Converting additional parameters to AdditionalParameters should never fail as every field is an Option")
670        } else {
            // If there are no additional parameters, initialise an empty object
672            AdditionalParameters::default()
673        };
674
675        let tool_choice = req.tool_choice.map(ToolChoice::try_from).transpose()?;
676
677        Ok(Self {
678            input,
679            model,
680            instructions: req.preamble,
681            max_output_tokens: req.max_tokens,
682            stream,
683            tool_choice,
684            tools: req
685                .tools
686                .into_iter()
687                .map(ResponsesToolDefinition::from)
688                .collect(),
689            temperature: req.temperature,
690            additional_parameters,
691        })
692    }
693}
694
/// The completion model struct for OpenAI's Responses API.
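///
/// Usually obtained from the OpenAI client; a minimal sketch mirroring the module-level example:
///
/// ```rust,ignore
/// let openai_client = rig::providers::openai::Client::from_env();
/// // `completion_model` returns a `ResponsesCompletionModel` by default.
/// let model = openai_client.completion_model("gpt-4o");
/// ```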
696#[derive(Clone)]
697pub struct ResponsesCompletionModel<T = reqwest::Client> {
698    /// The OpenAI client
699    pub(crate) client: Client<T>,
    /// Name of the model (e.g. gpt-4o)
701    pub model: String,
702}
703
704impl<T> ResponsesCompletionModel<T>
705where
706    T: HttpClientExt + Clone + Default + std::fmt::Debug + 'static,
707{
708    /// Creates a new [`ResponsesCompletionModel`].
709    pub fn new(client: Client<T>, model: &str) -> Self {
710        Self {
711            client,
712            model: model.to_string(),
713        }
714    }
715
    /// Use the Completions API instead of the Responses API.
717    pub fn completions_api(self) -> crate::providers::openai::completion::CompletionModel<T> {
718        crate::providers::openai::completion::CompletionModel::new(self.client, &self.model)
719    }
720
721    /// Attempt to create a completion request from [`crate::completion::CompletionRequest`].
722    pub(crate) fn create_completion_request(
723        &self,
724        completion_request: crate::completion::CompletionRequest,
725    ) -> Result<CompletionRequest, CompletionError> {
726        let req = CompletionRequest::try_from((self.model.clone(), completion_request))?;
727
728        Ok(req)
729    }
730}
731
732/// The standard response format from OpenAI's Responses API.
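///
/// A rough sketch of pulling assistant text back out of the raw response:
///
/// ```rust,ignore
/// for output in &response.output {
///     if let Output::Message(message) = output {
///         for content in &message.content {
///             if let AssistantContent::OutputText(text) = content {
///                 println!("{}", text.text);
///             }
///         }
///     }
/// }
/// ```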
733#[derive(Clone, Debug, Serialize, Deserialize)]
734pub struct CompletionResponse {
735    /// The ID of a completion response.
736    pub id: String,
737    /// The type of the object.
738    pub object: ResponseObject,
739    /// The time at which a given response has been created, in seconds from the UNIX epoch (01/01/1970 00:00:00).
740    pub created_at: u64,
741    /// The status of the response.
742    pub status: ResponseStatus,
743    /// Response error (optional)
744    pub error: Option<ResponseError>,
745    /// Incomplete response details (optional)
746    pub incomplete_details: Option<IncompleteDetailsReason>,
747    /// System prompt/preamble
748    pub instructions: Option<String>,
749    /// The maximum number of tokens the model should output
750    pub max_output_tokens: Option<u64>,
751    /// The model name
752    pub model: String,
753    /// Token usage
754    pub usage: Option<ResponsesUsage>,
755    /// The model output (messages, etc will go here)
756    pub output: Vec<Output>,
757    /// Tools
758    pub tools: Vec<ResponsesToolDefinition>,
759    /// Additional parameters
760    #[serde(flatten)]
761    pub additional_parameters: AdditionalParameters,
762}
763
764/// Additional parameters for the completion request type for OpenAI's Response API: <https://platform.openai.com/docs/api-reference/responses/create>
765/// Intended to be derived from [`crate::completion::request::CompletionRequest`].
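///
/// A hedged sketch of supplying these via a request builder's additional parameters
/// (the field names below come from this struct; the builder method is assumed from the
/// wider crate API):
///
/// ```rust,ignore
/// let params = AdditionalParameters {
///     parallel_tool_calls: Some(false),
///     store: Some(true),
///     ..Default::default()
/// };
/// let agent = openai_client
///     .agent("gpt-4o")
///     .additional_params(params.to_json())
///     .build();
/// ```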
766#[derive(Clone, Debug, Deserialize, Serialize, Default)]
767pub struct AdditionalParameters {
    /// Whether or not a given model task should run in the background (i.e. as a detached process).
769    #[serde(skip_serializing_if = "Option::is_none")]
770    pub background: Option<bool>,
771    /// The text response format. This is where you would add structured outputs (if you want them).
772    #[serde(skip_serializing_if = "Option::is_none")]
773    pub text: Option<TextConfig>,
    /// What types of extra data you would like to include. This is of limited use at the moment since most of these extra data types are currently unsupported, but support is coming soon!
775    #[serde(skip_serializing_if = "Option::is_none")]
776    pub include: Option<Vec<Include>>,
    /// `top_p`. It is generally recommended to alter this or `temperature`, but not both.
778    #[serde(skip_serializing_if = "Option::is_none")]
779    pub top_p: Option<f64>,
780    /// Whether or not the response should be truncated.
781    #[serde(skip_serializing_if = "Option::is_none")]
782    pub truncation: Option<TruncationStrategy>,
    /// A unique identifier representing your end user.
784    #[serde(skip_serializing_if = "Option::is_none")]
785    pub user: Option<String>,
    /// Any additional metadata you'd like to attach. It will also be returned in the response.
787    #[serde(skip_serializing_if = "Map::is_empty", default)]
788    pub metadata: serde_json::Map<String, serde_json::Value>,
789    /// Whether or not you want tool calls to run in parallel.
790    #[serde(skip_serializing_if = "Option::is_none")]
791    pub parallel_tool_calls: Option<bool>,
792    /// Previous response ID. If you are not sending a full conversation, this can help to track the message flow.
793    #[serde(skip_serializing_if = "Option::is_none")]
794    pub previous_response_id: Option<String>,
    /// Add thinking/reasoning to your response. The reasoning will be emitted as an item in the `output` field.
796    #[serde(skip_serializing_if = "Option::is_none")]
797    pub reasoning: Option<Reasoning>,
798    /// The service tier you're using.
799    #[serde(skip_serializing_if = "Option::is_none")]
800    pub service_tier: Option<OpenAIServiceTier>,
    /// Whether or not to store the response for later retrieval via the API.
802    #[serde(skip_serializing_if = "Option::is_none")]
803    pub store: Option<bool>,
804}
805
806impl AdditionalParameters {
807    pub fn to_json(self) -> serde_json::Value {
        serde_json::to_value(self).expect("this should never fail since a struct that impls Serialize will always produce valid JSON")
809    }
810}
811
/// The truncation strategy.
/// When set to `Auto`, if the context of this response and previous ones exceeds the model's context window, the model truncates the response to fit by dropping input items from the middle of the conversation.
/// Otherwise, nothing is truncated (this is the default).
815#[derive(Clone, Debug, Default, Serialize, Deserialize)]
816#[serde(rename_all = "snake_case")]
817pub enum TruncationStrategy {
818    Auto,
819    #[default]
820    Disabled,
821}
822
823/// The model output format configuration.
824/// You can either have plain text by default, or attach a JSON schema for the purposes of structured outputs.
825#[derive(Clone, Debug, Serialize, Deserialize)]
826pub struct TextConfig {
827    pub format: TextFormat,
828}
829
830impl TextConfig {
831    pub(crate) fn structured_output<S>(name: S, schema: serde_json::Value) -> Self
832    where
833        S: Into<String>,
834    {
835        Self {
836            format: TextFormat::JsonSchema(StructuredOutputsInput {
837                name: name.into(),
838                schema,
839                strict: true,
840            }),
841        }
842    }
843}
844
845/// The text format (contained by [`TextConfig`]).
846/// You can either have plain text by default, or attach a JSON schema for the purposes of structured outputs.
847#[derive(Clone, Debug, Serialize, Deserialize, Default)]
848#[serde(tag = "type")]
849#[serde(rename_all = "snake_case")]
850pub enum TextFormat {
851    JsonSchema(StructuredOutputsInput),
852    #[default]
853    Text,
854}
855
856/// The inputs required for adding structured outputs.
857#[derive(Clone, Debug, Serialize, Deserialize)]
858pub struct StructuredOutputsInput {
859    /// The name of your schema.
860    pub name: String,
861    /// Your required output schema. It is recommended that you use the JsonSchema macro, which you can check out at <https://docs.rs/schemars/latest/schemars/trait.JsonSchema.html>.
862    pub schema: serde_json::Value,
863    /// Enable strict output. If you are using your AI agent in a data pipeline or another scenario that requires the data to be absolutely fixed to a given schema, it is recommended to set this to true.
864    pub strict: bool,
865}
866
867/// Add reasoning to a [`CompletionRequest`].
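///
/// A minimal sketch of enabling reasoning on a request (all the types below live in this module):
///
/// ```rust,ignore
/// let reasoning = Reasoning::new()
///     .with_effort(ReasoningEffort::High)
///     .with_summary_level(ReasoningSummaryLevel::Detailed);
/// let request = request.with_reasoning(reasoning);
/// ```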
868#[derive(Clone, Debug, Default, Serialize, Deserialize)]
869pub struct Reasoning {
870    /// How much effort you want the model to put into thinking/reasoning.
871    pub effort: Option<ReasoningEffort>,
872    /// How much effort you want the model to put into writing the reasoning summary.
873    #[serde(skip_serializing_if = "Option::is_none")]
874    pub summary: Option<ReasoningSummaryLevel>,
875}
876
877impl Reasoning {
    /// Creates a new [`Reasoning`] instance (with empty values).
879    pub fn new() -> Self {
880        Self {
881            effort: None,
882            summary: None,
883        }
884    }
885
886    /// Adds reasoning effort.
887    pub fn with_effort(mut self, reasoning_effort: ReasoningEffort) -> Self {
888        self.effort = Some(reasoning_effort);
889
890        self
891    }
892
893    /// Adds summary level (how detailed the reasoning summary will be).
894    pub fn with_summary_level(mut self, reasoning_summary_level: ReasoningSummaryLevel) -> Self {
895        self.summary = Some(reasoning_summary_level);
896
897        self
898    }
899}
900
/// The billing service tier that will be used. Set to `Auto` by default.
902#[derive(Clone, Debug, Default, Serialize, Deserialize)]
903#[serde(rename_all = "snake_case")]
904pub enum OpenAIServiceTier {
905    #[default]
906    Auto,
907    Default,
908    Flex,
909}
910
911/// The amount of reasoning effort that will be used by a given model.
912#[derive(Clone, Debug, Default, Serialize, Deserialize)]
913#[serde(rename_all = "snake_case")]
914pub enum ReasoningEffort {
915    Minimal,
916    Low,
917    #[default]
918    Medium,
919    High,
920}
921
922/// The amount of effort that will go into a reasoning summary by a given model.
923#[derive(Clone, Debug, Default, Serialize, Deserialize)]
924#[serde(rename_all = "snake_case")]
925pub enum ReasoningSummaryLevel {
926    #[default]
927    Auto,
928    Concise,
929    Detailed,
930}
931
932/// Results to additionally include in the OpenAI Responses API.
933/// Note that most of these are currently unsupported, but have been added for completeness.
934#[derive(Clone, Debug, Deserialize, Serialize)]
935pub enum Include {
936    #[serde(rename = "file_search_call.results")]
937    FileSearchCallResults,
938    #[serde(rename = "message.input_image.image_url")]
939    MessageInputImageImageUrl,
940    #[serde(rename = "computer_call.output.image_url")]
941    ComputerCallOutputOutputImageUrl,
942    #[serde(rename = "reasoning.encrypted_content")]
943    ReasoningEncryptedContent,
944    #[serde(rename = "code_interpreter_call.outputs")]
945    CodeInterpreterCallOutputs,
946}
947
948/// A currently non-exhaustive list of output types.
949#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
950#[serde(tag = "type")]
951#[serde(rename_all = "snake_case")]
952pub enum Output {
953    Message(OutputMessage),
954    #[serde(alias = "function_call")]
955    FunctionCall(OutputFunctionCall),
956    Reasoning {
957        id: String,
958        summary: Vec<ReasoningSummary>,
959    },
960}
961
962impl From<Output> for Vec<completion::AssistantContent> {
963    fn from(value: Output) -> Self {
964        let res: Vec<completion::AssistantContent> = match value {
965            Output::Message(OutputMessage { content, .. }) => content
966                .into_iter()
967                .map(completion::AssistantContent::from)
968                .collect(),
969            Output::FunctionCall(OutputFunctionCall {
970                id,
971                arguments,
972                call_id,
973                name,
974                ..
975            }) => vec![completion::AssistantContent::tool_call_with_call_id(
976                id, call_id, name, arguments,
977            )],
978            Output::Reasoning { id, summary } => {
979                let summary: Vec<String> = summary.into_iter().map(|x| x.text()).collect();
980
981                vec![completion::AssistantContent::Reasoning(
982                    message::Reasoning::multi(summary).with_id(id),
983                )]
984            }
985        };
986
987        res
988    }
989}
990
991#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
992pub struct OutputReasoning {
993    id: String,
994    summary: Vec<ReasoningSummary>,
995    status: ToolStatus,
996}
997
/// An OpenAI Responses API tool call. The returned call ID must be used when creating the corresponding tool result to send back to OpenAI as a message input, otherwise an error will be received.
999#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
1000pub struct OutputFunctionCall {
1001    pub id: String,
1002    #[serde(with = "json_utils::stringified_json")]
1003    pub arguments: serde_json::Value,
1004    pub call_id: String,
1005    pub name: String,
1006    pub status: ToolStatus,
1007}
1008
1009/// The status of a given tool.
1010#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
1011#[serde(rename_all = "snake_case")]
1012pub enum ToolStatus {
1013    InProgress,
1014    Completed,
1015    Incomplete,
1016}
1017
1018/// An output message from OpenAI's Responses API.
1019#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
1020pub struct OutputMessage {
    /// The message ID. Must be included when sending the message back to OpenAI.
1022    pub id: String,
1023    /// The role (currently only Assistant is available as this struct is only created when receiving an LLM message as a response)
1024    pub role: OutputRole,
1025    /// The status of the response
1026    pub status: ResponseStatus,
1027    /// The actual message content
1028    pub content: Vec<AssistantContent>,
1029}
1030
1031/// The role of an output message.
1032#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
1033#[serde(rename_all = "snake_case")]
1034pub enum OutputRole {
1035    Assistant,
1036}
1037
1038impl completion::CompletionModel for ResponsesCompletionModel<reqwest::Client> {
1039    type Response = CompletionResponse;
1040    type StreamingResponse = StreamingCompletionResponse;
1041
1042    #[cfg_attr(feature = "worker", worker::send)]
1043    async fn completion(
1044        &self,
1045        completion_request: crate::completion::CompletionRequest,
1046    ) -> Result<completion::CompletionResponse<Self::Response>, CompletionError> {
1047        let span = if tracing::Span::current().is_disabled() {
1048            info_span!(
1049                target: "rig::completions",
1050                "chat",
1051                gen_ai.operation.name = "chat",
1052                gen_ai.provider.name = tracing::field::Empty,
1053                gen_ai.request.model = tracing::field::Empty,
1054                gen_ai.response.id = tracing::field::Empty,
1055                gen_ai.response.model = tracing::field::Empty,
1056                gen_ai.usage.output_tokens = tracing::field::Empty,
1057                gen_ai.usage.input_tokens = tracing::field::Empty,
1058                gen_ai.input.messages = tracing::field::Empty,
1059                gen_ai.output.messages = tracing::field::Empty,
1060            )
1061        } else {
1062            tracing::Span::current()
1063        };
1064
1065        span.record("gen_ai.provider.name", "openai");
1066        span.record("gen_ai.request.model", &self.model);
1067        let request = self.create_completion_request(completion_request)?;
1068        span.record(
1069            "gen_ai.input.messages",
1070            serde_json::to_string(&request.input)
1071                .expect("openai request to successfully turn into a JSON value"),
1072        );
1073        let body = serde_json::to_vec(&request)?;
1074
1075        let req = self
1076            .client
1077            .post("/responses")?
1078            .header("Content-Type", "application/json")
1079            .body(body)
1080            .map_err(|e| CompletionError::HttpError(e.into()))?;
1081
1082        async move {
1083            let response = self.client.send(req).await?;
1084
1085            if response.status().is_success() {
1086                let t = http_client::text(response).await?;
1087                let response = serde_json::from_str::<Self::Response>(&t)?;
1088                let span = tracing::Span::current();
1089                span.record(
1090                    "gen_ai.output.messages",
1091                    serde_json::to_string(&response.output).unwrap(),
1092                );
1093                span.record("gen_ai.response.id", &response.id);
1094                span.record("gen_ai.response.model", &response.model);
1095                if let Some(ref usage) = response.usage {
1096                    span.record("gen_ai.usage.output_tokens", usage.output_tokens);
1097                    span.record("gen_ai.usage.input_tokens", usage.input_tokens);
1098                }
1099                // We need to call the event here to get the span to actually send anything
1100                tracing::info!("API successfully called");
1101                response.try_into()
1102            } else {
1103                let text = http_client::text(response).await?;
1104                Err(CompletionError::ProviderError(text))
1105            }
1106        }
1107        .instrument(span)
1108        .await
1109    }
1110
1111    #[cfg_attr(feature = "worker", worker::send)]
1112    async fn stream(
1113        &self,
1114        request: crate::completion::CompletionRequest,
1115    ) -> Result<
1116        crate::streaming::StreamingCompletionResponse<Self::StreamingResponse>,
1117        CompletionError,
1118    > {
1119        ResponsesCompletionModel::stream(self, request).await
1120    }
1121}
1122
1123impl TryFrom<CompletionResponse> for completion::CompletionResponse<CompletionResponse> {
1124    type Error = CompletionError;
1125
1126    fn try_from(response: CompletionResponse) -> Result<Self, Self::Error> {
1127        if response.output.is_empty() {
1128            return Err(CompletionError::ResponseError(
1129                "Response contained no parts".to_owned(),
1130            ));
1131        }
1132
1133        let content: Vec<completion::AssistantContent> = response
1134            .output
1135            .iter()
1136            .cloned()
1137            .flat_map(<Vec<completion::AssistantContent>>::from)
1138            .collect();
1139
1140        let choice = OneOrMany::many(content).map_err(|_| {
1141            CompletionError::ResponseError(
1142                "Response contained no message or tool call (empty)".to_owned(),
1143            )
1144        })?;
1145
1146        let usage = response
1147            .usage
1148            .as_ref()
1149            .map(|usage| completion::Usage {
1150                input_tokens: usage.input_tokens,
1151                output_tokens: usage.output_tokens,
1152                total_tokens: usage.total_tokens,
1153            })
1154            .unwrap_or_default();
1155
1156        Ok(completion::CompletionResponse {
1157            choice,
1158            usage,
1159            raw_response: response,
1160        })
1161    }
1162}
1163
1164/// An OpenAI Responses API message.
1165#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
1166#[serde(tag = "role", rename_all = "lowercase")]
1167pub enum Message {
1168    #[serde(alias = "developer")]
1169    System {
1170        #[serde(deserialize_with = "string_or_one_or_many")]
1171        content: OneOrMany<SystemContent>,
1172        #[serde(skip_serializing_if = "Option::is_none")]
1173        name: Option<String>,
1174    },
1175    User {
1176        #[serde(deserialize_with = "string_or_one_or_many")]
1177        content: OneOrMany<UserContent>,
1178        #[serde(skip_serializing_if = "Option::is_none")]
1179        name: Option<String>,
1180    },
1181    Assistant {
1182        content: OneOrMany<AssistantContentType>,
1183        #[serde(skip_serializing_if = "String::is_empty")]
1184        id: String,
1185        #[serde(skip_serializing_if = "Option::is_none")]
1186        name: Option<String>,
1187        status: ToolStatus,
1188    },
1189    #[serde(rename = "tool")]
1190    ToolResult {
1191        tool_call_id: String,
1192        output: String,
1193    },
1194}
1195
1196/// The type of a tool result content item.
1197#[derive(Default, Debug, Serialize, Deserialize, PartialEq, Clone)]
1198#[serde(rename_all = "lowercase")]
1199pub enum ToolResultContentType {
1200    #[default]
1201    Text,
1202}
1203
1204impl Message {
1205    pub fn system(content: &str) -> Self {
1206        Message::System {
1207            content: OneOrMany::one(content.to_owned().into()),
1208            name: None,
1209        }
1210    }
1211}
1212
1213/// Text assistant content.
/// Note that, unlike the Completions API, the text variant here is tagged `output_text` rather than `text`.
1215#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
1216#[serde(tag = "type", rename_all = "snake_case")]
1217pub enum AssistantContent {
1218    OutputText(Text),
1219    Refusal { refusal: String },
1220}
1221
1222impl From<AssistantContent> for completion::AssistantContent {
1223    fn from(value: AssistantContent) -> Self {
1224        match value {
1225            AssistantContent::Refusal { refusal } => {
1226                completion::AssistantContent::Text(Text { text: refusal })
1227            }
1228            AssistantContent::OutputText(Text { text }) => {
1229                completion::AssistantContent::Text(Text { text })
1230            }
1231        }
1232    }
1233}
1234
1235/// The type of assistant content.
1236#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
1237#[serde(untagged)]
1238pub enum AssistantContentType {
1239    Text(AssistantContent),
1240    ToolCall(OutputFunctionCall),
1241    Reasoning(OpenAIReasoning),
1242}
1243
1244/// Different types of user content.
1245#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
1246#[serde(tag = "type", rename_all = "snake_case")]
1247pub enum UserContent {
1248    InputText {
1249        text: String,
1250    },
1251    InputImage {
1252        image_url: String,
1253        #[serde(default)]
1254        detail: ImageDetail,
1255    },
1256    InputFile {
1257        #[serde(skip_serializing_if = "Option::is_none")]
1258        file_url: Option<String>,
1259        #[serde(skip_serializing_if = "Option::is_none")]
1260        file_data: Option<String>,
1261        #[serde(skip_serializing_if = "Option::is_none")]
1262        filename: Option<String>,
1263    },
1264    Audio {
1265        input_audio: InputAudio,
1266    },
1267    #[serde(rename = "tool")]
1268    ToolResult {
1269        tool_call_id: String,
1270        output: String,
1271    },
1272}
1273
1274impl TryFrom<message::Message> for Vec<Message> {
1275    type Error = message::MessageError;
1276
1277    fn try_from(message: message::Message) -> Result<Self, Self::Error> {
1278        match message {
1279            message::Message::User { content } => {
1280                let (tool_results, other_content): (Vec<_>, Vec<_>) = content
1281                    .into_iter()
1282                    .partition(|content| matches!(content, message::UserContent::ToolResult(_)));
1283
                // If a message contains both tool results and other user content, OpenAI will only
                // handle the tool results. It's unlikely that there will be both.
1286                if !tool_results.is_empty() {
1287                    tool_results
1288                        .into_iter()
1289                        .map(|content| match content {
1290                            message::UserContent::ToolResult(message::ToolResult {
1291                                call_id,
1292                                content,
1293                                ..
1294                            }) => Ok::<_, message::MessageError>(Message::ToolResult {
1295                                tool_call_id: call_id.expect("The tool call ID should exist"),
1296                                output: {
1297                                    let res = content.first();
1298                                    match res {
1299                                        completion::message::ToolResultContent::Text(Text {
1300                                            text,
1301                                        }) => text,
                                        _ => return Err(MessageError::ConversionError("This API currently only supports text tool results".into()))
1303                                    }
1304                                },
1305                            }),
1306                            _ => unreachable!(),
1307                        })
1308                        .collect::<Result<Vec<_>, _>>()
1309                } else {
1310                    let other_content = other_content
1311                        .into_iter()
1312                        .map(|content| match content {
1313                            message::UserContent::Text(message::Text { text }) => {
1314                                Ok(UserContent::InputText { text })
1315                            }
1316                            message::UserContent::Image(message::Image {
1317                                data,
1318                                detail,
1319                                media_type,
1320                                ..
1321                            }) => {
1322                                let url = match data {
1323                                    DocumentSourceKind::Base64(data) => {
1324                                        let media_type = if let Some(media_type) = media_type {
1325                                            media_type.to_mime_type().to_string()
1326                                        } else {
1327                                            String::new()
1328                                        };
1329                                        format!("data:{media_type};base64,{data}")
1330                                    }
1331                                    DocumentSourceKind::Url(url) => url,
1332                                    DocumentSourceKind::Raw(_) => {
1333                                        return Err(MessageError::ConversionError(
1334                                            "Raw files not supported, encode as base64 first"
1335                                                .into(),
1336                                        ));
1337                                    }
1338                                    doc => {
1339                                        return Err(MessageError::ConversionError(format!(
1340                                            "Unsupported document type: {doc}"
1341                                        )));
1342                                    }
1343                                };
1344
1345                                Ok(UserContent::InputImage {
1346                                    image_url: url,
1347                                    detail: detail.unwrap_or_default(),
1348                                })
1349                            }
1350                            message::UserContent::Document(message::Document {
1351                                media_type: Some(DocumentMediaType::PDF),
1352                                data,
1353                                ..
1354                            }) => {
1355                                let (file_data, file_url) = match data {
1356                                    DocumentSourceKind::Base64(data) => {
1357                                        (Some(format!("data:application/pdf;base64,{data}")), None)
1358                                    }
1359                                    DocumentSourceKind::Url(url) => (None, Some(url)),
1360                                    DocumentSourceKind::Raw(_) => {
1361                                        return Err(MessageError::ConversionError(
1362                                            "Raw files not supported, encode as base64 first"
1363                                                .into(),
1364                                        ));
1365                                    }
1366                                    doc => {
1367                                        return Err(MessageError::ConversionError(format!(
1368                                            "Unsupported document type: {doc}"
1369                                        )));
1370                                    }
1371                                };
1372
1373                                Ok(UserContent::InputFile {
1374                                    file_url,
1375                                    file_data,
1376                                    filename: Some("document.pdf".into()),
1377                                })
1378                            }
1379                            message::UserContent::Document(message::Document {
1380                                data: DocumentSourceKind::Base64(text),
1381                                ..
1382                            }) => Ok(UserContent::InputText { text }),
1383                            message::UserContent::Audio(message::Audio {
1384                                data: DocumentSourceKind::Base64(data),
1385                                media_type,
1386                                ..
1387                            }) => Ok(UserContent::Audio {
1388                                input_audio: InputAudio {
1389                                    data,
1390                                    format: match media_type {
1391                                        Some(media_type) => media_type,
1392                                        None => AudioMediaType::MP3,
1393                                    },
1394                                },
1395                            }),
1396                            message::UserContent::Audio(_) => Err(MessageError::ConversionError(
1397                                "Audio must be base64 encoded data".into(),
1398                            )),
1399                            _ => unreachable!(),
1400                        })
1401                        .collect::<Result<Vec<_>, _>>()?;
1402
1403                    let other_content = OneOrMany::many(other_content).expect(
1404                        "There must be other content here if there were no tool result content",
1405                    );
1406
1407                    Ok(vec![Message::User {
1408                        content: other_content,
1409                        name: None,
1410                    }])
1411                }
1412            }
1413            message::Message::Assistant { content, id } => {
1414                let assistant_message_id = id;
1415
1416                match content.first() {
1417                    crate::message::AssistantContent::Text(Text { text }) => {
1418                        Ok(vec![Message::Assistant {
1419                            id: assistant_message_id
1420                                .expect("The assistant message ID should exist"),
1421                            status: ToolStatus::Completed,
1422                            content: OneOrMany::one(AssistantContentType::Text(
1423                                AssistantContent::OutputText(Text { text }),
1424                            )),
1425                            name: None,
1426                        }])
1427                    }
1428                    crate::message::AssistantContent::ToolCall(crate::message::ToolCall {
1429                        id,
1430                        call_id,
1431                        function,
1432                    }) => Ok(vec![Message::Assistant {
1433                        content: OneOrMany::one(AssistantContentType::ToolCall(
1434                            OutputFunctionCall {
1435                                call_id: call_id.expect("The call ID should exist"),
1436                                arguments: function.arguments,
1437                                id,
1438                                name: function.name,
1439                                status: ToolStatus::Completed,
1440                            },
1441                        )),
1442                        id: assistant_message_id.expect("The assistant message ID should exist!"),
1443                        name: None,
1444                        status: ToolStatus::Completed,
1445                    }]),
1446                    crate::message::AssistantContent::Reasoning(crate::message::Reasoning {
1447                        id,
1448                        reasoning,
1449                    }) => Ok(vec![Message::Assistant {
1450                        content: OneOrMany::one(AssistantContentType::Reasoning(OpenAIReasoning {
1451                            id: id.expect("An OpenAI-generated ID is required when using OpenAI reasoning items"),
1452                            summary: reasoning.into_iter().map(|x| ReasoningSummary::SummaryText { text: x }).collect(),
1453                            encrypted_content: None,
1454                            status: Some(ToolStatus::Completed),
1455                        })),
1456                        id: assistant_message_id.expect("The assistant message ID should exist!"),
1457                        name: None,
                        status: ToolStatus::Completed,
1459                    }]),
1460                }
1461            }
1462        }
1463    }
1464}
1465
1466impl FromStr for UserContent {
1467    type Err = Infallible;
1468
1469    fn from_str(s: &str) -> Result<Self, Self::Err> {
1470        Ok(UserContent::InputText {
1471            text: s.to_string(),
1472        })
1473    }
1474}