async_openai/types/responses/response.rs

1use crate::error::OpenAIError;
2pub use crate::types::chat::{
3    CompletionTokensDetails, ImageDetail, PromptTokensDetails, ReasoningEffort,
4    ResponseFormatJsonSchema,
5};
6use crate::types::{MCPListToolsTool, MCPTool};
7use derive_builder::Builder;
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10
/// Role of messages in the API.
///
/// Serialized in lowercase (`"user"`, `"assistant"`, `"system"`, `"developer"`);
/// defaults to `User`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    #[default]
    User,
    Assistant,
    System,
    Developer,
}
/// Status of input/output items.
///
/// Serialized in snake_case: `"in_progress"`, `"completed"`, or `"incomplete"`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OutputStatus {
    InProgress,
    Completed,
    Incomplete,
}
30
/// Input to the model: either a plain string or a list of structured items.
///
/// Untagged: deserialization tries `Text` (a JSON string) first, then
/// `Items` (a JSON array).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputParam {
    /// A text input to the model, equivalent to a text input with the
    /// `user` role.
    Text(String),
    /// A list of one or many input items to the model, containing
    /// different content types.
    Items(Vec<InputItem>),
}
41
42/// Content item used to generate a response.
43///
44/// This is a properly discriminated union based on the `type` field, using Rust's
45/// type-safe enum with serde's tag attribute for efficient deserialization.
46///
47/// # OpenAPI Specification
48/// Corresponds to the `Item` schema in the OpenAPI spec with a `type` discriminator.
49#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
50#[serde(tag = "type", rename_all = "snake_case")]
51pub enum Item {
52    /// A message (type: "message").
53    /// Can represent InputMessage (user/system/developer) or OutputMessage (assistant).
54    ///
55    /// InputMessage:
56    ///     A message input to the model with a role indicating instruction following hierarchy.
57    ///     Instructions given with the developer or system role take precedence over instructions given with the user role.
58    /// OutputMessage:
59    ///     A message output from the model.
60    Message(MessageItem),
61
62    /// The results of a file search tool call. See the
63    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
64    FileSearchCall(FileSearchToolCall),
65
66    /// A tool call to a computer use tool. See the
67    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
68    ComputerCall(ComputerToolCall),
69
70    /// The output of a computer tool call.
71    ComputerCallOutput(ComputerCallOutputItemParam),
72
73    /// The results of a web search tool call. See the
74    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
75    WebSearchCall(WebSearchToolCall),
76
77    /// A tool call to run a function. See the
78    ///
79    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
80    FunctionCall(FunctionToolCall),
81
82    /// The output of a function tool call.
83    FunctionCallOutput(FunctionCallOutputItemParam),
84
85    /// A description of the chain of thought used by a reasoning model while generating
86    /// a response. Be sure to include these items in your `input` to the Responses API
87    /// for subsequent turns of a conversation if you are manually
88    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
89    Reasoning(ReasoningItem),
90
91    /// An image generation request made by the model.
92    ImageGenerationCall(ImageGenToolCall),
93
94    /// A tool call to run code.
95    CodeInterpreterCall(CodeInterpreterToolCall),
96
97    /// A tool call to run a command on the local shell.
98    LocalShellCall(LocalShellToolCall),
99
100    /// The output of a local shell tool call.
101    LocalShellCallOutput(LocalShellToolCallOutput),
102
103    /// A tool representing a request to execute one or more shell commands.
104    FunctionShellCall(FunctionShellCallItemParam),
105
106    /// The streamed output items emitted by a function shell tool call.
107    FunctionShellCallOutput(FunctionShellCallOutputItemParam),
108
109    /// A tool call representing a request to create, delete, or update files using diff patches.
110    ApplyPatchCall(ApplyPatchToolCallItemParam),
111
112    /// The streamed output emitted by an apply patch tool call.
113    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
114
115    /// A list of tools available on an MCP server.
116    McpListTools(MCPListTools),
117
118    /// A request for human approval of a tool invocation.
119    McpApprovalRequest(MCPApprovalRequest),
120
121    /// A response to an MCP approval request.
122    McpApprovalResponse(MCPApprovalResponse),
123
124    /// An invocation of a tool on an MCP server.
125    McpCall(MCPToolCall),
126
127    /// The output of a custom tool call from your code, being sent back to the model.
128    CustomToolCallOutput(CustomToolCallOutput),
129
130    /// A call to a custom tool created by the model.
131    CustomToolCall(CustomToolCall),
132}
133
/// Input item that can be used in the context for generating a response.
///
/// This represents the OpenAPI `InputItem` schema which is an `anyOf`:
/// 1. `EasyInputMessage` - Simple, user-friendly message input (can use string content)
/// 2. `Item` - Structured items with proper type discrimination (including InputMessage, OutputMessage, tool calls)
/// 3. `ItemReferenceParam` - Reference to an existing item by ID (type can be null)
///
/// Uses untagged deserialization because these types overlap in structure.
/// Order matters: more specific structures are tried first — do not reorder
/// the variants without re-checking deserialization of overlapping payloads.
///
/// # OpenAPI Specification
/// Corresponds to the `InputItem` schema: `anyOf[EasyInputMessage, Item, ItemReferenceParam]`
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputItem {
    /// A reference to an existing item by ID.
    /// Has a required `id` field and optional `type` (can be "item_reference" or null).
    /// Must be tried first as it's the most minimal structure.
    ItemReference(ItemReference),

    /// All structured items with proper type discrimination.
    /// Includes InputMessage, OutputMessage, and all tool calls/outputs.
    /// Uses the discriminated `Item` enum for efficient, type-safe deserialization.
    Item(Item),

    /// A simple, user-friendly message input (EasyInputMessage).
    /// Supports string content and can include assistant role for previous responses.
    /// Must be tried last as it's the most flexible structure.
    ///
    /// A message input to the model with a role indicating instruction following
    /// hierarchy. Instructions given with the `developer` or `system` role take
    /// precedence over instructions given with the `user` role. Messages with the
    /// `assistant` role are presumed to have been generated by the model in previous
    /// interactions.
    EasyMessage(EasyInputMessage),
}
170
/// A message item used within the `Item` enum.
///
/// Both InputMessage and OutputMessage have `type: "message"`, so we use an untagged
/// enum to distinguish them based on their structure:
/// - OutputMessage: role=assistant, required id & status fields
/// - InputMessage: role=user/system/developer, content is Vec<ContentType>, optional id/status
///
/// Variant order is significant for untagged deserialization: the more
/// constrained `Output` shape must be tried before `Input`.
///
/// Note: EasyInputMessage is NOT included here - it's a separate variant in `InputItem`,
/// not part of the structured `Item` enum.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum MessageItem {
    /// An output message from the model (role: assistant, has required id & status).
    /// This must come first as it has the most specific structure (required id and status fields).
    Output(OutputMessage),

    /// A structured input message (role: user/system/developer, content is Vec<ContentType>).
    /// Has structured content list and optional id/status fields.
    ///
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the `developer` or `system` role take precedence over instructions
    /// given with the `user` role.
    Input(InputMessage),
}
195
/// A reference to an existing item by ID.
///
/// Corresponds to `ItemReferenceParam` in the OpenAPI spec; the `type`
/// field is omitted from serialization when `None`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ItemReference {
    /// The type of item to reference. Can be "item_reference" or null.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub r#type: Option<ItemReferenceType>,
    /// The ID of the item to reference.
    pub id: String,
}
205
/// Type tag for [`ItemReference`]; serializes as `"item_reference"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ItemReferenceType {
    ItemReference,
}
211
/// Output from a function call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionCallOutputItemParam {
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// Text, image, or file output of the function tool call.
    pub output: FunctionCallOutput,
    /// The unique ID of the function tool call output.
    /// Populated when this item is returned via API; omit when creating.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
228
/// The payload of a function tool call output: either a raw JSON string
/// or a list of structured content parts. Untagged: a JSON string maps to
/// `Text`, a JSON array to `Content`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum FunctionCallOutput {
    /// A JSON string of the output of the function tool call.
    Text(String),
    Content(Vec<InputContent>), // TODO use shape which allows null from OpenAPI spec?
}
236
/// Output from a computer tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallOutputItemParam {
    /// The ID of the computer tool call that produced the output.
    pub call_id: String,
    /// A computer screenshot image used with the computer use tool.
    pub output: ComputerScreenshotImage,
    /// The safety checks reported by the API that have been acknowledged by the developer.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acknowledged_safety_checks: Option<Vec<ComputerCallSafetyCheckParam>>,
    /// The unique ID of the computer tool call output. Optional when creating.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the message input. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when input items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
254
/// Type tag for [`ComputerScreenshotImage`]; serializes as `"computer_screenshot"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ComputerScreenshotImageType {
    ComputerScreenshot,
}
260
/// A computer screenshot image used with the computer use tool.
///
/// NOTE(review): both `file_id` and `image_url` are optional here; presumably
/// at least one must be set for the API to accept the item — confirm against
/// the OpenAPI spec.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerScreenshotImage {
    /// Specifies the event type. For a computer screenshot, this property is always
    /// set to `computer_screenshot`.
    pub r#type: ComputerScreenshotImageType,
    /// The identifier of an uploaded file that contains the screenshot.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the screenshot image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
274
/// Output from a local shell tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCallOutput {
    /// The unique ID of the local shell tool call generated by the model.
    // NOTE(review): unlike the other *Output params, `id` is required here and
    // there is no separate `call_id` — confirm this matches the OpenAPI schema.
    pub id: String,

    /// A JSON string of the output of the local shell tool call.
    pub output: String,

    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
288
/// Output from a local shell command execution.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellOutput {
    /// The stdout output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stdout: Option<String>,

    /// The stderr output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stderr: Option<String>,

    /// The exit code of the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exit_code: Option<i32>,
}
304
/// An MCP approval response that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalResponse {
    /// The ID of the approval request being answered.
    pub approval_request_id: String,

    /// Whether the request was approved.
    pub approve: bool,

    /// The unique ID of the approval response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,

    /// Optional reason for the decision.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
322
/// The payload of a custom tool call output: either a plain string or a
/// list of structured content parts. Untagged: a JSON string maps to
/// `Text`, a JSON array to `List`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum CustomToolCallOutputOutput {
    /// A string of the output of the custom tool call.
    Text(String),
    /// Text, image, or file output of the custom tool call.
    List(Vec<InputContent>),
}
331
/// The output of a custom tool call from your code, being sent back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CustomToolCallOutput {
    /// The call ID, used to map this custom tool call output to a custom tool call.
    pub call_id: String,

    /// The output from the custom tool call generated by your code.
    /// Can be a string or a list of output content.
    pub output: CustomToolCallOutputOutput,

    /// The unique ID of the custom tool call output in the OpenAI platform.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
345
/// A simplified message input to the model (EasyInputMessage in the OpenAPI spec).
///
/// This is the most user-friendly way to provide messages, supporting both simple
/// string content and structured content. Role can include `assistant` for providing
/// previous assistant responses.
///
/// Construct via [`EasyInputMessageArgs`] (generated by `derive_builder`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "EasyInputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct EasyInputMessage {
    /// The type of the message input. Always set to `message`.
    pub r#type: MessageType,
    /// The role of the message input. One of `user`, `assistant`, `system`, or `developer`.
    pub role: Role,
    /// Text, image, or audio input to the model, used to generate a response.
    /// Can also contain previous assistant responses.
    pub content: EasyInputContent,
}
368
/// A structured message input to the model (InputMessage in the OpenAPI spec).
///
/// This variant requires structured content (not a simple string) and does not support
/// the `assistant` role (use OutputMessage for that). status is populated when items are returned via API.
///
/// Construct via [`InputMessageArgs`] (generated by `derive_builder`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputMessage {
    /// A list of one or many input items to the model, containing different content types.
    pub content: Vec<InputContent>,
    /// The role of the message input. One of `user`, `system`, or `developer`.
    /// Note: `assistant` is NOT allowed here; use OutputMessage instead.
    pub role: InputRole,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
    // NOTE(review): the OpenAPI spec also defines a `type: "message"` field for
    // InputMessage; it is intentionally omitted here — confirm the API accepts
    // input messages without it before adding.
}
394
/// The role for an input message - can only be `user`, `system`, or `developer`.
/// This type ensures type safety by excluding the `assistant` role (use OutputMessage for that).
///
/// Serialized in lowercase; defaults to `User`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum InputRole {
    #[default]
    User,
    System,
    Developer,
}
405
/// Content for EasyInputMessage - can be a simple string or structured list.
///
/// Untagged: a JSON string maps to `Text`, a JSON array to `ContentList`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum EasyInputContent {
    /// A text input to the model.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    ContentList(Vec<InputContent>),
}
415
/// Parts of a message: text, image, or file.
///
/// Discriminated by the `type` field: `"input_text"`, `"input_image"`, or
/// `"input_file"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    /// A text input to the model.
    InputText(InputTextContent),
    /// An image input to the model. Learn about
    /// [image inputs](https://platform.openai.com/docs/guides/vision).
    InputImage(InputImageContent),
    /// A file input to the model.
    InputFile(InputFileContent),
}
428
/// A text input to the model (the `input_text` content part).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct InputTextContent {
    /// The text input to the model.
    pub text: String,
}
434
/// An image input to the model (the `input_image` content part).
///
/// Construct via [`InputImageArgs`] (generated by `derive_builder`).
/// NOTE(review): both `file_id` and `image_url` are optional; presumably one
/// of the two must be provided — confirm against the OpenAPI spec.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputImageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputImageContent {
    /// The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`.
    /// Defaults to `auto`.
    pub detail: ImageDetail,
    /// The ID of the file to be sent to the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image
    /// in a data URL.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
455
456#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
457#[builder(
458    name = "InputFileArgs",
459    pattern = "mutable",
460    setter(into, strip_option),
461    default
462)]
463#[builder(build_fn(error = "OpenAIError"))]
464pub struct InputFileContent {
465    /// The content of the file to be sent to the model.
466    #[serde(skip_serializing_if = "Option::is_none")]
467    file_data: Option<String>,
468    /// The ID of the file to be sent to the model.
469    #[serde(skip_serializing_if = "Option::is_none")]
470    file_id: Option<String>,
471    /// The URL of the file to be sent to the model.
472    #[serde(skip_serializing_if = "Option::is_none")]
473    file_url: Option<String>,
474    /// The name of the file to be sent to the model.
475    #[serde(skip_serializing_if = "Option::is_none")]
476    filename: Option<String>,
477}
478
/// The conversation that a response belongs to.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Conversation {
    /// The unique ID of the conversation.
    pub id: String,
}
484
/// Conversation reference for a request: either a bare conversation ID
/// string or a `Conversation` object. Untagged: a JSON string maps to
/// `ConversationID`, an object to `Object`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ConversationParam {
    /// The unique ID of the conversation.
    ConversationID(String),
    /// The conversation that this response belongs to.
    Object(Conversation),
}
493
/// Additional output data that can be requested via the `include` request
/// parameter. Each variant serializes to its dotted wire name (see the
/// `rename` attributes).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum IncludeEnum {
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    #[serde(rename = "web_search_call.results")]
    WebSearchCallResults,
    #[serde(rename = "web_search_call.action.sources")]
    WebSearchCallActionSources,
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    #[serde(rename = "computer_call_output.output.image_url")]
    ComputerCallOutputOutputImageUrl,
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    #[serde(rename = "message.output_text.logprobs")]
    MessageOutputTextLogprobs,
}
513
/// Options for streaming responses; only meaningful when `stream: true`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseStreamOptions {
    /// When true, stream obfuscation will be enabled. Stream obfuscation adds
    /// random characters to an `obfuscation` field on streaming delta events to
    /// normalize payload sizes as a mitigation to certain side-channel attacks.
    /// These obfuscation fields are included by default, but add a small amount
    /// of overhead to the data stream. You can set `include_obfuscation` to
    /// false to optimize for bandwidth if you trust the network links between
    /// your application and the OpenAI API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_obfuscation: Option<bool>,
}
526
527/// Builder for a Responses API request.
528#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
529#[builder(
530    name = "CreateResponseArgs",
531    pattern = "mutable",
532    setter(into, strip_option),
533    default
534)]
535#[builder(build_fn(error = "OpenAIError"))]
536pub struct CreateResponse {
537    /// Whether to run the model response in the background.
538    /// [Learn more](https://platform.openai.com/docs/guides/background).
539    #[serde(skip_serializing_if = "Option::is_none")]
540    pub background: Option<bool>,
541
542    /// The conversation that this response belongs to. Items from this conversation are prepended to
543    ///  `input_items` for this response request.
544    ///
545    /// Input items and output items from this response are automatically added to this conversation after
546    /// this response completes.
547    #[serde(skip_serializing_if = "Option::is_none")]
548    pub conversation: Option<ConversationParam>,
549
550    /// Specify additional output data to include in the model response. Currently supported
551    /// values are:
552    ///
553    /// - `web_search_call.action.sources`: Include the sources of the web search tool call.
554    ///
555    /// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
556    ///   interpreter tool call items.
557    ///
558    /// - `computer_call_output.output.image_url`: Include image urls from the computer call
559    ///   output.
560    ///
561    /// - `file_search_call.results`: Include the search results of the file search tool call.
562    ///
563    /// - `message.input_image.image_url`: Include image urls from the input message.
564    ///
565    /// - `message.output_text.logprobs`: Include logprobs with assistant messages.
566    ///
567    /// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
568    ///   reasoning item outputs. This enables reasoning items to be used in multi-turn
569    ///   conversations when using the Responses API statelessly (like when the `store` parameter is
570    ///   set to `false`, or when an organization is enrolled in the zero data retention program).
571    #[serde(skip_serializing_if = "Option::is_none")]
572    pub include: Option<Vec<IncludeEnum>>,
573
574    /// Text, image, or file inputs to the model, used to generate a response.
575    ///
576    /// Learn more:
577    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
578    /// - [Image inputs](https://platform.openai.com/docs/guides/images)
579    /// - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
580    /// - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
581    /// - [Function calling](https://platform.openai.com/docs/guides/function-calling)
582    pub input: InputParam,
583
584    /// A system (or developer) message inserted into the model's context.
585    ///
586    /// When using along with `previous_response_id`, the instructions from a previous
587    /// response will not be carried over to the next response. This makes it simple
588    /// to swap out system (or developer) messages in new responses.
589    #[serde(skip_serializing_if = "Option::is_none")]
590    pub instructions: Option<String>,
591
592    /// An upper bound for the number of tokens that can be generated for a response, including
593    /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
594    #[serde(skip_serializing_if = "Option::is_none")]
595    pub max_output_tokens: Option<u32>,
596
597    /// The maximum number of total calls to built-in tools that can be processed in a response. This
598    /// maximum number applies across all built-in tool calls, not per individual tool. Any further
599    /// attempts to call a tool by the model will be ignored.
600    #[serde(skip_serializing_if = "Option::is_none")]
601    pub max_tool_calls: Option<u32>,
602
603    /// Set of 16 key-value pairs that can be attached to an object. This can be
604    /// useful for storing additional information about the object in a structured
605    /// format, and querying for objects via API or the dashboard.
606    ///
607    /// Keys are strings with a maximum length of 64 characters. Values are
608    /// strings with a maximum length of 512 characters.
609    #[serde(skip_serializing_if = "Option::is_none")]
610    pub metadata: Option<HashMap<String, String>>,
611
612    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
613    /// offers a wide range of models with different capabilities, performance
614    /// characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
615    /// to browse and compare available models.
616    #[serde(skip_serializing_if = "Option::is_none")]
617    pub model: Option<String>,
618
619    /// Whether to allow the model to run tool calls in parallel.
620    #[serde(skip_serializing_if = "Option::is_none")]
621    pub parallel_tool_calls: Option<bool>,
622
623    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
624    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
625    /// Cannot be used in conjunction with `conversation`.
626    #[serde(skip_serializing_if = "Option::is_none")]
627    pub previous_response_id: Option<String>,
628
629    /// Reference to a prompt template and its variables.
630    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
631    #[serde(skip_serializing_if = "Option::is_none")]
632    pub prompt: Option<Prompt>,
633
634    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
635    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
636    #[serde(skip_serializing_if = "Option::is_none")]
637    pub prompt_cache_key: Option<String>,
638
639    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
640    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
641    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).    
642    #[serde(skip_serializing_if = "Option::is_none")]
643    pub prompt_cache_retention: Option<PromptCacheRetention>,
644
645    /// **gpt-5 and o-series models only**
646    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
647    #[serde(skip_serializing_if = "Option::is_none")]
648    pub reasoning: Option<Reasoning>,
649
650    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
651    /// usage policies.
652    ///
653    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
654    /// or email address, in order to avoid sending us any identifying information. [Learn
655    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
656    #[serde(skip_serializing_if = "Option::is_none")]
657    pub safety_identifier: Option<String>,
658
659    /// Specifies the processing type used for serving the request.
660    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
661    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
662    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
663    /// - When not set, the default behavior is 'auto'.
664    ///
665    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
666    #[serde(skip_serializing_if = "Option::is_none")]
667    pub service_tier: Option<ServiceTier>,
668
669    /// Whether to store the generated model response for later retrieval via API.
670    #[serde(skip_serializing_if = "Option::is_none")]
671    pub store: Option<bool>,
672
673    /// If set to true, the model response data will be streamed to the client
674    /// as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
675    /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
676    /// for more information.
677    #[serde(skip_serializing_if = "Option::is_none")]
678    pub stream: Option<bool>,
679
680    /// Options for streaming responses. Only set this when you set `stream: true`.
681    #[serde(skip_serializing_if = "Option::is_none")]
682    pub stream_options: Option<ResponseStreamOptions>,
683
684    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8
685    /// will make the output more random, while lower values like 0.2 will make it
686    /// more focused and deterministic. We generally recommend altering this or
687    /// `top_p` but not both.
688    #[serde(skip_serializing_if = "Option::is_none")]
689    pub temperature: Option<f32>,
690
691    /// Configuration options for a text response from the model. Can be plain
692    /// text or structured JSON data. Learn more:
693    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
694    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
695    #[serde(skip_serializing_if = "Option::is_none")]
696    pub text: Option<ResponseTextParam>,
697
698    /// How the model should select which tool (or tools) to use when generating
699    /// a response. See the `tools` parameter to see how to specify which tools
700    /// the model can call.
701    #[serde(skip_serializing_if = "Option::is_none")]
702    pub tool_choice: Option<ToolChoiceParam>,
703
704    /// An array of tools the model may call while generating a response. You
705    /// can specify which tool to use by setting the `tool_choice` parameter.
706    ///
707    /// We support the following categories of tools:
708    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
709    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
710    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
711    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
712    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
713    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
714    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
715    /// - **Function calls (custom tools)**: Functions that are defined by you,
716    ///   enabling the model to call your own code with strongly typed arguments
717    ///   and outputs. Learn more about
718    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
719    ///   custom tools to call your own code.
720    #[serde(skip_serializing_if = "Option::is_none")]
721    pub tools: Option<Vec<Tool>>,
722
723    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
724    /// token position, each with an associated log probability.
725    #[serde(skip_serializing_if = "Option::is_none")]
726    pub top_logprobs: Option<u8>,
727
728    /// An alternative to sampling with temperature, called nucleus sampling,
729    /// where the model considers the results of the tokens with top_p probability
730    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
731    /// are considered.
732    ///
733    /// We generally recommend altering this or `temperature` but not both.
734    #[serde(skip_serializing_if = "Option::is_none")]
735    pub top_p: Option<f32>,
736
737    ///The truncation strategy to use for the model response.
738    /// - `auto`: If the input to this Response exceeds
739    ///   the model's context window size, the model will truncate the
740    ///   response to fit the context window by dropping items from the beginning of the conversation.
741    /// - `disabled` (default): If the input size will exceed the context window
742    ///   size for a model, the request will fail with a 400 error.
743    #[serde(skip_serializing_if = "Option::is_none")]
744    pub truncation: Option<Truncation>,
745}
746
/// Substitution values for variables in a [`Prompt`] template.
///
/// Serialized untagged: a bare string, a single input content item
/// (such as an image or file), or any other JSON value.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ResponsePromptVariables {
    /// A plain string substitution value.
    String(String),
    /// A Response input content item, such as an image or file.
    Content(InputContent),
    /// Escape hatch: any other JSON value.
    Custom(serde_json::Value),
}

/// Reference to a reusable prompt template, with an optional version
/// and optional variable substitutions.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Prompt {
    /// The unique identifier of the prompt template to use.
    pub id: String,

    /// Optional version of the prompt template.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,

    /// Optional map of values to substitute in for variables in your
    /// prompt. The substitution values can either be strings, or other
    /// Response input types like images or files.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variables: Option<ResponsePromptVariables>,
}

/// Processing tier used to serve the request. See the `service_tier`
/// request parameter for the semantics of each value.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ServiceTier {
    /// Default behavior when the parameter is not set.
    #[default]
    Auto,
    /// Standard pricing and performance for the selected model.
    Default,
    /// Flex processing service tier.
    Flex,
    /// Scale service tier.
    Scale,
    /// Priority processing service tier.
    Priority,
}

/// Truncation strategies.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Truncation {
    /// Drop items from the beginning of the conversation so the response
    /// fits the model's context window.
    Auto,
    /// Fail with a 400 error if the input exceeds the context window
    /// (the API default).
    Disabled,
}

/// Billing information attached to a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Billing {
    /// The party billed for the request — TODO(review): allowed values are
    /// not documented in this file; confirm against the API reference.
    pub payer: String,
}
794
/// o-series reasoning settings.
///
/// A builder is generated as `ReasoningArgs`; its `build()` returns
/// `Result<Reasoning, OpenAIError>`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ReasoningArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct Reasoning {
    /// Constrains effort on reasoning for
    /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
    /// reasoning effort can result in faster responses and fewer tokens used
    /// on reasoning in a response.
    ///
    /// Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option<ReasoningEffort>,
    /// A summary of the reasoning performed by the model. This can be
    /// useful for debugging and understanding the model's reasoning process.
    /// One of `auto`, `concise`, or `detailed`.
    ///
    /// `concise` is only supported for `computer-use-preview` models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummary>,
}
822
/// Verbosity of the model's response: `low`, `medium`, or `high`.
/// Used by `ResponseTextParam::verbosity`.
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Verbosity {
    Low,
    Medium,
    High,
}

/// Level of detail for a reasoning summary: `auto`, `concise`, or `detailed`.
///
/// `concise` is only supported for `computer-use-preview` models.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ReasoningSummary {
    Auto,
    Concise,
    Detailed,
}

/// The retention policy for the prompt cache.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum PromptCacheRetention {
    /// Serialized as `in-memory`.
    #[serde(rename = "in-memory")]
    InMemory,
    /// Serialized as `24h`: retain the prompt cache for 24 hours.
    #[serde(rename = "24h")]
    Hours24,
}
848
/// Configuration for text response format.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTextParam {
    /// An object specifying the format that the model must output.
    ///
    /// Configuring `{ "type": "json_schema" }` enables Structured Outputs,
    /// which ensures the model will match your supplied JSON schema. Learn more in the
    /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
    ///
    /// The default format is `{ "type": "text" }` with no additional options.
    ///
    /// **Not recommended for gpt-4o and newer models:**
    ///
    /// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
    /// ensures the message the model generates is valid JSON. Using `json_schema`
    /// is preferred for models that support it.
    pub format: TextResponseFormatConfiguration,

    /// Constrains the verbosity of the model's response. Lower values will result in
    /// more concise responses, while higher values will result in more verbose responses.
    ///
    /// Currently supported values are `low`, `medium`, and `high`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<Verbosity>,
}

/// Output format selector, serialized with a snake_case `type` tag
/// (`text`, `json_object`, or `json_schema`).
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum TextResponseFormatConfiguration {
    /// Default response format. Used to generate text responses.
    Text,
    /// JSON object response format. An older method of generating JSON responses.
    /// Using `json_schema` is recommended for models that support it.
    /// Note that the model will not generate JSON without a system or user message
    /// instructing it to do so.
    JsonObject,
    /// JSON Schema response format. Used to generate structured JSON responses.
    /// Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
    JsonSchema(ResponseFormatJsonSchema),
}
889
/// Definitions for model-callable tools.
///
/// Serialized with a snake_case `type` tag; variants carrying an explicit
/// `#[serde(rename = ...)]` use dated type strings.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Tool {
    /// Defines a function in your own code the model can choose to call. Learn more about [function
    /// calling](https://platform.openai.com/docs/guides/tools).
    Function(FunctionTool),
    /// A tool that searches for relevant content from uploaded files. Learn more about the [file search
    /// tool](https://platform.openai.com/docs/guides/tools-file-search).
    FileSearch(FileSearchTool),
    /// A tool that controls a virtual computer. Learn more about the [computer
    /// use tool](https://platform.openai.com/docs/guides/tools-computer-use).
    ComputerUsePreview(ComputerUsePreviewTool),
    /// Search the Internet for sources related to the prompt. Learn more about the
    /// [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearch(WebSearchTool),
    /// type: web_search_2025_08_26
    #[serde(rename = "web_search_2025_08_26")]
    WebSearch20250826(WebSearchTool),
    /// Give the model access to additional tools via remote Model Context Protocol
    /// (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
    Mcp(MCPTool),
    /// A tool that runs Python code to help generate a response to a prompt.
    CodeInterpreter(CodeInterpreterTool),
    /// A tool that generates images using a model like `gpt-image-1`.
    ImageGeneration(ImageGenTool),
    /// A tool that allows the model to execute shell commands in a local environment.
    LocalShell,
    /// A tool that allows the model to execute shell commands.
    Shell,
    /// A custom tool that processes input using a specified format. Learn more about [custom
    /// tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
    Custom(CustomToolParam),
    /// This tool searches the web for relevant results to use in a response. Learn more about the [web search
    /// tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearchPreview(WebSearchTool),
    /// type: web_search_preview_2025_03_11
    #[serde(rename = "web_search_preview_2025_03_11")]
    WebSearchPreview20250311(WebSearchTool),
    /// Allows the assistant to create, delete, or update files using unified diffs.
    ApplyPatch,
}
932
933#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
934pub struct CustomToolParam {
935    /// The name of the custom tool, used to identify it in tool calls.
936    pub name: String,
937    /// Optional description of the custom tool, used to provide more context.
938    pub description: Option<String>,
939    /// The input format for the custom tool. Default is unconstrained text.
940    pub format: CustomToolParamFormat,
941}
942
/// Syntax used for a custom-tool grammar definition.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum GrammarSyntax {
    /// Lark grammar syntax.
    Lark,
    /// Regular-expression syntax (the default).
    #[default]
    Regex,
}

/// A user-defined grammar constraining custom-tool input.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
pub struct CustomGrammarFormatParam {
    /// The grammar definition.
    pub definition: String,
    /// The syntax of the grammar definition. One of `lark` or `regex`.
    pub syntax: GrammarSyntax,
}

/// The input format for a custom tool, serialized with a lowercase `type` tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum CustomToolParamFormat {
    /// Unconstrained free-form text.
    #[default]
    Text,
    /// A grammar defined by the user.
    Grammar(CustomGrammarFormatParam),
}
968
/// File search tool configuration. Learn more about the
/// [file search tool](https://platform.openai.com/docs/guides/tools-file-search).
///
/// A builder is generated as `FileSearchToolArgs`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "FileSearchToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct FileSearchTool {
    /// The IDs of the vector stores to search.
    pub vector_store_ids: Vec<String>,
    /// The maximum number of results to return. This number should be between 1 and 50 inclusive.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_num_results: Option<u32>,
    /// A filter to apply.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filters: Option<Filter>,
    /// Ranking options for search.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ranking_options: Option<RankingOptions>,
}
990
991#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
992#[builder(
993    name = "FunctionToolArgs",
994    pattern = "mutable",
995    setter(into, strip_option),
996    default
997)]
998pub struct FunctionTool {
999    /// The name of the function to call.
1000    pub name: String,
1001    /// A JSON schema object describing the parameters of the function.
1002    #[serde(skip_serializing_if = "Option::is_none")]
1003    pub parameters: Option<serde_json::Value>,
1004    /// Whether to enforce strict parameter validation. Default `true`.
1005    #[serde(skip_serializing_if = "Option::is_none")]
1006    pub strict: Option<bool>,
1007    /// A description of the function. Used by the model to determine whether or not to call the
1008    /// function.
1009    #[serde(skip_serializing_if = "Option::is_none")]
1010    pub description: Option<String>,
1011}
1012
/// Domain filters for the web search tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolFilters {
    /// Allowed domains for the search. If not provided, all domains are allowed.
    /// Subdomains of the provided domains are allowed as well.
    ///
    /// Example: `["pubmed.ncbi.nlm.nih.gov"]`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allowed_domains: Option<Vec<String>>,
}
1022
1023#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1024#[builder(
1025    name = "WebSearchToolArgs",
1026    pattern = "mutable",
1027    setter(into, strip_option),
1028    default
1029)]
1030pub struct WebSearchTool {
1031    /// Filters for the search.
1032    #[serde(skip_serializing_if = "Option::is_none")]
1033    pub filters: Option<WebSearchToolFilters>,
1034    /// The approximate location of the user.
1035    #[serde(skip_serializing_if = "Option::is_none")]
1036    pub user_location: Option<WebSearchApproximateLocation>,
1037    /// High level guidance for the amount of context window space to use for the search. One of `low`,
1038    /// `medium`, or `high`. `medium` is the default.
1039    #[serde(skip_serializing_if = "Option::is_none")]
1040    pub search_context_size: Option<WebSearchToolSearchContextSize>,
1041}
1042
/// Amount of context window space to use for web search.
/// `medium` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchToolSearchContextSize {
    Low,
    #[default]
    Medium,
    High,
}

/// The type of computer environment controlled by the computer-use tool.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ComputerEnvironment {
    Windows,
    Mac,
    Linux,
    Ubuntu,
    #[default]
    Browser,
}
1062
1063#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1064#[builder(
1065    name = "ComputerUsePreviewToolArgs",
1066    pattern = "mutable",
1067    setter(into, strip_option),
1068    default
1069)]
1070pub struct ComputerUsePreviewTool {
1071    /// The type of computer environment to control.
1072    environment: ComputerEnvironment,
1073    /// The width of the computer display.
1074    display_width: u32,
1075    /// The height of the computer display.
1076    display_height: u32,
1077}
1078
/// The ranker version used for file search.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum RankVersionType {
    /// Serialized as `auto`.
    #[serde(rename = "auto")]
    Auto,
    /// Serialized as `default-2024-11-15`.
    #[serde(rename = "default-2024-11-15")]
    Default20241115,
}

/// Weights for reciprocal rank fusion when hybrid search is enabled.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HybridSearch {
    /// The weight of the embedding in the reciprocal ranking fusion.
    pub embedding_weight: f32,
    /// The weight of the text in the reciprocal ranking fusion.
    pub text_weight: f32,
}

/// Options for search result ranking.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RankingOptions {
    /// Weights that control how reciprocal rank fusion balances semantic embedding matches versus
    /// sparse keyword matches when hybrid search is enabled.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hybrid_search: Option<HybridSearch>,
    /// The ranker to use for the file search.
    pub ranker: RankVersionType,
    /// The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will
    /// attempt to return only the most relevant results, but may return fewer results.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub score_threshold: Option<f32>,
}
1109
/// Filters for file search.
///
/// Untagged: serde distinguishes the variants by their field shapes.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum Filter {
    /// A filter used to compare a specified attribute key to a given value using a defined
    /// comparison operation.
    Comparison(ComparisonFilter),
    /// Combine multiple filters using `and` or `or`.
    Compound(CompoundFilter),
}

/// Single comparison filter.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComparisonFilter {
    /// Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, `nin`.
    /// - `eq`: equals
    /// - `ne`: not equal
    /// - `gt`: greater than
    /// - `gte`: greater than or equal
    /// - `lt`: less than
    /// - `lte`: less than or equal
    /// - `in`: in
    /// - `nin`: not in
    pub r#type: ComparisonType,
    /// The key to compare against the value.
    pub key: String,
    /// The value to compare against the attribute key; supports string, number, or boolean types.
    pub value: serde_json::Value,
}

/// Comparison operator for a [`ComparisonFilter`]; each variant serializes
/// to its short operator string (`eq`, `ne`, ...).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum ComparisonType {
    #[serde(rename = "eq")]
    Equals,
    #[serde(rename = "ne")]
    NotEquals,
    #[serde(rename = "gt")]
    GreaterThan,
    #[serde(rename = "gte")]
    GreaterThanOrEqual,
    #[serde(rename = "lt")]
    LessThan,
    #[serde(rename = "lte")]
    LessThanOrEqual,
    #[serde(rename = "in")]
    In,
    #[serde(rename = "nin")]
    NotIn,
}

/// Combine multiple filters using `and` or `or`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompoundFilter {
    /// Type of operation: `and` or `or`.
    pub r#type: CompoundType,
    /// Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
    pub filters: Vec<Filter>,
}

/// Logical operator for a [`CompoundFilter`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum CompoundType {
    And,
    Or,
}
1175
/// Location approximation marker. Always serializes as `approximate`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchApproximateLocationType {
    #[default]
    Approximate,
}

/// Approximate user location for web search.
///
/// A builder is generated as `WebSearchApproximateLocationArgs`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "WebSearchApproximateLocationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct WebSearchApproximateLocation {
    /// The type of location approximation. Always `approximate`.
    pub r#type: WebSearchApproximateLocationType,
    /// Free text input for the city of the user, e.g. `San Francisco`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub city: Option<String>,
    /// The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,
    /// e.g. `US`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub country: Option<String>,
    /// Free text input for the region of the user, e.g. `California`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub region: Option<String>,
    /// The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g.
    /// `America/Los_Angeles`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timezone: Option<String>,
}
1210
/// Container configuration for a code interpreter.
///
/// Tagged by `type` for the `auto` variant; the `ContainerID` variant is
/// untagged, so a bare JSON string deserializes as a container ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolContainer {
    /// Configuration for a code interpreter container. Optionally specify the IDs of the
    /// files to run the code on.
    Auto(CodeInterpreterContainerAuto),

    /// The container ID.
    #[serde(untagged)]
    ContainerID(String),
}

/// Auto configuration for code interpreter container.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct CodeInterpreterContainerAuto {
    /// An optional list of uploaded files to make available to your code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_ids: Option<Vec<String>>,

    /// Memory limit for the container — NOTE(review): units are not shown
    /// here (presumably bytes); confirm against the API reference.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_limit: Option<u64>,
}

/// Code interpreter tool configuration.
///
/// A builder is generated as `CodeInterpreterToolArgs`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "CodeInterpreterToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CodeInterpreterTool {
    /// The code interpreter container. Can be a container ID or an object that
    /// specifies uploaded file IDs to make available to your code.
    pub container: CodeInterpreterToolContainer,
}
1248
/// Optional inpainting mask for image generation; supply either a
/// base64-encoded image URL or an uploaded file ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolInputImageMask {
    /// Base64-encoded mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
    /// File ID for the mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
}
1258
1259#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1260#[serde(rename_all = "lowercase")]
1261pub enum InputFidelity {
1262    #[default]
1263    High,
1264    Low,
1265}
1266
/// Moderation level for generated images. Default: `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolModeration {
    #[default]
    Auto,
    Low,
}

/// Image generation tool definition.
///
/// A builder is generated as `ImageGenerationArgs`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ImageGenerationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct ImageGenTool {
    /// Background type for the generated image. One of `transparent`,
    /// `opaque`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<ImageGenToolBackground>,
    /// Control how much effort the model will exert to match the style and features, especially facial features,
    /// of input images. This parameter is only supported for `gpt-image-1`. Unsupported
    /// for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_fidelity: Option<InputFidelity>,
    /// Optional mask for inpainting. Contains `image_url`
    /// (string, optional) and `file_id` (string, optional).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_image_mask: Option<ImageGenToolInputImageMask>,
    /// The image generation model to use. Default: `gpt-image-1`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Moderation level for the generated image. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub moderation: Option<ImageGenToolModeration>,
    /// Compression level for the output image. Default: 100.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_compression: Option<u8>,
    /// The output format of the generated image. One of `png`, `webp`, or
    /// `jpeg`. Default: `png`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_format: Option<ImageGenToolOutputFormat>,
    /// Number of partial images to generate in streaming mode, from 0 (default value) to 3.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub partial_images: Option<u8>,
    /// The quality of the generated image. One of `low`, `medium`, `high`,
    /// or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quality: Option<ImageGenToolQuality>,
    /// The size of the generated image. One of `1024x1024`, `1024x1536`,
    /// `1536x1024`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<ImageGenToolSize>,
}
1323
/// Background type for a generated image. Default: `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolBackground {
    Transparent,
    Opaque,
    #[default]
    Auto,
}

/// Output format for a generated image. Default: `png`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolOutputFormat {
    #[default]
    Png,
    Webp,
    Jpeg,
}

/// Quality of a generated image. Default: `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolQuality {
    Low,
    Medium,
    High,
    #[default]
    Auto,
}

/// Size of a generated image; fixed sizes serialize as `WxH` strings.
/// Default: `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolSize {
    #[default]
    Auto,
    #[serde(rename = "1024x1024")]
    Size1024x1024,
    #[serde(rename = "1024x1536")]
    Size1024x1536,
    #[serde(rename = "1536x1024")]
    Size1536x1024,
}
1364
/// Mode for a constrained tool set: `auto` or `required`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceAllowedMode {
    Auto,
    Required,
}

/// Constrains the model to a pre-defined set of allowed tools.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceAllowed {
    /// Constrains the tools available to the model to a pre-defined set.
    ///
    /// `auto` allows the model to pick from among the allowed tools and generate a
    /// message.
    ///
    /// `required` requires the model to call one or more of the allowed tools.
    pub mode: ToolChoiceAllowedMode,
    /// A list of tool definitions that the model should be allowed to call.
    ///
    /// For the Responses API, the list of tool definitions might look like:
    /// ```json
    /// [
    ///   { "type": "function", "name": "get_weather" },
    ///   { "type": "mcp", "server_label": "deepwiki" },
    ///   { "type": "image_generation" }
    /// ]
    /// ```
    pub tools: Vec<serde_json::Value>,
}

/// The type of hosted tool the model should use. Learn more about
/// [built-in tools](https://platform.openai.com/docs/guides/tools).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceTypes {
    FileSearch,
    WebSearchPreview,
    ComputerUsePreview,
    CodeInterpreter,
    ImageGeneration,
}
1405
/// Forces the model to call a specific function.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceFunction {
    /// The name of the function to call.
    pub name: String,
}

/// Forces the model to call a specific tool on a remote MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceMCP {
    /// The name of the tool to call on the server.
    pub name: String,
    /// The label of the MCP server to use.
    pub server_label: String,
}

/// Forces the model to call a specific custom tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceCustom {
    /// The name of the custom tool to call.
    pub name: String,
}
1425
/// How the model should select which tool (or tools) to use.
///
/// Tagged by `type` for the struct-carrying variants; `Hosted` and `Mode`
/// are `#[serde(untagged)]`, so a bare mode string or a hosted-tool object
/// deserializes without this enum's own tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceParam {
    /// Constrains the tools available to the model to a pre-defined set.
    AllowedTools(ToolChoiceAllowed),

    /// Use this option to force the model to call a specific function.
    Function(ToolChoiceFunction),

    /// Use this option to force the model to call a specific tool on a remote MCP server.
    Mcp(ToolChoiceMCP),

    /// Use this option to force the model to call a custom tool.
    Custom(ToolChoiceCustom),

    /// Forces the model to call the apply_patch tool when executing a tool call.
    ApplyPatch,

    /// Forces the model to call the function shell tool when a tool call is required.
    Shell,

    /// Indicates that the model should use a built-in tool to generate a response.
    /// [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
    #[serde(untagged)]
    Hosted(ToolChoiceTypes),

    /// Controls which (if any) tool is called by the model.
    ///
    /// `none` means the model will not call any tool and instead generates a message.
    ///
    /// `auto` means the model can pick between generating a message or calling one or
    /// more tools.
    ///
    /// `required` means the model must call one or more tools.
    #[serde(untagged)]
    Mode(ToolChoiceOptions),
}

/// Simple tool-choice mode: `none`, `auto`, or `required`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceOptions {
    None,
    Auto,
    Required,
}
1471
1472/// Error returned by the API when a request fails.
1473#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1474pub struct ErrorObject {
1475    /// The error code for the response.
1476    pub code: String,
1477    /// A human-readable description of the error.
1478    pub message: String,
1479}
1480
/// Details about an incomplete response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct IncompleteDetails {
    /// The reason why the response is incomplete.
    pub reason: String,
}
1487
/// One of the top alternative tokens at a position, with its log probability.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TopLogProb {
    /// The bytes that make up the token text.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The token string.
    pub token: String,
}
1494
/// Log-probability information for a single output token.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LogProb {
    /// The bytes that make up the token text.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The token string.
    pub token: String,
    /// The most likely alternative tokens at this position.
    pub top_logprobs: Vec<TopLogProb>,
}
1502
/// A top alternative token for a position in a response.
// NOTE(review): the type name contains a typo ("Lob" instead of "Log");
// kept as-is since renaming a public type would break callers.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTopLobProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
}
1510
/// Log-probability information for a token in a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseLogProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
    /// The log probability of the top 20 most likely tokens.
    pub top_logprobs: Vec<ResponseTopLobProb>,
}
1520
/// A simple text output from the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputTextContent {
    /// The annotations of the text output.
    pub annotations: Vec<Annotation>,
    /// Log-probability data for the output tokens, when requested.
    // NOTE(review): unlike most Option fields in this file, this one has no
    // `skip_serializing_if`, so `"logprobs": null` is emitted — confirm intended.
    pub logprobs: Option<Vec<LogProb>>,
    /// The text output from the model.
    pub text: String,
}
1530
/// An annotation attached to model output text, discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Annotation {
    /// A citation to a file.
    FileCitation(FileCitationBody),
    /// A citation for a web resource used to generate a model response.
    UrlCitation(UrlCitationBody),
    /// A citation for a container file used to generate a model response.
    ContainerFileCitation(ContainerFileCitationBody),
    /// A path to a file.
    FilePath(FilePath),
}
1543
1544#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1545pub struct FileCitationBody {
1546    /// The ID of the file.
1547    file_id: String,
1548    /// The filename of the file cited.
1549    filename: String,
1550    /// The index of the file in the list of files.
1551    index: u32,
1552}
1553
1554#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1555pub struct UrlCitationBody {
1556    /// The index of the last character of the URL citation in the message.
1557    end_index: u32,
1558    /// The index of the first character of the URL citation in the message.
1559    start_index: u32,
1560    /// The title of the web resource.
1561    title: String,
1562    /// The URL of the web resource.
1563    url: String,
1564}
1565
1566#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1567pub struct ContainerFileCitationBody {
1568    /// The ID of the container file.
1569    container_id: String,
1570    /// The index of the last character of the container file citation in the message.
1571    end_index: u32,
1572    /// The ID of the file.
1573    file_id: String,
1574    /// The filename of the container file cited.
1575    filename: String,
1576    /// The index of the first character of the container file citation in the message.
1577    start_index: u32,
1578}
1579
1580#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1581pub struct FilePath {
1582    /// The ID of the file.
1583    file_id: String,
1584    /// The index of the file in the list of files.
1585    index: u32,
1586}
1587
/// A refusal explanation from the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RefusalContent {
    /// The refusal explanation from the model.
    pub refusal: String,
}
1594
1595/// A message generated by the model.
1596#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1597pub struct OutputMessage {
1598    /// The content of the output message.
1599    pub content: Vec<OutputMessageContent>,
1600    /// The unique ID of the output message.
1601    pub id: String,
1602    /// The role of the output message. Always `assistant`.
1603    pub role: AssistantRole,
1604    /// The status of the message input. One of `in_progress`, `completed`, or
1605    /// `incomplete`. Populated when input items are returned via API.
1606    pub status: OutputStatus,
1607    ///// The type of the output message. Always `message`.
1608    //pub r#type: MessageType,
1609}
1610
/// The type discriminator for a message item. Always `message`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum MessageType {
    /// Serialized as `"message"`.
    #[default]
    Message,
}
1617
/// The role for an output message - always `assistant`.
/// This type ensures type safety by only allowing the assistant role.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum AssistantRole {
    /// Serialized as `"assistant"`.
    #[default]
    Assistant,
}
1626
/// Content of an output message, discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputMessageContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
}
1635
/// Output content from the model, discriminated by the `type` field.
/// Superset of [`OutputMessageContent`] that also carries reasoning text.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
    /// Reasoning text from the model.
    ReasoningText(ReasoningTextContent),
}
1646
/// Reasoning text emitted by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningTextContent {
    /// The reasoning text from the model.
    pub text: String,
}
1652
/// A reasoning item representing the model's chain of thought, including summary paragraphs.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningItem {
    /// Unique identifier of the reasoning content.
    pub id: String,
    /// Reasoning summary content.
    pub summary: Vec<SummaryPart>,
    /// Reasoning text content.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<Vec<ReasoningTextContent>>,
    /// The encrypted content of the reasoning item - populated when a response is generated with
    /// `reasoning.encrypted_content` in the `include` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encrypted_content: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
1672
/// A single summary text fragment from reasoning.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Summary {
    /// A summary of the reasoning output from the model so far.
    pub text: String,
}
1679
/// A part of a reasoning summary, discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SummaryPart {
    /// A summary text fragment (`type: "summary_text"`).
    SummaryText(Summary),
}
1685
/// File search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCall {
    /// The unique ID of the file search tool call.
    pub id: String,
    /// The queries used to search for files.
    pub queries: Vec<String>,
    /// The status of the file search tool call. One of `in_progress`, `searching`,
    /// `incomplete`,`failed`, or `completed`.
    pub status: FileSearchToolCallStatus,
    /// The results of the file search tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub results: Option<Vec<FileSearchToolCallResult>>,
}
1700
/// Status values reported for a file search tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FileSearchToolCallStatus {
    InProgress,
    Searching,
    Incomplete,
    Failed,
    Completed,
}
1710
/// A single result from a file search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCallResult {
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for
    /// storing additional information about the object in a structured format, and querying
    /// for objects via the API or the dashboard. Keys are strings with a maximum length of
    /// 64 characters. Values are strings with a maximum length of 512 characters, booleans,
    /// or numbers.
    pub attributes: HashMap<String, serde_json::Value>,
    /// The unique ID of the file.
    pub file_id: String,
    /// The name of the file.
    pub filename: String,
    /// The relevance score of the file - a value between 0 and 1.
    pub score: f32,
    /// The text that was retrieved from the file.
    pub text: String,
}
1728
/// A pending safety check for a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallSafetyCheckParam {
    /// The ID of the pending safety check.
    pub id: String,
    /// The type of the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// Details about the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
1740
/// Status values reported for a web search tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum WebSearchToolCallStatus {
    InProgress,
    Searching,
    Completed,
    Failed,
}
1749
/// A source consulted during a web search action.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionSearchSource {
    /// The type of source. Always `url`.
    pub r#type: String,
    /// The URL of the source.
    pub url: String,
}
1757
/// A web search query action.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionSearch {
    /// The search query.
    pub query: String,
    /// The sources used in the search.
    // NOTE(review): no `skip_serializing_if` here, so `"sources": null` is
    // emitted when absent — confirm intended.
    pub sources: Option<Vec<WebSearchActionSearchSource>>,
}
1765
/// An action that opens a specific URL from search results.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionOpenPage {
    /// The URL opened by the model.
    pub url: String,
}
1771
/// An action that searches for a pattern within a loaded page.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionFind {
    /// The URL of the page searched for the pattern.
    pub url: String,
    /// The pattern or text to search for within the page.
    pub pattern: String,
}
1779
/// The specific action taken in a web search call, discriminated by `type`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum WebSearchToolCallAction {
    /// Action type "search" - Performs a web search query.
    Search(WebSearchActionSearch),
    /// Action type "open_page" - Opens a specific URL from search results.
    OpenPage(WebSearchActionOpenPage),
    /// Action type "find": Searches for a pattern within a loaded page.
    Find(WebSearchActionFind),
}
1790
/// Web search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolCall {
    /// An object describing the specific action taken in this web search call. Includes
    /// details on how the model used the web (search, open_page, find).
    pub action: WebSearchToolCallAction,
    /// The unique ID of the web search tool call.
    pub id: String,
    /// The status of the web search tool call.
    pub status: WebSearchToolCallStatus,
}
1802
/// Output from a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerToolCall {
    /// The action the model wants performed (click, drag, type, etc.).
    pub action: ComputerAction,
    /// An identifier used when responding to the tool call with output.
    pub call_id: String,
    /// The unique ID of the computer call.
    pub id: String,
    /// The pending safety checks for the computer call.
    pub pending_safety_checks: Vec<ComputerCallSafetyCheckParam>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    pub status: OutputStatus,
}
1817
/// A point in 2D space.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DragPoint {
    /// The x-coordinate.
    pub x: i32,
    /// The y-coordinate.
    pub y: i32,
}
1826
/// Represents all user-triggered actions, discriminated by the `type` field.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ComputerAction {
    /// A click action.
    Click(ClickParam),

    /// A double click action.
    DoubleClick(DoubleClickAction),

    /// A drag action.
    Drag(Drag),

    /// A collection of keypresses the model would like to perform.
    Keypress(KeyPressAction),

    /// A mouse move action.
    Move(Move),

    /// A screenshot action.
    Screenshot,

    /// A scroll action.
    Scroll(Scroll),

    /// An action to type in text.
    Type(Type),

    /// A wait action.
    Wait,
}
1858
/// The mouse button pressed during a click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ClickButtonType {
    Left,
    Right,
    Wheel,
    Back,
    Forward,
}
1868
/// A click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ClickParam {
    /// Indicates which mouse button was pressed during the click. One of `left`,
    /// `right`, `wheel`, `back`, or `forward`.
    pub button: ClickButtonType,
    /// The x-coordinate where the click occurred.
    pub x: i32,
    /// The y-coordinate where the click occurred.
    pub y: i32,
}
1880
/// A double click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DoubleClickAction {
    /// The x-coordinate where the double click occurred.
    pub x: i32,
    /// The y-coordinate where the double click occurred.
    pub y: i32,
}
1889
/// A drag action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Drag {
    /// The path of points the cursor drags through.
    pub path: Vec<DragPoint>,
}
1896
/// A keypress action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KeyPressAction {
    /// The combination of keys the model is requesting to be pressed.
    /// This is an array of strings, each representing a key.
    pub keys: Vec<String>,
}
1904
/// A mouse move action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Move {
    /// The x-coordinate to move to.
    pub x: i32,
    /// The y-coordinate to move to.
    pub y: i32,
}
1913
/// A scroll action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Scroll {
    /// The horizontal scroll distance.
    pub scroll_x: i32,
    /// The vertical scroll distance.
    pub scroll_y: i32,
    /// The x-coordinate where the scroll occurred.
    pub x: i32,
    /// The y-coordinate where the scroll occurred.
    pub y: i32,
}
1926
/// A typing (text entry) action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Type {
    /// The text to type.
    pub text: String,
}
1933
/// A tool call asking the caller to run a function with model-provided arguments.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionToolCall {
    /// A JSON string of the arguments to pass to the function.
    pub arguments: String,
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// The name of the function to run.
    pub name: String,
    /// The unique ID of the function tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
1950
/// Status values reported for an image generation tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ImageGenToolCallStatus {
    InProgress,
    Completed,
    Generating,
    Failed,
}
1959
/// Output of an image generation tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolCall {
    /// The unique ID of the image generation call.
    pub id: String,
    /// The generated image encoded in base64.
    // NOTE(review): no `skip_serializing_if`, so `"result": null` is emitted
    // when absent — confirm intended.
    pub result: Option<String>,
    /// The status of the image generation call.
    pub status: ImageGenToolCallStatus,
}
1969
/// Status values reported for a code interpreter tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum CodeInterpreterToolCallStatus {
    InProgress,
    Completed,
    Incomplete,
    Interpreting,
    Failed,
}
1979
/// Output of a code interpreter request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterToolCall {
    /// The code to run, or null if not available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// ID of the container used to run the code.
    pub container_id: String,
    /// The unique ID of the code interpreter tool call.
    pub id: String,
    /// The outputs generated by the code interpreter, such as logs or images.
    /// Can be null if no outputs are available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub outputs: Option<Vec<CodeInterpreterToolCallOutput>>,
    /// The status of the code interpreter tool call.
    /// Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`.
    pub status: CodeInterpreterToolCallStatus,
}
1998
/// Individual result from a code interpreter: either logs or an image.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolCallOutput {
    /// Code interpreter output logs
    Logs(CodeInterpreterOutputLogs),
    /// Code interpreter output image
    Image(CodeInterpreterOutputImage),
}
2008
/// Log output produced by the code interpreter.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputLogs {
    /// The logs output from the code interpreter.
    pub logs: String,
}
2014
/// Image output produced by the code interpreter.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputImage {
    /// The URL of the image output from the code interpreter.
    pub url: String,
}
2020
2021#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2022pub struct CodeInterpreterFile {
2023    /// The ID of the file.
2024    file_id: String,
2025    /// The MIME type of the file.
2026    mime_type: String,
2027}
2028
/// A tool call requesting a shell command be executed locally.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCall {
    /// Execute a shell command on the server.
    pub action: LocalShellExecAction,
    /// The unique ID of the local shell tool call generated by the model.
    pub call_id: String,
    /// The unique ID of the local shell call.
    pub id: String,
    /// The status of the local shell call.
    pub status: OutputStatus,
}
2040
/// Define the shape of a local shell action (exec).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellExecAction {
    /// The command to run.
    pub command: Vec<String>,
    /// Environment variables to set for the command.
    pub env: HashMap<String, String>,
    /// Optional timeout in milliseconds for the command.
    pub timeout_ms: Option<u64>,
    /// Optional user to run the command as.
    pub user: Option<String>,
    /// Optional working directory to run the command in.
    pub working_directory: Option<String>,
}
2055
/// Commands and limits describing how to run the function shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellActionParam {
    /// Ordered shell commands for the execution environment to run.
    pub commands: Vec<String>,
    /// Maximum wall-clock time in milliseconds to allow the shell commands to run.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timeout_ms: Option<u64>,
    /// Maximum number of UTF-8 characters to capture from combined stdout and stderr output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
2068
/// Status values reported for function shell tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FunctionShellCallItemStatus {
    InProgress,
    Completed,
    Incomplete,
}
2077
/// A tool representing a request to execute one or more shell commands.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallItemParam {
    /// The unique ID of the function shell tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the function shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellActionParam,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<FunctionShellCallItemStatus>,
}
2092
/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcomeParam {
    /// The exit code returned by the shell process.
    pub exit_code: i32,
}
2099
/// The exit or timeout outcome associated with this chunk.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcomeParam {
    /// The commands were cut off by the configured timeout.
    Timeout,
    /// The commands ran to completion with an exit code.
    Exit(FunctionShellCallOutputExitOutcomeParam),
}
2107
/// Captured stdout and stderr for a portion of a function shell tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContentParam {
    /// Captured stdout output for this chunk of the shell call.
    pub stdout: String,
    /// Captured stderr output for this chunk of the shell call.
    pub stderr: String,
    /// The exit or timeout outcome associated with this chunk.
    pub outcome: FunctionShellCallOutputOutcomeParam,
}
2118
/// The streamed output items emitted by a function shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputItemParam {
    /// The unique ID of the function shell tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the function shell tool call generated by the model.
    pub call_id: String,
    /// Captured chunks of stdout and stderr output, along with their associated outcomes.
    pub output: Vec<FunctionShellCallOutputContentParam>,
    /// The maximum number of UTF-8 characters captured for this shell call's combined output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
2133
/// Status values reported for apply_patch tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatusParam {
    InProgress,
    Completed,
}
2141
/// Instruction for creating a new file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperationParam {
    /// Path of the file to create relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply when creating the file.
    pub diff: String,
}
2150
/// Instruction for deleting an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperationParam {
    /// Path of the file to delete relative to the workspace root.
    pub path: String,
}
2157
/// Instruction for updating an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperationParam {
    /// Path of the file to update relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply to the existing file.
    pub diff: String,
}
2166
/// One of the create_file, delete_file, or update_file operations supplied to the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperationParam {
    CreateFile(ApplyPatchCreateFileOperationParam),
    DeleteFile(ApplyPatchDeleteFileOperationParam),
    UpdateFile(ApplyPatchUpdateFileOperationParam),
}
2175
/// A tool call representing a request to create, delete, or update files using diff patches.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallItemParam {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatusParam,
    /// The specific create, delete, or update instruction for the apply_patch tool call.
    pub operation: ApplyPatchOperationParam,
}
2189
/// Outcome values reported for apply_patch tool call outputs.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallOutputStatusParam {
    Completed,
    Failed,
}
2197
/// The streamed output emitted by an apply patch tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallOutputItemParam {
    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call output. One of `completed` or `failed`.
    pub status: ApplyPatchCallOutputStatusParam,
    /// Optional human-readable log text from the apply patch tool (e.g., patch results or errors).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output: Option<String>,
}
2212
/// Shell exec action
/// Execute a shell command.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellAction {
    /// A list of commands to run.
    pub commands: Vec<String>,
    /// Optional timeout in milliseconds for the commands.
    pub timeout_ms: Option<u64>,
    /// Optional maximum number of characters to return from each command.
    pub max_output_length: Option<u64>,
}
2224
/// Status values reported for function shell tool calls.
// NOTE(review): despite the `LocalShell` name, this status type is used by
// `FunctionShellCall` below — a rename would break the public API.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum LocalShellCallStatus {
    InProgress,
    Completed,
    Incomplete,
}
2233
/// A tool call that executes one or more shell commands in a managed environment.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCall {
    /// The unique ID of the function shell tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the function shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellAction,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    pub status: LocalShellCallStatus,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2249
/// The content of a shell call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContent {
    /// Captured stdout output for this chunk of the shell call.
    pub stdout: String,
    /// Captured stderr output for this chunk of the shell call.
    pub stderr: String,
    /// Represents either an exit outcome (with an exit code) or a timeout outcome for a shell call output chunk.
    // Flattened: the outcome's `type` tag appears directly on this object.
    #[serde(flatten)]
    pub outcome: FunctionShellCallOutputOutcome,
    /// The ID of the entity that created this output chunk.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2261
/// Function shell call outcome
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcome {
    /// The commands were cut off by the configured timeout.
    Timeout,
    /// The commands ran to completion with an exit code.
    Exit(FunctionShellCallOutputExitOutcome),
}
2269
/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcome {
    /// Exit code from the shell process.
    pub exit_code: i32,
}
2276
/// The output of a shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutput {
    /// The unique ID of the shell call output. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// An array of shell call output contents
    pub output: Vec<FunctionShellCallOutputContent>,
    /// The maximum length of the shell command output. This is generated by the model and should be
    /// passed back with the raw output.
    pub max_output_length: Option<u64>,
    /// The ID of the entity that created this output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2292
/// Status values reported for apply_patch tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatus {
    InProgress,
    Completed,
}
2300
/// Instruction describing how to create a file via the apply_patch tool.
///
/// Carried by [`ApplyPatchOperation::CreateFile`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperation {
    /// Path of the file to create.
    pub path: String,
    /// Diff to apply.
    pub diff: String,
}
2309
/// Instruction describing how to delete a file via the apply_patch tool.
///
/// Carried by [`ApplyPatchOperation::DeleteFile`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperation {
    /// Path of the file to delete.
    pub path: String,
}
2316
/// Instruction describing how to update a file via the apply_patch tool.
///
/// Carried by [`ApplyPatchOperation::UpdateFile`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperation {
    /// Path of the file to update.
    pub path: String,
    /// Diff to apply.
    pub diff: String,
}
2325
/// One of the create_file, delete_file, or update_file operations applied via apply_patch.
///
/// Internally tagged on `type` (`create_file`, `delete_file`, or
/// `update_file`); the variant's fields serialize alongside the tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperation {
    /// Create a new file at `path` from `diff`.
    CreateFile(ApplyPatchCreateFileOperation),
    /// Delete the file at `path`.
    DeleteFile(ApplyPatchDeleteFileOperation),
    /// Update the file at `path` by applying `diff`.
    UpdateFile(ApplyPatchUpdateFileOperation),
}
2334
/// A tool call that applies file diffs by creating, deleting, or updating files.
///
/// Appears in model output as an `apply_patch_call` item
/// (see [`OutputItem::ApplyPatchCall`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCall {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatus,
    /// One of the create_file, delete_file, or update_file operations applied via apply_patch.
    pub operation: ApplyPatchOperation,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2350
/// Outcome values reported for apply_patch tool call outputs.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallOutputStatus {
    /// The patch was applied successfully.
    Completed,
    /// The patch could not be applied.
    Failed,
}
2358
/// The output emitted by an apply patch tool call.
///
/// Appears as an `apply_patch_call_output` item
/// (see [`OutputItem::ApplyPatchCallOutput`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallOutput {
    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call output. One of `completed` or `failed`.
    pub status: ApplyPatchCallOutputStatus,
    /// Optional textual output returned by the apply patch tool.
    // NOTE(review): unlike `created_by`, `None` here serializes as an explicit
    // `"output": null` (no `skip_serializing_if`) — confirm this is intended.
    pub output: Option<String>,
    /// The ID of the entity that created this tool call output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2374
2375/// Output of an MCP server tool invocation.
2376#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2377pub struct MCPToolCall {
2378    /// A JSON string of the arguments passed to the tool.
2379    pub arguments: String,
2380    /// The unique ID of the tool call.
2381    pub id: String,
2382    /// The name of the tool that was run.
2383    pub name: String,
2384    /// The label of the MCP server running the tool.
2385    pub server_label: String,
2386    /// Unique identifier for the MCP tool call approval request. Include this value
2387    /// in a subsequent `mcp_approval_response` input to approve or reject the corresponding
2388    /// tool call.
2389    pub approval_request_id: Option<String>,
2390    /// Error message from the call, if any.
2391    pub error: Option<String>,
2392    /// The output from the tool call.
2393    pub output: Option<String>,
2394    /// The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
2395    /// `calling`, or `failed`.
2396    pub status: Option<MCPToolCallStatus>,
2397}
2398
2399#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2400#[serde(rename_all = "snake_case")]
2401pub enum MCPToolCallStatus {
2402    InProgress,
2403    Completed,
2404    Incomplete,
2405    Calling,
2406    Failed,
2407}
2408
/// The list of tools available on an MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPListTools {
    /// The unique ID of the list.
    pub id: String,
    /// The label of the MCP server.
    pub server_label: String,
    /// The tools available on the server.
    pub tools: Vec<MCPListToolsTool>,
    /// Error message if listing failed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
2421
/// A request for human approval of an MCP tool invocation.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalRequest {
    /// JSON string of arguments for the tool.
    pub arguments: String,
    /// The unique ID of the approval request.
    pub id: String,
    /// The name of the tool to run.
    pub name: String,
    /// The label of the MCP server making the request.
    pub server_label: String,
}
2433
/// A detailed breakdown of the input tokens in [`ResponseUsage`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct InputTokenDetails {
    /// The number of tokens that were retrieved from the cache.
    /// [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
    pub cached_tokens: u32,
}
2440
/// A detailed breakdown of the output tokens in [`ResponseUsage`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputTokenDetails {
    /// The number of reasoning tokens.
    pub reasoning_tokens: u32,
}
2446
/// Usage statistics for a response.
///
/// Carried in [`Response::usage`] when the API reports token usage.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseUsage {
    /// The number of input tokens.
    pub input_tokens: u32,
    /// A detailed breakdown of the input tokens.
    pub input_tokens_details: InputTokenDetails,
    /// The number of output tokens.
    pub output_tokens: u32,
    /// A detailed breakdown of the output tokens.
    pub output_tokens_details: OutputTokenDetails,
    /// The total number of tokens used.
    pub total_tokens: u32,
}
2461
/// System/developer instructions supplied to the model, as plain text or a
/// structured list of input items.
///
/// Untagged: a JSON string deserializes to `Text`, a JSON array to `Array`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum Instructions {
    /// A text input to the model, equivalent to a text input with the `developer` role.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    Array(Vec<InputItem>),
}
2470
/// The complete response returned by the Responses API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Response {
    /// Whether to run the model response in the background.
    /// [Learn more](https://platform.openai.com/docs/guides/background).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,

    /// Billing information for the response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub billing: Option<Billing>,

    /// The conversation that this response belongs to. Input items and output
    /// items from this response are automatically added to this conversation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<Conversation>,

    /// Unix timestamp (in seconds) when this Response was created.
    pub created_at: u64,

    /// An error object returned when the model fails to generate a Response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorObject>,

    /// Unique identifier for this response.
    pub id: String,

    /// Details about why the response is incomplete, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub incomplete_details: Option<IncompleteDetails>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When using along with `previous_response_id`, the instructions from a previous response
    /// will not be carried over to the next response. This makes it simple to swap out
    /// system (or developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<Instructions>,

    /// An upper bound for the number of tokens that can be generated for a response,
    /// including visible output tokens and
    /// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u32>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be
    /// useful for storing additional information about the object in a structured
    /// format, and querying for objects via API or the dashboard.
    ///
    /// Keys are strings with a maximum length of 64 characters. Values are strings
    /// with a maximum length of 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: String,

    /// The object type of this resource - always set to `response`.
    pub object: String,

    /// An array of content items generated by the model.
    ///
    /// - The length and order of items in the output array is dependent on the model's response.
    /// - Rather than accessing the first item in the output array and assuming it's an assistant
    ///   message with the content generated by the model, you might consider using
    ///   the `output_text` property where supported in SDKs.
    pub output: Vec<OutputItem>,

    // `output_text` is an SDK-only convenience property (the aggregated text
    // of all `output_text` items; supported in the Python and JavaScript SDKs)
    // and is deliberately not modeled here. Kept as plain `//` comments: the
    // previous `///` doc comment would otherwise attach to the next field
    // (`parallel_tool_calls`) in rustdoc.
    // #[serde(skip_serializing_if = "Option::is_none")]
    // pub output_text: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// Reference to a prompt template and its variables.
    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<Prompt>,

    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,

    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_retention: Option<PromptCacheRetention>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
    /// usage policies.
    ///
    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
    /// or email address, in order to avoid sending us any identifying information. [Learn
    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub safety_identifier: Option<String>,

    /// Specifies the processing type used for serving the request.
    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
    /// - When not set, the default behavior is 'auto'.
    ///
    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<ServiceTier>,

    /// The status of the response generation.
    /// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or `incomplete`.
    pub status: Status,

    /// What sampling temperature was used, between 0 and 2. Higher values like 0.8 make
    /// outputs more random, lower values like 0.2 make output more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You
    /// can specify which tool to use by setting the `tool_choice` parameter.
    ///
    /// We support the following categories of tools:
    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
    /// - **Function calls (custom tools)**: Functions that are defined by you,
    ///   enabling the model to call your own code with strongly typed arguments
    ///   and outputs. Learn more about
    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
    ///   custom tools to call your own code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
    /// token position, each with an associated log probability.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u8>,

    /// An alternative to sampling with temperature, called nucleus sampling,
    /// where the model considers the results of the tokens with top_p probability
    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
    /// are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,

    /// Represents token usage details including input tokens, output tokens,
    /// a breakdown of output tokens, and the total tokens used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub usage: Option<ResponseUsage>,
}
2668
2669#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2670#[serde(rename_all = "snake_case")]
2671pub enum Status {
2672    Completed,
2673    Failed,
2674    InProgress,
2675    Cancelled,
2676    Queued,
2677    Incomplete,
2678}
2679
/// Output item
///
/// Internally tagged on `type` with snake_case names (`message`,
/// `function_call`, `mcp_list_tools`, ...), matching the item objects that
/// appear in a [`Response::output`] array.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum OutputItem {
    /// An output message from the model.
    Message(OutputMessage),
    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
    /// for more information.
    FileSearchCall(FileSearchToolCall),
    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling)
    /// for more information.
    FunctionCall(FunctionToolCall),
    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search)
    /// for more information.
    WebSearchCall(WebSearchToolCall),
    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
    /// for more information.
    ComputerCall(ComputerToolCall),
    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API for
    /// subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),
    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),
    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),
    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),
    /// A tool call that executes one or more shell commands in a managed environment.
    ShellCall(FunctionShellCall),
    /// The output of a shell tool call.
    ShellCallOutput(FunctionShellCallOutput),
    /// A tool call that applies file diffs by creating, deleting, or updating files.
    ApplyPatchCall(ApplyPatchToolCall),
    /// The output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutput),
    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),
    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),
    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),
    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
2731
/// A call to a custom tool created by the model.
// `#[non_exhaustive]` allows new fields to be added without a breaking change.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[non_exhaustive]
pub struct CustomToolCall {
    /// An identifier used to map this custom tool call to a tool call output.
    pub call_id: String,
    /// The input for the custom tool call generated by the model.
    pub input: String,
    /// The name of the custom tool being called.
    pub name: String,
    /// The unique ID of the custom tool call in the OpenAI platform.
    pub id: String,
}
2744
/// Confirmation payload returned when a response is deleted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct DeleteResponse {
    /// The object type of the deleted resource.
    pub object: String,
    /// Whether the deletion succeeded.
    pub deleted: bool,
    /// The ID of the deleted response.
    pub id: String,
}
2751
/// A reference to an item by ID, with an optional `type` discriminator.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct AnyItemReference {
    /// The type of the referenced item, when present.
    // NOTE(review): `None` serializes as an explicit `"type": null` (no
    // `skip_serializing_if`) — confirm the API accepts this.
    pub r#type: Option<String>,
    /// The ID of the referenced item.
    pub id: String,
}
2757
/// A fully materialized response item, discriminated by its `type` tag
/// (snake_case on the wire, e.g. `message`, `function_call_output`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ItemResourceItem {
    Message(MessageItem),
    FileSearchCall(FileSearchToolCall),
    ComputerCall(ComputerToolCall),
    ComputerCallOutput(ComputerCallOutputItemParam),
    WebSearchCall(WebSearchToolCall),
    FunctionCall(FunctionToolCall),
    FunctionCallOutput(FunctionCallOutputItemParam),
    ImageGenerationCall(ImageGenToolCall),
    CodeInterpreterCall(CodeInterpreterToolCall),
    LocalShellCall(LocalShellToolCall),
    LocalShellCallOutput(LocalShellToolCallOutput),
    ShellCall(FunctionShellCallItemParam),
    ShellCallOutput(FunctionShellCallOutputItemParam),
    ApplyPatchCall(ApplyPatchToolCallItemParam),
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
    McpListTools(MCPListTools),
    McpApprovalRequest(MCPApprovalRequest),
    McpApprovalResponse(MCPApprovalResponse),
    McpCall(MCPToolCall),
}
2781
2782#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2783#[serde(untagged)]
2784pub enum ItemResource {
2785    ItemReference(AnyItemReference),
2786    Item(ItemResourceItem),
2787}
2788
/// A list of Response items.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseItemList {
    /// The type of object returned, must be `list`.
    pub object: String,
    /// The ID of the first item in the list.
    // presumably `None` when `data` is empty — confirm against the API
    pub first_id: Option<String>,
    /// The ID of the last item in the list.
    pub last_id: Option<String>,
    /// Whether there are more items in the list.
    pub has_more: bool,
    /// The list of items.
    pub data: Vec<ItemResource>,
}
2803
/// Request body for counting the input tokens of a prospective response
/// without generating one.
///
/// Construct via the generated [`TokenCountsBodyArgs`] builder (mutable
/// pattern, `into`/`strip_option` setters); every field defaults to `None`
/// and is omitted from the serialized JSON when unset.
#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
#[builder(
    name = "TokenCountsBodyArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct TokenCountsBody {
    /// The conversation that this response belongs to. Items from this
    /// conversation are prepended to `input_items` for this response request.
    /// Input items and output items from this response are automatically added to this
    /// conversation after this response completes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<ConversationParam>,

    /// Text, image, or file inputs to the model, used to generate a response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
    /// to browse and compare available models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You can specify which tool
    /// to use by setting the `tool_choice` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,
}
2881
/// The result of a token-count request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TokenCountsResource {
    /// The object type returned by the API.
    pub object: String,
    /// The number of input tokens counted for the request.
    pub input_tokens: u32,
}