//! Response types for the OpenAI Responses API
//! (`async_openai::types::responses::response`).
1use crate::error::OpenAIError;
2use crate::types::mcp::{MCPListToolsTool, MCPTool};
3use crate::types::responses::{
4    CustomGrammarFormatParam, Filter, ImageDetail, ReasoningEffort, ResponseFormatJsonSchema,
5    ResponseUsage, SummaryTextContent,
6};
7use derive_builder::Builder;
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10
11/// Role of messages in the API.
12#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
13#[serde(rename_all = "lowercase")]
14pub enum Role {
15    #[default]
16    User,
17    Assistant,
18    System,
19    Developer,
20}
21
22/// Status of input/output items.
23#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
24#[serde(rename_all = "snake_case")]
25pub enum OutputStatus {
26    InProgress,
27    Completed,
28    Incomplete,
29}
30
/// The `input` payload of a response request: either a plain string or a
/// structured list of input items.
///
/// Serialized untagged: `Text` is emitted as a bare JSON string, `Items`
/// as a JSON array.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputParam {
    /// A text input to the model, equivalent to a text input with the
    /// `user` role.
    Text(String),
    /// A list of one or many input items to the model, containing
    /// different content types.
    Items(Vec<InputItem>),
}
41
/// Content item used to generate a response.
///
/// This is a properly discriminated union based on the `type` field, using Rust's
/// type-safe enum with serde's tag attribute for efficient deserialization.
///
/// # OpenAPI Specification
/// Corresponds to the `Item` schema in the OpenAPI spec with a `type` discriminator.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Item {
    /// A message (type: "message").
    /// Can represent InputMessage (user/system/developer) or OutputMessage (assistant).
    ///
    /// InputMessage:
    ///     A message input to the model with a role indicating instruction following hierarchy.
    ///     Instructions given with the developer or system role take precedence over instructions given with the user role.
    /// OutputMessage:
    ///     A message output from the model.
    Message(MessageItem),

    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
    FileSearchCall(FileSearchToolCall),

    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
    ComputerCall(ComputerToolCall),

    /// The output of a computer tool call.
    ComputerCallOutput(ComputerCallOutputItemParam),

    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
    WebSearchCall(WebSearchToolCall),

    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
    FunctionCall(FunctionToolCall),

    /// The output of a function tool call.
    FunctionCallOutput(FunctionCallOutputItemParam),

    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API
    /// for subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),

    /// A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
    Compaction(CompactionSummaryItemParam),

    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),

    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),

    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),

    /// The output of a local shell tool call.
    LocalShellCallOutput(LocalShellToolCallOutput),

    /// A tool representing a request to execute one or more shell commands.
    ShellCall(FunctionShellCallItemParam),

    /// The streamed output items emitted by a shell tool call.
    ShellCallOutput(FunctionShellCallOutputItemParam),

    /// A tool call representing a request to create, delete, or update files using diff patches.
    ApplyPatchCall(ApplyPatchToolCallItemParam),

    /// The streamed output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),

    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),

    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),

    /// A response to an MCP approval request.
    McpApprovalResponse(MCPApprovalResponse),

    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),

    /// The output of a custom tool call from your code, being sent back to the model.
    CustomToolCallOutput(CustomToolCallOutput),

    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
136
/// Input item that can be used in the context for generating a response.
///
/// This represents the OpenAPI `InputItem` schema which is an `anyOf`:
/// 1. `EasyInputMessage` - Simple, user-friendly message input (can use string content)
/// 2. `Item` - Structured items with proper type discrimination (including InputMessage, OutputMessage, tool calls)
/// 3. `ItemReferenceParam` - Reference to an existing item by ID (type can be null)
///
/// Uses untagged deserialization because these types overlap in structure.
/// Order matters: more specific structures are tried first, so do not reorder
/// the variants below.
///
/// # OpenAPI Specification
/// Corresponds to the `InputItem` schema: `anyOf[EasyInputMessage, Item, ItemReferenceParam]`
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputItem {
    /// A reference to an existing item by ID.
    /// Has a required `id` field and optional `type` (can be "item_reference" or null).
    /// Must be tried first as it's the most minimal structure.
    ItemReference(ItemReference),

    /// All structured items with proper type discrimination.
    /// Includes InputMessage, OutputMessage, and all tool calls/outputs.
    /// Uses the discriminated `Item` enum for efficient, type-safe deserialization.
    Item(Item),

    /// A simple, user-friendly message input (EasyInputMessage).
    /// Supports string content and can include assistant role for previous responses.
    /// Must be tried last as it's the most flexible structure.
    ///
    /// A message input to the model with a role indicating instruction following
    /// hierarchy. Instructions given with the `developer` or `system` role take
    /// precedence over instructions given with the `user` role. Messages with the
    /// `assistant` role are presumed to have been generated by the model in previous
    /// interactions.
    EasyMessage(EasyInputMessage),
}
173
/// A message item used within the `Item` enum.
///
/// Both InputMessage and OutputMessage have `type: "message"`, so we use an untagged
/// enum to distinguish them based on their structure:
/// - OutputMessage: role=assistant, required id & status fields
/// - InputMessage: role=user/system/developer, content is `Vec<ContentType>`, optional id/status
///
/// Note: EasyInputMessage is NOT included here - it's a separate variant in `InputItem`,
/// not part of the structured `Item` enum.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum MessageItem {
    /// An output message from the model (role: assistant, has required id & status).
    /// This must come first as it has the most specific structure (required id and
    /// status fields); with `untagged`, variant order decides which matches first.
    Output(OutputMessage),

    /// A structured input message (role: user/system/developer, content is `Vec<ContentType>`).
    /// Has structured content list and optional id/status fields.
    ///
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the `developer` or `system` role take precedence over instructions
    /// given with the `user` role.
    Input(InputMessage),
}
198
/// A reference to an existing item by ID (`ItemReferenceParam` in the OpenAPI spec).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ItemReference {
    /// The type of item to reference. Can be "item_reference" or null;
    /// omitted from the serialized form when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub r#type: Option<ItemReferenceType>,
    /// The ID of the item to reference.
    pub id: String,
}
208
/// The `type` tag carried by an [`ItemReference`] when it is not null.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ItemReferenceType {
    /// Serialized as `"item_reference"`.
    ItemReference,
}
214
/// Output from a function call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionCallOutputItemParam {
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// Text, image, or file output of the function tool call.
    pub output: FunctionCallOutput,
    /// The unique ID of the function tool call output.
    /// Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
231
/// The `output` of a function tool call: either a plain string or a list of
/// content parts. Serialized untagged.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum FunctionCallOutput {
    /// A JSON string of the output of the function tool call.
    Text(String),
    /// Structured content parts (text, image, or file).
    Content(Vec<InputContent>), // TODO use shape which allows null from OpenAPI spec?
}
239
/// The output of a computer tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallOutputItemParam {
    /// The ID of the computer tool call that produced the output.
    pub call_id: String,
    /// A computer screenshot image used with the computer use tool.
    pub output: ComputerScreenshotImage,
    /// The safety checks reported by the API that have been acknowledged by the developer.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acknowledged_safety_checks: Option<Vec<ComputerCallSafetyCheckParam>>,
    /// The unique ID of the computer tool call output. Optional when creating.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the message input. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when input items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
257
/// The constant `type` tag of a [`ComputerScreenshotImage`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ComputerScreenshotImageType {
    /// Serialized as `"computer_screenshot"`.
    ComputerScreenshot,
}
263
/// A computer screenshot image used with the computer use tool.
///
/// NOTE(review): presumably at least one of `file_id` or `image_url` should be
/// set — the spec shape visible here does not enforce it.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerScreenshotImage {
    /// Specifies the event type. For a computer screenshot, this property is always
    /// set to `computer_screenshot`.
    pub r#type: ComputerScreenshotImageType,
    /// The identifier of an uploaded file that contains the screenshot.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the screenshot image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
277
/// Output from a local shell tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCallOutput {
    /// The unique ID of the local shell tool call generated by the model.
    pub id: String,

    /// A JSON string of the output of the local shell tool call.
    pub output: String,

    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
291
/// Output from a local shell command execution.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellOutput {
    /// The stdout output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stdout: Option<String>,

    /// The stderr output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stderr: Option<String>,

    /// The exit code of the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exit_code: Option<i32>,
}
307
/// An MCP approval response that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalResponse {
    /// The ID of the approval request being answered.
    pub approval_request_id: String,

    /// Whether the request was approved.
    pub approve: bool,

    /// The unique ID of the approval response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,

    /// Optional reason for the decision.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
325
/// The `output` of a custom tool call: either a plain string or a list of
/// content parts. Serialized untagged.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum CustomToolCallOutputOutput {
    /// A string of the output of the custom tool call.
    Text(String),
    /// Text, image, or file output of the custom tool call.
    List(Vec<InputContent>),
}
334
/// The output of a custom tool call from your code, sent back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CustomToolCallOutput {
    /// The call ID, used to map this custom tool call output to a custom tool call.
    pub call_id: String,

    /// The output from the custom tool call generated by your code.
    /// Can be a string or a list of output content.
    pub output: CustomToolCallOutputOutput,

    /// The unique ID of the custom tool call output in the OpenAI platform.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
348
/// A simplified message input to the model (EasyInputMessage in the OpenAPI spec).
///
/// This is the most user-friendly way to provide messages, supporting both simple
/// string content and structured content. Role can include `assistant` for providing
/// previous assistant responses.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "EasyInputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct EasyInputMessage {
    /// The type of the message input. Defaults to `message` when omitted in JSON input.
    #[serde(default)]
    pub r#type: MessageType,
    /// The role of the message input. One of `user`, `assistant`, `system`, or `developer`.
    pub role: Role,
    /// Text, image, or audio input to the model, used to generate a response.
    /// Can also contain previous assistant responses.
    pub content: EasyInputContent,
}
372
/// A structured message input to the model (InputMessage in the OpenAPI spec).
///
/// This variant requires structured content (not a simple string) and does not support
/// the `assistant` role (use OutputMessage for that). status is populated when items are returned via API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputMessage {
    /// A list of one or many input items to the model, containing different content types.
    pub content: Vec<InputContent>,
    /// The role of the message input. One of `user`, `system`, or `developer`.
    /// Note: `assistant` is NOT allowed here; use OutputMessage instead.
    pub role: InputRole,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
    // NOTE: the OpenAPI spec also declares a constant `type: "message"` field.
    // It is intentionally omitted here because `Item`'s tagged serialization
    // (`#[serde(tag = "type")]`) supplies that discriminator.
}
398
/// The role for an input message - can only be `user`, `system`, or `developer`.
/// This type ensures type safety by excluding the `assistant` role (use OutputMessage for that).
/// Serialized in lowercase; defaults to `User`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum InputRole {
    #[default]
    User,
    System,
    Developer,
}
409
/// Content for EasyInputMessage - can be a simple string or structured list.
/// Serialized untagged: a bare JSON string or a JSON array.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum EasyInputContent {
    /// A text input to the model.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    ContentList(Vec<InputContent>),
}
419
/// Parts of a message: text, image, file, or audio.
/// Discriminated by the `type` field (`input_text`, `input_image`, `input_file`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    /// A text input to the model.
    InputText(InputTextContent),
    /// An image input to the model. Learn about
    /// [image inputs](https://platform.openai.com/docs/guides/vision).
    InputImage(InputImageContent),
    /// A file input to the model.
    InputFile(InputFileContent),
}
432
433#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
434pub struct InputTextContent {
435    /// The text input to the model.
436    pub text: String,
437}
438
/// An image input to the model, by uploaded file ID or by URL.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputImageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputImageContent {
    /// The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`.
    /// Defaults to `auto`.
    pub detail: ImageDetail,
    /// The ID of the file to be sent to the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image
    /// in a data URL.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
459
460#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
461#[builder(
462    name = "InputFileArgs",
463    pattern = "mutable",
464    setter(into, strip_option),
465    default
466)]
467#[builder(build_fn(error = "OpenAIError"))]
468pub struct InputFileContent {
469    /// The content of the file to be sent to the model.
470    #[serde(skip_serializing_if = "Option::is_none")]
471    file_data: Option<String>,
472    /// The ID of the file to be sent to the model.
473    #[serde(skip_serializing_if = "Option::is_none")]
474    file_id: Option<String>,
475    /// The URL of the file to be sent to the model.
476    #[serde(skip_serializing_if = "Option::is_none")]
477    file_url: Option<String>,
478    /// The name of the file to be sent to the model.
479    #[serde(skip_serializing_if = "Option::is_none")]
480    filename: Option<String>,
481}
482
/// The conversation that this response belonged to. Input items and output items from this
/// response were automatically added to this conversation.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Conversation {
    /// The unique ID of the conversation that this response was associated with.
    pub id: String,
}
490
/// The `conversation` request parameter: either a bare conversation ID string
/// or a conversation object. Serialized untagged.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ConversationParam {
    /// The unique ID of the conversation.
    ConversationID(String),
    /// The conversation that this response belongs to.
    Object(Conversation),
}
499
/// Additional output data that can be requested via the `include` request
/// parameter. Each variant serializes to the dotted string in its `rename`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum IncludeEnum {
    /// Include the search results of the file search tool call.
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    /// Include the results of the web search tool call.
    #[serde(rename = "web_search_call.results")]
    WebSearchCallResults,
    /// Include the sources of the web search tool call.
    #[serde(rename = "web_search_call.action.sources")]
    WebSearchCallActionSources,
    /// Include image urls from the input message.
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    /// Include image urls from the computer call output.
    #[serde(rename = "computer_call_output.output.image_url")]
    ComputerCallOutputOutputImageUrl,
    /// Include the outputs of python code execution in code interpreter tool call items.
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
    /// Include an encrypted version of reasoning tokens in reasoning item outputs.
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    /// Include logprobs with assistant messages.
    #[serde(rename = "message.output_text.logprobs")]
    MessageOutputTextLogprobs,
}
519
/// Options for streaming responses. Only set this when `stream: true`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseStreamOptions {
    /// When true, stream obfuscation will be enabled. Stream obfuscation adds
    /// random characters to an `obfuscation` field on streaming delta events to
    /// normalize payload sizes as a mitigation to certain side-channel attacks.
    /// These obfuscation fields are included by default, but add a small amount
    /// of overhead to the data stream. You can set `include_obfuscation` to
    /// false to optimize for bandwidth if you trust the network links between
    /// your application and the OpenAI API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_obfuscation: Option<bool>,
}
532
533/// Builder for a Responses API request.
534#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
535#[builder(
536    name = "CreateResponseArgs",
537    pattern = "mutable",
538    setter(into, strip_option),
539    default
540)]
541#[builder(build_fn(error = "OpenAIError"))]
542pub struct CreateResponse {
543    /// Whether to run the model response in the background.
544    /// [Learn more](https://platform.openai.com/docs/guides/background).
545    #[serde(skip_serializing_if = "Option::is_none")]
546    pub background: Option<bool>,
547
548    /// The conversation that this response belongs to. Items from this conversation are prepended to
549    ///  `input_items` for this response request.
550    ///
551    /// Input items and output items from this response are automatically added to this conversation after
552    /// this response completes.
553    #[serde(skip_serializing_if = "Option::is_none")]
554    pub conversation: Option<ConversationParam>,
555
556    /// Specify additional output data to include in the model response. Currently supported
557    /// values are:
558    ///
559    /// - `web_search_call.action.sources`: Include the sources of the web search tool call.
560    ///
561    /// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
562    ///   interpreter tool call items.
563    ///
564    /// - `computer_call_output.output.image_url`: Include image urls from the computer call
565    ///   output.
566    ///
567    /// - `file_search_call.results`: Include the search results of the file search tool call.
568    ///
569    /// - `message.input_image.image_url`: Include image urls from the input message.
570    ///
571    /// - `message.output_text.logprobs`: Include logprobs with assistant messages.
572    ///
573    /// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
574    ///   reasoning item outputs. This enables reasoning items to be used in multi-turn
575    ///   conversations when using the Responses API statelessly (like when the `store` parameter is
576    ///   set to `false`, or when an organization is enrolled in the zero data retention program).
577    #[serde(skip_serializing_if = "Option::is_none")]
578    pub include: Option<Vec<IncludeEnum>>,
579
580    /// Text, image, or file inputs to the model, used to generate a response.
581    ///
582    /// Learn more:
583    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
584    /// - [Image inputs](https://platform.openai.com/docs/guides/images)
585    /// - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
586    /// - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
587    /// - [Function calling](https://platform.openai.com/docs/guides/function-calling)
588    pub input: InputParam,
589
590    /// A system (or developer) message inserted into the model's context.
591    ///
592    /// When using along with `previous_response_id`, the instructions from a previous
593    /// response will not be carried over to the next response. This makes it simple
594    /// to swap out system (or developer) messages in new responses.
595    #[serde(skip_serializing_if = "Option::is_none")]
596    pub instructions: Option<String>,
597
598    /// An upper bound for the number of tokens that can be generated for a response, including
599    /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
600    #[serde(skip_serializing_if = "Option::is_none")]
601    pub max_output_tokens: Option<u32>,
602
603    /// The maximum number of total calls to built-in tools that can be processed in a response. This
604    /// maximum number applies across all built-in tool calls, not per individual tool. Any further
605    /// attempts to call a tool by the model will be ignored.
606    #[serde(skip_serializing_if = "Option::is_none")]
607    pub max_tool_calls: Option<u32>,
608
609    /// Set of 16 key-value pairs that can be attached to an object. This can be
610    /// useful for storing additional information about the object in a structured
611    /// format, and querying for objects via API or the dashboard.
612    ///
613    /// Keys are strings with a maximum length of 64 characters. Values are
614    /// strings with a maximum length of 512 characters.
615    #[serde(skip_serializing_if = "Option::is_none")]
616    pub metadata: Option<HashMap<String, String>>,
617
618    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
619    /// offers a wide range of models with different capabilities, performance
620    /// characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
621    /// to browse and compare available models.
622    #[serde(skip_serializing_if = "Option::is_none")]
623    pub model: Option<String>,
624
625    /// Whether to allow the model to run tool calls in parallel.
626    #[serde(skip_serializing_if = "Option::is_none")]
627    pub parallel_tool_calls: Option<bool>,
628
629    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
630    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
631    /// Cannot be used in conjunction with `conversation`.
632    #[serde(skip_serializing_if = "Option::is_none")]
633    pub previous_response_id: Option<String>,
634
635    /// Reference to a prompt template and its variables.
636    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
637    #[serde(skip_serializing_if = "Option::is_none")]
638    pub prompt: Option<Prompt>,
639
640    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
641    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
642    #[serde(skip_serializing_if = "Option::is_none")]
643    pub prompt_cache_key: Option<String>,
644
645    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
646    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
647    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
648    #[serde(skip_serializing_if = "Option::is_none")]
649    pub prompt_cache_retention: Option<PromptCacheRetention>,
650
651    /// **gpt-5 and o-series models only**
652    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
653    #[serde(skip_serializing_if = "Option::is_none")]
654    pub reasoning: Option<Reasoning>,
655
656    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
657    /// usage policies.
658    ///
659    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
660    /// or email address, in order to avoid sending us any identifying information. [Learn
661    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
662    #[serde(skip_serializing_if = "Option::is_none")]
663    pub safety_identifier: Option<String>,
664
665    /// Specifies the processing type used for serving the request.
666    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
667    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
668    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
669    /// - When not set, the default behavior is 'auto'.
670    ///
671    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
672    #[serde(skip_serializing_if = "Option::is_none")]
673    pub service_tier: Option<ServiceTier>,
674
675    /// Whether to store the generated model response for later retrieval via API.
676    #[serde(skip_serializing_if = "Option::is_none")]
677    pub store: Option<bool>,
678
679    /// If set to true, the model response data will be streamed to the client
680    /// as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
681    /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
682    /// for more information.
683    #[serde(skip_serializing_if = "Option::is_none")]
684    pub stream: Option<bool>,
685
686    /// Options for streaming responses. Only set this when you set `stream: true`.
687    #[serde(skip_serializing_if = "Option::is_none")]
688    pub stream_options: Option<ResponseStreamOptions>,
689
690    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8
691    /// will make the output more random, while lower values like 0.2 will make it
692    /// more focused and deterministic. We generally recommend altering this or
693    /// `top_p` but not both.
694    #[serde(skip_serializing_if = "Option::is_none")]
695    pub temperature: Option<f32>,
696
697    /// Configuration options for a text response from the model. Can be plain
698    /// text or structured JSON data. Learn more:
699    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
700    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
701    #[serde(skip_serializing_if = "Option::is_none")]
702    pub text: Option<ResponseTextParam>,
703
704    /// How the model should select which tool (or tools) to use when generating
705    /// a response. See the `tools` parameter to see how to specify which tools
706    /// the model can call.
707    #[serde(skip_serializing_if = "Option::is_none")]
708    pub tool_choice: Option<ToolChoiceParam>,
709
710    /// An array of tools the model may call while generating a response. You
711    /// can specify which tool to use by setting the `tool_choice` parameter.
712    ///
713    /// We support the following categories of tools:
714    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
715    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
716    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
717    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
718    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
719    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
720    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
721    /// - **Function calls (custom tools)**: Functions that are defined by you,
722    ///   enabling the model to call your own code with strongly typed arguments
723    ///   and outputs. Learn more about
724    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
725    ///   custom tools to call your own code.
726    #[serde(skip_serializing_if = "Option::is_none")]
727    pub tools: Option<Vec<Tool>>,
728
729    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
730    /// token position, each with an associated log probability.
731    #[serde(skip_serializing_if = "Option::is_none")]
732    pub top_logprobs: Option<u8>,
733
734    /// An alternative to sampling with temperature, called nucleus sampling,
735    /// where the model considers the results of the tokens with top_p probability
736    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
737    /// are considered.
738    ///
739    /// We generally recommend altering this or `temperature` but not both.
740    #[serde(skip_serializing_if = "Option::is_none")]
741    pub top_p: Option<f32>,
742
743    ///The truncation strategy to use for the model response.
744    /// - `auto`: If the input to this Response exceeds
745    ///   the model's context window size, the model will truncate the
746    ///   response to fit the context window by dropping items from the beginning of the conversation.
747    /// - `disabled` (default): If the input size will exceed the context window
748    ///   size for a model, the request will fail with a 400 error.
749    #[serde(skip_serializing_if = "Option::is_none")]
750    pub truncation: Option<Truncation>,
751}
752
/// A value substituted for a variable in a [`Prompt`] template.
///
/// Serialized untagged: a plain string, a structured Response input content
/// item (e.g. an image or file), or any other JSON value as a fallback.
/// NOTE(review): variant order matters for untagged deserialization —
/// `String` is tried first, then `Content`, then the `Custom` catch-all.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ResponsePromptVariables {
    /// A plain text substitution value.
    String(String),
    /// A structured Response input content item (e.g. image or file).
    Content(InputContent),
    /// Fallback for any other JSON value.
    Custom(serde_json::Value),
}
760
/// Reference to a prompt template and its variables.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Prompt {
    /// The unique identifier of the prompt template to use.
    pub id: String,

    /// Optional version of the prompt template.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,

    /// Optional map of values to substitute in for variables in your
    /// prompt. The substitution values can either be strings, or other
    /// Response input types like images or files.
    ///
    /// NOTE(review): the doc above describes a *map* of variable name to
    /// value, but the field holds a single `ResponsePromptVariables`;
    /// map-shaped JSON will fall through to its `Custom` variant — confirm
    /// this shape is intended.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variables: Option<ResponsePromptVariables>,
}
776
/// Processing type used for serving the request. See the `service_tier`
/// request parameter for the meaning of each value.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ServiceTier {
    /// Use the service tier configured in the Project settings (default).
    #[default]
    Auto,
    /// Standard pricing and performance for the selected model.
    Default,
    /// Flex processing tier.
    Flex,
    /// Scale tier.
    Scale,
    /// Priority processing tier.
    Priority,
}
787
/// Truncation strategies for the model response.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Truncation {
    /// Drop items from the beginning of the conversation to fit the
    /// model's context window.
    Auto,
    /// Fail the request with a 400 error if the input exceeds the
    /// context window (API default).
    Disabled,
}
795
/// Billing information for a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Billing {
    /// The party that pays for the request.
    pub payer: String,
}
800
/// o-series reasoning settings.
///
/// Construct with [`ReasoningArgs`] (generated builder).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ReasoningArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct Reasoning {
    /// Constrains effort on reasoning for
    /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
    /// reasoning effort can result in faster responses and fewer tokens used
    /// on reasoning in a response.
    ///
    /// Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option<ReasoningEffort>,
    /// A summary of the reasoning performed by the model. This can be
    /// useful for debugging and understanding the model's reasoning process.
    /// One of `auto`, `concise`, or `detailed`.
    ///
    /// `concise` is supported for `computer-use-preview` models and all reasoning models after
    /// `gpt-5`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummary>,
}
829
/// Constrains the verbosity of the model's response. Lower values produce
/// more concise responses; higher values produce more verbose responses.
/// (The previous doc comment here — "o-series reasoning settings." — was a
/// copy-paste from [`Reasoning`].)
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Verbosity {
    Low,
    Medium,
    High,
}
838
/// Level of detail for the reasoning summary. See [`Reasoning::summary`].
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ReasoningSummary {
    Auto,
    Concise,
    Detailed,
}
846
/// The retention policy for the prompt cache.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum PromptCacheRetention {
    /// Serialized as `"in_memory"`.
    #[serde(rename = "in_memory")]
    InMemory,
    /// 24-hour retention; serialized as `"24h"`.
    #[serde(rename = "24h")]
    Hours24,
}
855
/// Configuration for text response format.
///
/// See [`TextResponseFormatConfiguration`] for the available formats.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTextParam {
    /// An object specifying the format that the model must output.
    ///
    /// Configuring `{ "type": "json_schema" }` enables Structured Outputs,
    /// which ensures the model will match your supplied JSON schema. Learn more in the
    /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
    ///
    /// The default format is `{ "type": "text" }` with no additional options.
    ///
    /// **Not recommended for gpt-4o and newer models:**
    ///
    /// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
    /// ensures the message the model generates is valid JSON. Using `json_schema`
    /// is preferred for models that support it.
    pub format: TextResponseFormatConfiguration,

    /// Constrains the verbosity of the model's response. Lower values will result in
    /// more concise responses, while higher values will result in more verbose responses.
    ///
    /// Currently supported values are `low`, `medium`, and `high`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<Verbosity>,
}
881
/// An object specifying the format that the model must output,
/// discriminated by the `type` field.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum TextResponseFormatConfiguration {
    /// Default response format. Used to generate text responses.
    Text,
    /// JSON object response format. An older method of generating JSON responses.
    /// Using `json_schema` is recommended for models that support it.
    /// Note that the model will not generate JSON without a system or user message
    /// instructing it to do so.
    JsonObject,
    /// JSON Schema response format. Used to generate structured JSON responses.
    /// Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
    JsonSchema(ResponseFormatJsonSchema),
}
896
/// Definitions for model-callable tools, discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Tool {
    /// Defines a function in your own code the model can choose to call. Learn more about [function
    /// calling](https://platform.openai.com/docs/guides/tools).
    Function(FunctionTool),
    /// A tool that searches for relevant content from uploaded files. Learn more about the [file search
    /// tool](https://platform.openai.com/docs/guides/tools-file-search).
    FileSearch(FileSearchTool),
    /// A tool that controls a virtual computer. Learn more about the [computer
    /// use tool](https://platform.openai.com/docs/guides/tools-computer-use).
    ComputerUsePreview(ComputerUsePreviewTool),
    /// Search the Internet for sources related to the prompt. Learn more about the
    /// [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearch(WebSearchTool),
    /// Dated web search variant; serialized as `web_search_2025_08_26`.
    #[serde(rename = "web_search_2025_08_26")]
    WebSearch20250826(WebSearchTool),
    /// Give the model access to additional tools via remote Model Context Protocol
    /// (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
    Mcp(MCPTool),
    /// A tool that runs Python code to help generate a response to a prompt.
    CodeInterpreter(CodeInterpreterTool),
    /// A tool that generates images using a model like `gpt-image-1`.
    ImageGeneration(ImageGenTool),
    /// A tool that allows the model to execute shell commands in a local environment.
    LocalShell,
    /// A tool that allows the model to execute shell commands.
    Shell(FunctionShellToolParam),
    /// A custom tool that processes input using a specified format. Learn more about [custom
    /// tools](https://platform.openai.com/docs/guides/function-calling#custom-tools).
    Custom(CustomToolParam),
    /// This tool searches the web for relevant results to use in a response. Learn more about the [web search
    /// tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearchPreview(WebSearchTool),
    /// Dated preview variant; serialized as `web_search_preview_2025_03_11`.
    #[serde(rename = "web_search_preview_2025_03_11")]
    WebSearchPreview20250311(WebSearchTool),
    /// Allows the assistant to create, delete, or update files using unified diffs.
    ApplyPatch,
}
939
940#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
941pub struct CustomToolParam {
942    /// The name of the custom tool, used to identify it in tool calls.
943    pub name: String,
944    /// Optional description of the custom tool, used to provide more context.
945    pub description: Option<String>,
946    /// The input format for the custom tool. Default is unconstrained text.
947    pub format: CustomToolParamFormat,
948}
949
/// The input format for a custom tool, discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum CustomToolParamFormat {
    /// Unconstrained free-form text.
    #[default]
    Text,
    /// A grammar defined by the user.
    Grammar(CustomGrammarFormatParam),
}
959
/// A tool that searches for relevant content from uploaded files.
///
/// Construct with [`FileSearchToolArgs`] (generated builder).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "FileSearchToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct FileSearchTool {
    /// The IDs of the vector stores to search.
    pub vector_store_ids: Vec<String>,
    /// The maximum number of results to return. This number should be between 1 and 50 inclusive.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_num_results: Option<u32>,
    /// A filter to apply.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filters: Option<Filter>,
    /// Ranking options for search.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ranking_options: Option<RankingOptions>,
}
981
/// Defines a function in your own code the model can choose to call.
///
/// Construct with [`FunctionToolArgs`] (generated builder).
/// NOTE(review): unlike `FileSearchToolArgs`, this builder does not declare
/// `build_fn(error = "OpenAIError")`, so its `build()` error type differs —
/// confirm whether that inconsistency is intentional.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "FunctionToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
pub struct FunctionTool {
    /// The name of the function to call.
    pub name: String,
    /// A JSON schema object describing the parameters of the function.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parameters: Option<serde_json::Value>,
    /// Whether to enforce strict parameter validation. Default `true`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub strict: Option<bool>,
    /// A description of the function. Used by the model to determine whether or not to call the
    /// function.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
1003
/// Filters applied to a web search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolFilters {
    /// Allowed domains for the search. If not provided, all domains are allowed.
    /// Subdomains of the provided domains are allowed as well.
    ///
    /// Example: `["pubmed.ncbi.nlm.nih.gov"]`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allowed_domains: Option<Vec<String>>,
}
1013
/// Web search tool configuration.
///
/// Construct with [`WebSearchToolArgs`] (generated builder).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "WebSearchToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
pub struct WebSearchTool {
    /// Filters for the search.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filters: Option<WebSearchToolFilters>,
    /// The approximate location of the user.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user_location: Option<WebSearchApproximateLocation>,
    /// High level guidance for the amount of context window space to use for the search. One of `low`,
    /// `medium`, or `high`. `medium` is the default.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub search_context_size: Option<WebSearchToolSearchContextSize>,
}
1033
/// Amount of context window space to use for a web search.
/// `Medium` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchToolSearchContextSize {
    Low,
    #[default]
    Medium,
    High,
}
1042
/// The type of computer environment controlled by the computer use tool.
/// Defaults to `Browser`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ComputerEnvironment {
    Windows,
    Mac,
    Linux,
    Ubuntu,
    #[default]
    Browser,
}
1053
1054#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1055#[builder(
1056    name = "ComputerUsePreviewToolArgs",
1057    pattern = "mutable",
1058    setter(into, strip_option),
1059    default
1060)]
1061pub struct ComputerUsePreviewTool {
1062    /// The type of computer environment to control.
1063    environment: ComputerEnvironment,
1064    /// The width of the computer display.
1065    display_width: u32,
1066    /// The height of the computer display.
1067    display_height: u32,
1068}
1069
/// The ranker version used for file search.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum RankVersionType {
    /// Let the API pick the ranker; serialized as `"auto"`.
    #[serde(rename = "auto")]
    Auto,
    /// Pinned ranker version; serialized as `"default-2024-11-15"`.
    #[serde(rename = "default-2024-11-15")]
    Default20241115,
}
1077
/// Weights for reciprocal rank fusion in hybrid search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HybridSearch {
    /// The weight of the embedding in the reciprocal ranking fusion.
    pub embedding_weight: f32,
    /// The weight of the text in the reciprocal ranking fusion.
    pub text_weight: f32,
}
1085
/// Options for search result ranking.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RankingOptions {
    /// Weights that control how reciprocal rank fusion balances semantic embedding matches versus
    /// sparse keyword matches when hybrid search is enabled.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hybrid_search: Option<HybridSearch>,
    /// The ranker to use for the file search. Required (not optional).
    pub ranker: RankVersionType,
    /// The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will
    /// attempt to return only the most relevant results, but may return fewer results.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub score_threshold: Option<f32>,
}
1100
/// Discriminator for the user-location object; the only value is
/// `approximate`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchApproximateLocationType {
    #[default]
    Approximate,
}
1107
/// Approximate user location for web search.
///
/// Construct with [`WebSearchApproximateLocationArgs`] (generated builder).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "WebSearchApproximateLocationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct WebSearchApproximateLocation {
    /// The type of location approximation. Defaults to `approximate` when omitted in JSON input.
    #[serde(default)]
    pub r#type: WebSearchApproximateLocationType,
    /// Free text input for the city of the user, e.g. `San Francisco`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub city: Option<String>,
    /// The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,
    /// e.g. `US`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub country: Option<String>,
    /// Free text input for the region of the user, e.g. `California`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub region: Option<String>,
    /// The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g.
    /// `America/Los_Angeles`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timezone: Option<String>,
}
1136
/// Container configuration for a code interpreter.
///
/// NOTE(review): [`CodeInterpreterTool`] derives `Default`, which requires a
/// `Default` impl for this enum, but none is visible in this part of the
/// file — confirm a manual `impl Default` exists elsewhere.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolContainer {
    /// Configuration for a code interpreter container. Optionally specify the IDs of the
    /// files to run the code on.
    Auto(CodeInterpreterContainerAuto),

    /// The container ID. Deserialized untagged, i.e. from a bare JSON string.
    #[serde(untagged)]
    ContainerID(String),
}
1149
/// Auto configuration for code interpreter container.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct CodeInterpreterContainerAuto {
    /// An optional list of uploaded files to make available to your code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_ids: Option<Vec<String>>,

    /// Optional memory limit for the container.
    /// NOTE(review): units are not shown in this file — presumably bytes;
    /// confirm against the API reference.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_limit: Option<u64>,
}
1160
/// A tool that runs Python code to help generate a response to a prompt.
///
/// Construct with [`CodeInterpreterToolArgs`] (generated builder).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "CodeInterpreterToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CodeInterpreterTool {
    /// The code interpreter container. Can be a container ID or an object that
    /// specifies uploaded file IDs to make available to your code, along with an
    /// optional `memory_limit` setting.
    pub container: CodeInterpreterToolContainer,
}
1175
/// Optional mask for image inpainting. Provide either a base64 image URL
/// or an uploaded file ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolInputImageMask {
    /// Base64-encoded mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
    /// File ID for the mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
}
1185
1186#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1187#[serde(rename_all = "lowercase")]
1188pub enum InputFidelity {
1189    #[default]
1190    High,
1191    Low,
1192}
1193
/// Moderation level for generated images. Defaults to `Auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolModeration {
    #[default]
    Auto,
    Low,
}
1201
/// Whether to generate a new image or edit an existing image.
/// Defaults to `Auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenActionEnum {
    /// Generate a new image.
    Generate,
    /// Edit an existing image.
    Edit,
    /// Automatically determine whether to generate or edit.
    #[default]
    Auto,
}
1214
/// Image generation tool definition.
///
/// Construct with [`ImageGenerationArgs`] (generated builder).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ImageGenerationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct ImageGenTool {
    /// Background type for the generated image. One of `transparent`,
    /// `opaque`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<ImageGenToolBackground>,
    /// Control how much effort the model will exert to match the style and features, especially facial features,
    /// of input images. This parameter is only supported for `gpt-image-1`. Unsupported
    /// for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_fidelity: Option<InputFidelity>,
    /// Optional mask for inpainting. Contains `image_url`
    /// (string, optional) and `file_id` (string, optional).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_image_mask: Option<ImageGenToolInputImageMask>,
    /// The image generation model to use. Default: `gpt-image-1`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Moderation level for the generated image. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub moderation: Option<ImageGenToolModeration>,
    /// Compression level for the output image. Default: 100.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_compression: Option<u8>,
    /// The output format of the generated image. One of `png`, `webp`, or
    /// `jpeg`. Default: `png`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_format: Option<ImageGenToolOutputFormat>,
    /// Number of partial images to generate in streaming mode, from 0 (default value) to 3.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub partial_images: Option<u8>,
    /// The quality of the generated image. One of `low`, `medium`, `high`,
    /// or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quality: Option<ImageGenToolQuality>,
    /// The size of the generated image. One of `1024x1024`, `1024x1536`,
    /// `1536x1024`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<ImageGenToolSize>,
    /// Whether to generate a new image or edit an existing image. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub action: Option<ImageGenActionEnum>,
}
1266
/// Background type for a generated image. Defaults to `Auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolBackground {
    Transparent,
    Opaque,
    #[default]
    Auto,
}
1275
/// Output format of a generated image. Defaults to `Png`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolOutputFormat {
    #[default]
    Png,
    Webp,
    Jpeg,
}
1284
/// Quality of a generated image. Defaults to `Auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolQuality {
    Low,
    Medium,
    High,
    #[default]
    Auto,
}
1294
/// Size of a generated image. Defaults to `Auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolSize {
    #[default]
    Auto,
    /// Serialized as `"1024x1024"`.
    #[serde(rename = "1024x1024")]
    Size1024x1024,
    /// Serialized as `"1024x1536"`.
    #[serde(rename = "1024x1536")]
    Size1024x1536,
    /// Serialized as `"1536x1024"`.
    #[serde(rename = "1536x1024")]
    Size1536x1024,
}
1307
/// Mode for a restricted tool set. See [`ToolChoiceAllowed::mode`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceAllowedMode {
    /// The model may pick from the allowed tools or generate a message.
    Auto,
    /// The model must call one or more of the allowed tools.
    Required,
}
1314
/// Constrains the tools available to the model to a pre-defined set.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceAllowed {
    /// Constrains the tools available to the model to a pre-defined set.
    ///
    /// `auto` allows the model to pick from among the allowed tools and generate a
    /// message.
    ///
    /// `required` requires the model to call one or more of the allowed tools.
    pub mode: ToolChoiceAllowedMode,
    /// A list of tool definitions that the model should be allowed to call.
    ///
    /// For the Responses API, the list of tool definitions might look like:
    /// ```json
    /// [
    ///   { "type": "function", "name": "get_weather" },
    ///   { "type": "mcp", "server_label": "deepwiki" },
    ///   { "type": "image_generation" }
    /// ]
    /// ```
    pub tools: Vec<serde_json::Value>,
}
1336
/// The type of hosted tool the model should use. Learn more about
/// [built-in tools](https://platform.openai.com/docs/guides/tools).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceTypes {
    FileSearch,
    WebSearchPreview,
    ComputerUsePreview,
    CodeInterpreter,
    ImageGeneration,
}
1348
/// Forces the model to call a specific function.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceFunction {
    /// The name of the function to call.
    pub name: String,
}
1354
/// Forces the model to call a specific tool on a remote MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceMCP {
    /// The name of the tool to call on the server.
    pub name: String,
    /// The label of the MCP server to use.
    pub server_label: String,
}
1362
/// Forces the model to call a specific custom tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceCustom {
    /// The name of the custom tool to call.
    pub name: String,
}
1368
/// How the model should select which tool (or tools) to use when generating
/// a response. Tagged by `type`, with two untagged fallbacks (`Hosted` and
/// `Mode`) that are tried last during deserialization.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceParam {
    /// Constrains the tools available to the model to a pre-defined set.
    AllowedTools(ToolChoiceAllowed),

    /// Use this option to force the model to call a specific function.
    Function(ToolChoiceFunction),

    /// Use this option to force the model to call a specific tool on a remote MCP server.
    Mcp(ToolChoiceMCP),

    /// Use this option to force the model to call a custom tool.
    Custom(ToolChoiceCustom),

    /// Forces the model to call the apply_patch tool when executing a tool call.
    ApplyPatch,

    /// Forces the model to call the function shell tool when a tool call is required.
    Shell,

    /// Indicates that the model should use a built-in tool to generate a response.
    /// [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
    #[serde(untagged)]
    Hosted(ToolChoiceTypes),

    /// Controls which (if any) tool is called by the model.
    ///
    /// `none` means the model will not call any tool and instead generates a message.
    ///
    /// `auto` means the model can pick between generating a message or calling one or
    /// more tools.
    ///
    /// `required` means the model must call one or more tools.
    #[serde(untagged)]
    Mode(ToolChoiceOptions),
}
1406
/// Top-level tool-choice mode: `none`, `auto`, or `required`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceOptions {
    None,
    Auto,
    Required,
}
1414
/// An error that occurred while generating the response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ErrorObject {
    /// A machine-readable error code that was returned.
    pub code: String,
    /// A human-readable description of the error that was returned.
    pub message: String,
}
1423
/// Details about an incomplete response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct IncompleteDetails {
    /// The reason why the response is incomplete.
    pub reason: String,
}
1430
/// One of the most likely alternative tokens at a position, with its
/// log probability.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TopLogProb {
    /// Raw bytes of the token (presumably its UTF-8 encoding — confirm
    /// against the API reference).
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The token text.
    pub token: String,
}
1437
/// Log-probability information for a single output token, including the
/// most likely alternatives at its position.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LogProb {
    /// Raw bytes of the token (presumably its UTF-8 encoding — confirm
    /// against the API reference).
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The token text.
    pub token: String,
    /// The most likely alternative tokens at this position.
    pub top_logprobs: Vec<TopLogProb>,
}
1445
1446#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1447pub struct ResponseTopLobProb {
1448    /// The log probability of this token.
1449    pub logprob: f64,
1450    /// A possible text token.
1451    pub token: String,
1452}
1453
/// Log-probability information for a single output token in a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseLogProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
    /// The log probability of the top 20 most likely tokens.
    pub top_logprobs: Vec<ResponseTopLobProb>,
}
1463
/// A simple text output from the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputTextContent {
    /// The annotations of the text output.
    pub annotations: Vec<Annotation>,
    /// Log probabilities for the output tokens, when requested.
    // NOTE(review): no `skip_serializing_if`, so this serializes as `null` when
    // absent — confirm the API accepts an explicit null on input.
    pub logprobs: Option<Vec<LogProb>>,
    /// The text output from the model.
    pub text: String,
}
1473
/// An annotation that applies to a span of output text.
///
/// Discriminated by the `type` field (`file_citation`, `url_citation`,
/// `container_file_citation`, or `file_path`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Annotation {
    /// A citation to a file.
    FileCitation(FileCitationBody),
    /// A citation for a web resource used to generate a model response.
    UrlCitation(UrlCitationBody),
    /// A citation for a container file used to generate a model response.
    ContainerFileCitation(ContainerFileCitationBody),
    /// A path to a file.
    FilePath(FilePath),
}
1487
1488#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1489pub struct FileCitationBody {
1490    /// The ID of the file.
1491    file_id: String,
1492    /// The filename of the file cited.
1493    filename: String,
1494    /// The index of the file in the list of files.
1495    index: u32,
1496}
1497
1498#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1499pub struct UrlCitationBody {
1500    /// The index of the last character of the URL citation in the message.
1501    end_index: u32,
1502    /// The index of the first character of the URL citation in the message.
1503    start_index: u32,
1504    /// The title of the web resource.
1505    title: String,
1506    /// The URL of the web resource.
1507    url: String,
1508}
1509
1510#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1511pub struct ContainerFileCitationBody {
1512    /// The ID of the container file.
1513    container_id: String,
1514    /// The index of the last character of the container file citation in the message.
1515    end_index: u32,
1516    /// The ID of the file.
1517    file_id: String,
1518    /// The filename of the container file cited.
1519    filename: String,
1520    /// The index of the first character of the container file citation in the message.
1521    start_index: u32,
1522}
1523
1524#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1525pub struct FilePath {
1526    /// The ID of the file.
1527    file_id: String,
1528    /// The index of the file in the list of files.
1529    index: u32,
1530}
1531
/// A refusal explanation from the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RefusalContent {
    /// The refusal explanation from the model.
    pub refusal: String,
}
1538
/// A message generated by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputMessage {
    /// The content of the output message.
    pub content: Vec<OutputMessageContent>,
    /// The unique ID of the output message.
    pub id: String,
    /// The role of the output message. Always `assistant`.
    pub role: AssistantRole,
    /// The status of the message input. One of `in_progress`, `completed`, or
    /// `incomplete`. Populated when input items are returned via API.
    pub status: OutputStatus,
    // NOTE(review): the `type` discriminator ("message") is presumably supplied by the
    // enclosing `#[serde(tag = "type")]` enum (see `Item`), so it is not stored here.
    ///// The type of the output message. Always `message`.
    //pub r#type: MessageType,
}
1554
/// The type tag of an output message. Always `message`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum MessageType {
    #[default]
    Message,
}
1561
/// The role for an output message - always `assistant`.
/// This type ensures type safety by only allowing the assistant role.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum AssistantRole {
    #[default]
    Assistant,
}
1570
/// A content part of an output message, discriminated by the `type` field
/// (`output_text` or `refusal`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputMessageContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
}
1579
/// A content part produced by the model, discriminated by the `type` field
/// (`output_text`, `refusal`, or `reasoning_text`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
    /// Reasoning text from the model.
    ReasoningText(ReasoningTextContent),
}
1590
/// Reasoning text emitted by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningTextContent {
    /// The reasoning text from the model.
    pub text: String,
}
1596
/// A reasoning item representing the model's chain of thought, including summary paragraphs.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningItem {
    /// Unique identifier of the reasoning content.
    pub id: String,
    /// Reasoning summary content.
    pub summary: Vec<SummaryPart>,
    /// Reasoning text content.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<Vec<ReasoningTextContent>>,
    /// The encrypted content of the reasoning item - populated when a response is generated with
    /// `reasoning.encrypted_content` in the `include` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encrypted_content: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
1616
/// A part of a reasoning summary, discriminated by the `type` field
/// (currently only `summary_text`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SummaryPart {
    SummaryText(SummaryTextContent),
}
1622
/// File search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCall {
    /// The unique ID of the file search tool call.
    pub id: String,
    /// The queries used to search for files.
    pub queries: Vec<String>,
    /// The status of the file search tool call. One of `in_progress`, `searching`,
    /// `incomplete`, `failed`, or `completed`.
    pub status: FileSearchToolCallStatus,
    /// The results of the file search tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub results: Option<Vec<FileSearchToolCallResult>>,
}
1637
/// Status of a file search tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FileSearchToolCallStatus {
    InProgress,
    Searching,
    Incomplete,
    Failed,
    Completed,
}
1647
/// A single result from a file search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCallResult {
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful
    /// for storing additional information about the object in a structured format, and
    /// for querying objects via the API or the dashboard. Keys are strings with a
    /// maximum length of 64 characters. Values are strings with a maximum length of
    /// 512 characters, booleans, or numbers.
    pub attributes: HashMap<String, serde_json::Value>,
    /// The unique ID of the file.
    pub file_id: String,
    /// The name of the file.
    pub filename: String,
    /// The relevance score of the file - a value between 0 and 1.
    pub score: f32,
    /// The text that was retrieved from the file.
    pub text: String,
}
1665
/// A pending safety check for a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallSafetyCheckParam {
    /// The ID of the pending safety check.
    pub id: String,
    /// The type of the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// Details about the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
1677
/// Status of a web search tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum WebSearchToolCallStatus {
    InProgress,
    Searching,
    Completed,
    Failed,
}
1686
/// A source consulted during a web search action.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionSearchSource {
    /// The type of source. Always `url`.
    pub r#type: String,
    /// The URL of the source.
    pub url: String,
}
1694
/// A web search query performed by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionSearch {
    /// The search query.
    pub query: String,
    /// The sources used in the search.
    pub sources: Option<Vec<WebSearchActionSearchSource>>,
}
1702
/// An action that opens a specific URL from search results.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionOpenPage {
    /// The URL opened by the model.
    pub url: Option<String>,
}
1708
/// An action that searches for a pattern within a loaded page.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionFind {
    /// The URL of the page searched for the pattern.
    pub url: String,
    /// The pattern or text to search for within the page.
    pub pattern: String,
}
1716
/// The specific action taken in a web search call, discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum WebSearchToolCallAction {
    /// Action type "search" - Performs a web search query.
    Search(WebSearchActionSearch),
    /// Action type "open_page" - Opens a specific URL from search results.
    OpenPage(WebSearchActionOpenPage),
    /// Action type "find": Searches for a pattern within a loaded page.
    Find(WebSearchActionFind),
    /// Action type "find_in_page": https://platform.openai.com/docs/guides/tools-web-search#output-and-citations
    FindInPage(WebSearchActionFind),
}
1729
/// Web search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolCall {
    /// An object describing the specific action taken in this web search call. Includes
    /// details on how the model used the web (search, open_page, find, find_in_page).
    pub action: WebSearchToolCallAction,
    /// The unique ID of the web search tool call.
    pub id: String,
    /// The status of the web search tool call.
    pub status: WebSearchToolCallStatus,
}
1741
/// Output from a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerToolCall {
    /// The computer action the model wants performed (click, type, scroll, ...).
    pub action: ComputerAction,
    /// An identifier used when responding to the tool call with output.
    pub call_id: String,
    /// The unique ID of the computer call.
    pub id: String,
    /// The pending safety checks for the computer call.
    pub pending_safety_checks: Vec<ComputerCallSafetyCheckParam>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    pub status: OutputStatus,
}
1756
/// An x/y coordinate pair.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CoordParam {
    /// The x-coordinate.
    pub x: i32,
    /// The y-coordinate.
    pub y: i32,
}
1765
/// Represents all user‐triggered actions, discriminated by the `type` field.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ComputerAction {
    /// A click action.
    Click(ClickParam),

    /// A double click action.
    DoubleClick(DoubleClickAction),

    /// A drag action.
    Drag(DragParam),

    /// A collection of keypresses the model would like to perform.
    Keypress(KeyPressAction),

    /// A mouse move action.
    Move(MoveParam),

    /// A screenshot action (carries no payload).
    Screenshot,

    /// A scroll action.
    Scroll(ScrollParam),

    /// An action to type in text.
    Type(TypeParam),

    /// A wait action (carries no payload).
    Wait,
}
1797
/// The mouse button used for a click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ClickButtonType {
    Left,
    Right,
    Wheel,
    Back,
    Forward,
}
1807
/// A click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ClickParam {
    /// Indicates which mouse button was pressed during the click. One of `left`,
    /// `right`, `wheel`, `back`, or `forward`.
    pub button: ClickButtonType,
    /// The x-coordinate where the click occurred.
    pub x: i32,
    /// The y-coordinate where the click occurred.
    pub y: i32,
}
1819
/// A double click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DoubleClickAction {
    /// The x-coordinate where the double click occurred.
    pub x: i32,
    /// The y-coordinate where the double click occurred.
    pub y: i32,
}
1828
/// A drag action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DragParam {
    /// An array of coordinates representing the path of the drag action.
    pub path: Vec<CoordParam>,
}
1835
/// A keypress action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KeyPressAction {
    /// The combination of keys the model is requesting to be pressed.
    /// This is an array of strings, each representing a key.
    pub keys: Vec<String>,
}
1843
/// A mouse move action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MoveParam {
    /// The x-coordinate to move to.
    pub x: i32,
    /// The y-coordinate to move to.
    pub y: i32,
}
1852
/// A scroll action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ScrollParam {
    /// The horizontal scroll distance.
    pub scroll_x: i32,
    /// The vertical scroll distance.
    pub scroll_y: i32,
    /// The x-coordinate where the scroll occurred.
    pub x: i32,
    /// The y-coordinate where the scroll occurred.
    pub y: i32,
}
1865
/// A typing (text entry) action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct TypeParam {
    /// The text to type.
    pub text: String,
}
1872
/// A call to a user-defined function generated by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionToolCall {
    /// A JSON string of the arguments to pass to the function.
    pub arguments: String,
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// The name of the function to run.
    pub name: String,
    /// The unique ID of the function tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
1889
/// Status of an image generation tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ImageGenToolCallStatus {
    InProgress,
    Completed,
    Generating,
    Failed,
}
1898
/// An image generation tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolCall {
    /// The unique ID of the image generation call.
    pub id: String,
    /// The generated image encoded in base64.
    pub result: Option<String>,
    /// The status of the image generation call.
    pub status: ImageGenToolCallStatus,
}
1908
/// Status of a code interpreter tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum CodeInterpreterToolCallStatus {
    InProgress,
    Completed,
    Incomplete,
    Interpreting,
    Failed,
}
1918
/// Output of a code interpreter request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterToolCall {
    /// The code to run, or null if not available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// ID of the container used to run the code.
    pub container_id: String,
    /// The unique ID of the code interpreter tool call.
    pub id: String,
    /// The outputs generated by the code interpreter, such as logs or images.
    /// Can be null if no outputs are available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub outputs: Option<Vec<CodeInterpreterToolCallOutput>>,
    /// The status of the code interpreter tool call.
    /// Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`.
    pub status: CodeInterpreterToolCallStatus,
}
1937
/// Individual result from a code interpreter: either logs or files.
/// Discriminated by the `type` field (`logs` or `image`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolCallOutput {
    /// Code interpreter output logs
    Logs(CodeInterpreterOutputLogs),
    /// Code interpreter output image
    Image(CodeInterpreterOutputImage),
}
1947
/// Log output produced by a code interpreter run.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputLogs {
    /// The logs output from the code interpreter.
    pub logs: String,
}
1953
/// Image output produced by a code interpreter run.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputImage {
    /// The URL of the image output from the code interpreter.
    pub url: String,
}
1959
1960#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1961pub struct CodeInterpreterFile {
1962    /// The ID of the file.
1963    file_id: String,
1964    /// The MIME type of the file.
1965    mime_type: String,
1966}
1967
/// A local shell tool call generated by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCall {
    /// Execute a shell command on the server.
    pub action: LocalShellExecAction,
    /// The unique ID of the local shell tool call generated by the model.
    pub call_id: String,
    /// The unique ID of the local shell call.
    pub id: String,
    /// The status of the local shell call.
    pub status: OutputStatus,
}
1979
/// Define the shape of a local shell action (exec).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellExecAction {
    /// The command to run.
    pub command: Vec<String>,
    /// Environment variables to set for the command.
    pub env: HashMap<String, String>,
    /// Optional timeout in milliseconds for the command.
    pub timeout_ms: Option<u64>,
    /// Optional user to run the command as.
    pub user: Option<String>,
    /// Optional working directory to run the command in.
    pub working_directory: Option<String>,
}
1994
/// Commands and limits describing how to run the shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellActionParam {
    /// Ordered shell commands for the execution environment to run.
    pub commands: Vec<String>,
    /// Maximum wall-clock time in milliseconds to allow the shell commands to run.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timeout_ms: Option<u64>,
    /// Maximum number of UTF-8 characters to capture from combined stdout and stderr output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
2007
/// Status values reported for shell tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FunctionShellCallItemStatus {
    InProgress,
    Completed,
    Incomplete,
}
2016
/// The environment for a shell call item (request side).
/// Discriminated by the `type` field (`local` or `container_reference`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallItemEnvironment {
    /// Use a local computer environment.
    Local(LocalEnvironmentParam),
    /// Reference an existing container by ID.
    ContainerReference(ContainerReferenceParam),
}
2026
/// A tool representing a request to execute one or more shell commands.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallItemParam {
    /// The unique ID of the shell tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellActionParam,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<FunctionShellCallItemStatus>,
    /// The environment to execute the shell commands in.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub environment: Option<FunctionShellCallItemEnvironment>,
}
2044
/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcomeParam {
    /// The exit code returned by the shell process.
    pub exit_code: i32,
}
2051
/// The exit or timeout outcome associated with this chunk.
/// Discriminated by the `type` field (`timeout` or `exit`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcomeParam {
    Timeout,
    Exit(FunctionShellCallOutputExitOutcomeParam),
}
2059
/// Captured stdout and stderr for a portion of a shell tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContentParam {
    /// Captured stdout output for this chunk of the shell call.
    pub stdout: String,
    /// Captured stderr output for this chunk of the shell call.
    pub stderr: String,
    /// The exit or timeout outcome associated with this chunk.
    pub outcome: FunctionShellCallOutputOutcomeParam,
}
2070
/// The streamed output items emitted by a shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputItemParam {
    /// The unique ID of the shell tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// Captured chunks of stdout and stderr output, along with their associated outcomes.
    pub output: Vec<FunctionShellCallOutputContentParam>,
    /// The maximum number of UTF-8 characters captured for this shell call's combined output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
2085
/// Status values reported for apply_patch tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatusParam {
    InProgress,
    Completed,
}
2093
/// Instruction for creating a new file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperationParam {
    /// Path of the file to create relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply when creating the file.
    pub diff: String,
}
2102
/// Instruction for deleting an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperationParam {
    /// Path of the file to delete relative to the workspace root.
    pub path: String,
}
2109
/// Instruction for updating an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperationParam {
    /// Path of the file to update relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply to the existing file.
    pub diff: String,
}
2118
/// One of the create_file, delete_file, or update_file operations supplied to the apply_patch tool.
/// Discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperationParam {
    CreateFile(ApplyPatchCreateFileOperationParam),
    DeleteFile(ApplyPatchDeleteFileOperationParam),
    UpdateFile(ApplyPatchUpdateFileOperationParam),
}
2127
/// A tool call representing a request to create, delete, or update files using diff patches.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallItemParam {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatusParam,
    /// The specific create, delete, or update instruction for the apply_patch tool call.
    pub operation: ApplyPatchOperationParam,
}
2141
/// Outcome values reported for apply_patch tool call outputs.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallOutputStatusParam {
    Completed,
    Failed,
}
2149
/// The streamed output emitted by an apply patch tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallOutputItemParam {
    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call output. One of `completed` or `failed`.
    pub status: ApplyPatchCallOutputStatusParam,
    /// Optional human-readable log text from the apply patch tool (e.g., patch results or errors).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output: Option<String>,
}
2164
/// Shell exec action
/// Execute a shell command.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellAction {
    /// A list of commands to run.
    pub commands: Vec<String>,
    /// Optional timeout in milliseconds for the commands.
    pub timeout_ms: Option<u64>,
    /// Optional maximum number of characters to return from each command.
    pub max_output_length: Option<u64>,
}
2176
/// Status values reported for function shell tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum LocalShellCallStatus {
    InProgress,
    Completed,
    Incomplete,
}
2185
/// The environment for a shell call (response side).
/// Discriminated by the `type` field (`local` or `container_reference`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallEnvironment {
    /// A local computer environment.
    Local,
    /// A referenced container.
    ContainerReference(ContainerReferenceResource),
}
2195
/// A tool call that executes one or more shell commands in a managed environment.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCall {
    /// The unique ID of the function shell tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the function shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellAction,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    pub status: LocalShellCallStatus,
    /// The environment in which the shell commands were executed.
    pub environment: Option<FunctionShellCallEnvironment>,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2213
/// The content of a shell tool call output that was emitted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContent {
    /// The standard output that was captured.
    pub stdout: String,
    /// The standard error output that was captured.
    pub stderr: String,
    /// Represents either an exit outcome (with an exit code) or a timeout outcome for a shell call output chunk.
    // `flatten` merges the outcome's `type` tag (and any payload) into this object
    // rather than nesting it under an `outcome` key.
    #[serde(flatten)]
    pub outcome: FunctionShellCallOutputOutcome,
    /// The identifier of the actor that created the item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2228
/// Function shell call outcome.
/// Discriminated by the `type` field (`timeout` or `exit`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcome {
    Timeout,
    Exit(FunctionShellCallOutputExitOutcome),
}
2236
/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcome {
    /// Exit code from the shell process.
    pub exit_code: i32,
}
2243
/// The output of a shell tool call that was emitted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutput {
    /// The unique ID of the shell call output. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// An array of shell call output contents.
    pub output: Vec<FunctionShellCallOutputContent>,
    /// The maximum length of the shell command output. This is generated by the model and should be
    /// passed back with the raw output.
    pub max_output_length: Option<u64>,
    /// The identifier of the actor that created the item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2260
/// Status values reported for apply_patch tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatus {
    InProgress,
    Completed,
}
2268
/// Instruction describing how to create a file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperation {
    /// Path of the file to create.
    pub path: String,
    /// Diff to apply.
    pub diff: String,
}
2277
/// Instruction describing how to delete a file via the apply_patch tool.
///
/// Serialized as the `delete_file` variant of [`ApplyPatchOperation`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperation {
    /// Path of the file to delete.
    pub path: String,
}
2284
/// Instruction describing how to update a file via the apply_patch tool.
///
/// Serialized as the `update_file` variant of [`ApplyPatchOperation`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperation {
    /// Path of the file to update.
    pub path: String,
    /// Diff to apply.
    pub diff: String,
}
2293
/// One of the create_file, delete_file, or update_file operations applied via apply_patch.
///
/// Internally tagged on `type` with snake_case tags (`create_file`, `delete_file`,
/// `update_file`); each variant's payload fields are inlined next to the tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperation {
    /// Create a new file (`type: "create_file"`).
    CreateFile(ApplyPatchCreateFileOperation),
    /// Delete an existing file (`type: "delete_file"`).
    DeleteFile(ApplyPatchDeleteFileOperation),
    /// Update an existing file (`type: "update_file"`).
    UpdateFile(ApplyPatchUpdateFileOperation),
}
2302
/// A tool call that applies file diffs by creating, deleting, or updating files.
///
/// Appears as the `apply_patch_call` variant of [`OutputItem`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCall {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatus,
    /// One of the create_file, delete_file, or update_file operations applied via apply_patch.
    pub operation: ApplyPatchOperation,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2318
/// Outcome values reported for apply_patch tool call outputs.
///
/// Serialized in snake_case: `completed` or `failed`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallOutputStatus {
    /// The patch was applied successfully.
    Completed,
    /// The patch could not be applied.
    Failed,
}
2326
2327/// The output emitted by an apply patch tool call.
2328#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2329pub struct ApplyPatchToolCallOutput {
2330    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
2331    pub id: String,
2332    /// The unique ID of the apply patch tool call generated by the model.
2333    pub call_id: String,
2334    /// The status of the apply patch tool call output. One of `completed` or `failed`.
2335    pub status: ApplyPatchCallOutputStatus,
2336    /// Optional textual output returned by the apply patch tool.
2337    pub output: Option<String>,
2338    /// The ID of the entity that created this tool call output.
2339    #[serde(skip_serializing_if = "Option::is_none")]
2340    pub created_by: Option<String>,
2341}
2342
2343/// Output of an MCP server tool invocation.
2344#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2345pub struct MCPToolCall {
2346    /// A JSON string of the arguments passed to the tool.
2347    pub arguments: String,
2348    /// The unique ID of the tool call.
2349    pub id: String,
2350    /// The name of the tool that was run.
2351    pub name: String,
2352    /// The label of the MCP server running the tool.
2353    pub server_label: String,
2354    /// Unique identifier for the MCP tool call approval request. Include this value
2355    /// in a subsequent `mcp_approval_response` input to approve or reject the corresponding
2356    /// tool call.
2357    pub approval_request_id: Option<String>,
2358    /// Error message from the call, if any.
2359    pub error: Option<String>,
2360    /// The output from the tool call.
2361    pub output: Option<String>,
2362    /// The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
2363    /// `calling`, or `failed`.
2364    pub status: Option<MCPToolCallStatus>,
2365}
2366
2367#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2368#[serde(rename_all = "snake_case")]
2369pub enum MCPToolCallStatus {
2370    InProgress,
2371    Completed,
2372    Incomplete,
2373    Calling,
2374    Failed,
2375}
2376
/// A list of tools available on an MCP server, as returned by a list-tools item.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPListTools {
    /// The unique ID of the list.
    pub id: String,
    /// The label of the MCP server.
    pub server_label: String,
    /// The tools available on the server.
    pub tools: Vec<MCPListToolsTool>,
    /// Error message if listing failed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
2389
/// A request for human approval before an MCP server tool is invoked.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalRequest {
    /// JSON string of arguments for the tool.
    pub arguments: String,
    /// The unique ID of the approval request.
    pub id: String,
    /// The name of the tool to run.
    pub name: String,
    /// The label of the MCP server making the request.
    pub server_label: String,
}
2401
/// Instructions inserted into the model's context: either a plain string or a list
/// of input items.
///
/// Untagged: deserialization matches a JSON string to `Text` and a JSON array to
/// `Array` (unambiguous, since the two JSON shapes cannot overlap).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum Instructions {
    /// A text input to the model, equivalent to a text input with the `developer` role.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    Array(Vec<InputItem>),
}
2410
/// The complete response returned by the Responses API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Response {
    /// Whether to run the model response in the background.
    /// [Learn more](https://platform.openai.com/docs/guides/background).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,

    /// Billing information for the response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub billing: Option<Billing>,

    /// The conversation that this response belongs to. Input items and output
    /// items from this response are automatically added to this conversation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<Conversation>,

    /// Unix timestamp (in seconds) when this Response was created.
    pub created_at: u64,

    /// Unix timestamp (in seconds) of when this Response was completed.
    /// Only present when the status is `completed`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub completed_at: Option<u64>,

    /// An error object returned when the model fails to generate a Response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorObject>,

    /// Unique identifier for this response.
    pub id: String,

    /// Details about why the response is incomplete, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub incomplete_details: Option<IncompleteDetails>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When using along with `previous_response_id`, the instructions from a previous response
    /// will not be carried over to the next response. This makes it simple to swap out
    /// system (or developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<Instructions>,

    /// An upper bound for the number of tokens that can be generated for a response,
    /// including visible output tokens and
    /// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u32>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be
    /// useful for storing additional information about the object in a structured
    /// format, and querying for objects via API or the dashboard.
    ///
    /// Keys are strings with a maximum length of 64 characters. Values are strings
    /// with a maximum length of 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: String,

    /// The object type of this resource - always set to `response`.
    pub object: String,

    /// An array of content items generated by the model.
    ///
    /// - The length and order of items in the output array is dependent on the model's response.
    /// - Rather than accessing the first item in the output array and assuming it's an assistant
    ///   message with the content generated by the model, you might consider using
    ///   the `output_text` property where supported in SDKs.
    pub output: Vec<OutputItem>,

    // NOTE: the API's `output_text` field is an SDK-only convenience property (the
    // aggregated text of all `output_text` content items in `output`) and is
    // intentionally not modeled here; aggregate from `output` if you need it.

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// Reference to a prompt template and its variables.
    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<Prompt>,

    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,

    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_retention: Option<PromptCacheRetention>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
    /// usage policies.
    ///
    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
    /// or email address, in order to avoid sending us any identifying information. [Learn
    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub safety_identifier: Option<String>,

    /// Specifies the processing type used for serving the request.
    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
    /// - When not set, the default behavior is 'auto'.
    ///
    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<ServiceTier>,

    /// The status of the response generation.
    /// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or `incomplete`.
    pub status: Status,

    /// What sampling temperature was used, between 0 and 2. Higher values like 0.8 make
    /// outputs more random, lower values like 0.2 make output more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You
    /// can specify which tool to use by setting the `tool_choice` parameter.
    ///
    /// We support the following categories of tools:
    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
    /// - **Function calls (custom tools)**: Functions that are defined by you,
    ///   enabling the model to call your own code with strongly typed arguments
    ///   and outputs. Learn more about
    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
    ///   custom tools to call your own code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
    /// token position, each with an associated log probability.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u8>,

    /// An alternative to sampling with temperature, called nucleus sampling,
    /// where the model considers the results of the tokens with top_p probability
    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
    /// are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,

    /// Represents token usage details including input tokens, output tokens,
    /// a breakdown of output tokens, and the total tokens used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub usage: Option<ResponseUsage>,
}
2613
2614#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2615#[serde(rename_all = "snake_case")]
2616pub enum Status {
2617    Completed,
2618    Failed,
2619    InProgress,
2620    Cancelled,
2621    Queued,
2622    Incomplete,
2623}
2624
/// Output item
///
/// Internally tagged on `type` with snake_case tags derived from the variant names
/// (e.g. `message`, `file_search_call`, `mcp_call`, `apply_patch_call_output`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum OutputItem {
    /// An output message from the model.
    Message(OutputMessage),
    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
    /// for more information.
    FileSearchCall(FileSearchToolCall),
    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling)
    /// for more information.
    FunctionCall(FunctionToolCall),
    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search)
    /// for more information.
    WebSearchCall(WebSearchToolCall),
    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
    /// for more information.
    ComputerCall(ComputerToolCall),
    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API for
    /// subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),
    /// A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
    Compaction(CompactionBody),
    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),
    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),
    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),
    /// A tool call that executes one or more shell commands in a managed environment.
    ShellCall(FunctionShellCall),
    /// The output of a shell tool call.
    ShellCallOutput(FunctionShellCallOutput),
    /// A tool call that applies file diffs by creating, deleting, or updating files.
    ApplyPatchCall(ApplyPatchToolCall),
    /// The output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutput),
    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),
    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),
    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),
    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
2678
/// A call to a custom tool created by the model.
///
/// Marked `#[non_exhaustive]` so fields can be added without a breaking change;
/// construct via deserialization rather than a struct literal.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[non_exhaustive]
pub struct CustomToolCall {
    /// An identifier used to map this custom tool call to a tool call output.
    pub call_id: String,
    /// The input for the custom tool call generated by the model.
    pub input: String,
    /// The name of the custom tool being called.
    pub name: String,
    /// The unique ID of the custom tool call in the OpenAI platform.
    pub id: String,
}
2691
/// Confirmation returned when a response is deleted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct DeleteResponse {
    /// The object type of the deleted resource.
    pub object: String,
    /// Whether the response was successfully deleted.
    pub deleted: bool,
    /// The ID of the deleted response.
    pub id: String,
}
2698
/// A reference to an item by ID.
///
/// `type` is optional; when present it is the raw `type` string of the referenced
/// item (e.g. `"item_reference"`). Uses `r#type` because `type` is a Rust keyword.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct AnyItemReference {
    pub r#type: Option<String>,
    pub id: String,
}
2704
/// A fully materialized item returned when listing response items.
///
/// Internally tagged on `type` with snake_case tags derived from the variant names
/// (e.g. `message`, `function_call_output`, `mcp_approval_response`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ItemResourceItem {
    Message(MessageItem),
    FileSearchCall(FileSearchToolCall),
    ComputerCall(ComputerToolCall),
    ComputerCallOutput(ComputerCallOutputItemParam),
    WebSearchCall(WebSearchToolCall),
    FunctionCall(FunctionToolCall),
    FunctionCallOutput(FunctionCallOutputItemParam),
    ImageGenerationCall(ImageGenToolCall),
    CodeInterpreterCall(CodeInterpreterToolCall),
    LocalShellCall(LocalShellToolCall),
    LocalShellCallOutput(LocalShellToolCallOutput),
    ShellCall(FunctionShellCallItemParam),
    ShellCallOutput(FunctionShellCallOutputItemParam),
    ApplyPatchCall(ApplyPatchToolCallItemParam),
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
    McpListTools(MCPListTools),
    McpApprovalRequest(MCPApprovalRequest),
    McpApprovalResponse(MCPApprovalResponse),
    McpCall(MCPToolCall),
}
2728
2729#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2730#[serde(untagged)]
2731pub enum ItemResource {
2732    ItemReference(AnyItemReference),
2733    Item(ItemResourceItem),
2734}
2735
/// A list of Response items.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseItemList {
    /// The type of object returned, must be `list`.
    pub object: String,
    /// The ID of the first item in the list.
    pub first_id: Option<String>,
    /// The ID of the last item in the list.
    pub last_id: Option<String>,
    /// Whether there are more items in the list.
    pub has_more: bool,
    /// The list of items.
    pub data: Vec<ItemResource>,
}
2750
/// Request body for counting the input tokens of a prospective response.
///
/// Mirrors the response-creation parameters that affect tokenization; all fields
/// are optional. Construct via [`TokenCountsBodyArgs`].
#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
#[builder(
    name = "TokenCountsBodyArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct TokenCountsBody {
    /// The conversation that this response belongs to. Items from this
    /// conversation are prepended to `input_items` for this response request.
    /// Input items and output items from this response are automatically added to this
    /// conversation after this response completes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<ConversationParam>,

    /// Text, image, or file inputs to the model, used to generate a response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
    /// to browse and compare available models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You can specify which tool
    /// to use by setting the `tool_choice` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,
}
2828
/// The result of a token-count request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TokenCountsResource {
    /// The object type of this resource.
    pub object: String,
    /// The number of input tokens the request would consume.
    pub input_tokens: u32,
}
2834
/// A compaction item generated by the `/v1/responses/compact` API.
///
/// Parameter form of [`CompactionBody`] with an optional `id` — presumably used when
/// passing a previously generated compaction summary back as input; confirm against
/// the request-side item types.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactionSummaryItemParam {
    /// The ID of the compaction item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The encrypted content of the compaction summary.
    pub encrypted_content: String,
}
2844
/// A compaction item generated by the `/v1/responses/compact` API.
///
/// Appears as the `compaction` variant of [`OutputItem`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactionBody {
    /// The unique ID of the compaction item.
    pub id: String,
    /// The encrypted content that was produced by compaction.
    pub encrypted_content: String,
    /// The identifier of the actor that created the item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2856
/// Request to compact a conversation.
///
/// Construct via [`CompactResponseRequestArgs`]; only `model` is required.
#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
#[builder(name = "CompactResponseRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CompactResponseRequest {
    /// Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wide range of models
    /// with different capabilities, performance characteristics, and price points. Refer to the
    /// [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: String,

    /// Text, image, or file inputs to the model, used to generate a response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,
}
2888
/// The compacted response object.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactResource {
    /// The unique identifier for the compacted response.
    pub id: String,
    /// The object type. Always `response.compaction`.
    pub object: String,
    /// The compacted list of output items. This is a list of all user messages,
    /// followed by a single compaction item.
    pub output: Vec<OutputItem>,
    /// Unix timestamp (in seconds) when the compacted conversation was created.
    pub created_at: u64,
    /// Token accounting for the compaction pass, including cached, reasoning, and total tokens.
    pub usage: ResponseUsage,
}
2904
2905// ============================================================
2906// Container / Environment Types
2907// ============================================================
2908
/// A domain-scoped secret injected for allowlisted domains.
///
/// NOTE(review): `value` carries the raw secret and is included in `Debug` output
/// and serialization — avoid logging this type.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ContainerNetworkPolicyDomainSecretParam {
    /// The domain associated with the secret.
    pub domain: String,
    /// The name of the secret to inject for the domain.
    pub name: String,
    /// The secret value to inject for the domain.
    pub value: String,
}
2919
/// Details for an allowlist network policy.
///
/// Payload of [`ContainerNetworkPolicy::Allowlist`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct ContainerNetworkPolicyAllowlistDetails {
    /// A list of allowed domains.
    pub allowed_domains: Vec<String>,
    /// Optional domain-scoped secrets for allowlisted domains.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub domain_secrets: Option<Vec<ContainerNetworkPolicyDomainSecretParam>>,
}
2929
/// Network access policy for a container.
///
/// Internally tagged on the wire: serialized as a JSON object whose `"type"`
/// field is `"disabled"` or `"allowlist"` (variant names in snake_case); for
/// the allowlist variant, the details' fields appear alongside the tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ContainerNetworkPolicy {
    /// Disable all outbound network access.
    Disabled,
    /// Allow access only to specified domains.
    Allowlist(ContainerNetworkPolicyAllowlistDetails),
}
2939
/// A skill referenced by ID.
///
/// NOTE(review): the derived `Default` produces an empty `skill_id`, which is
/// presumably not a valid reference server-side — callers should always set it.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct SkillReferenceParam {
    /// The ID of the skill to reference.
    pub skill_id: String,
    /// An optional specific version to use.
    /// Omitted from the serialized JSON when `None` (latest is implied).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
}
2949
2950/// An inline skill source (base64-encoded zip).
2951#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2952pub struct InlineSkillSourceParam {
2953    /// The media type. Always `"application/zip"`.
2954    pub media_type: String,
2955    /// The base64-encoded skill data.
2956    pub data: String,
2957}
2958
/// An inline skill definition.
///
/// All fields are required; used as the payload of [`SkillParam::Inline`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct InlineSkillParam {
    /// The name of the skill.
    pub name: String,
    /// The description of the skill.
    pub description: String,
    /// The inline source for the skill.
    pub source: InlineSkillSourceParam,
}
2969
/// A skill parameter — either a reference or inline definition.
///
/// Internally tagged: serialized with a `"type"` field of `"skill_reference"`
/// or `"inline"` (variant names in snake_case), with the payload's fields
/// appearing alongside the tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SkillParam {
    /// Reference a skill by ID.
    SkillReference(SkillReferenceParam),
    /// Provide an inline skill definition.
    Inline(InlineSkillParam),
}
2979
/// Automatically creates a container for the request.
///
/// All fields are optional and omitted from the serialized JSON when `None`;
/// the derived `Default` requests a container with no files, no explicit
/// network policy, and no skills.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct ContainerAutoParam {
    /// An optional list of uploaded file IDs to make available in the container.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_ids: Option<Vec<String>>,
    /// Network access policy for the container.
    // NOTE(review): the server-side default when this is `None` is not visible
    // here — presumably service-defined; confirm against the API reference.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub network_policy: Option<ContainerNetworkPolicy>,
    /// An optional list of skills to make available in the container.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub skills: Option<Vec<SkillParam>>,
}
2993
/// A local skill available in a local environment.
///
/// Unlike [`SkillParam`], a local skill is located by filesystem path rather
/// than by uploaded ID or inline archive.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalSkillParam {
    /// The name of the skill.
    pub name: String,
    /// The description of the skill.
    pub description: String,
    /// The path to the directory containing the skill.
    pub path: String,
}
3004
/// Uses a local computer environment.
///
/// The derived `Default` yields a local environment with no skills configured.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct LocalEnvironmentParam {
    /// An optional list of local skills.
    /// Omitted from the serialized JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub skills: Option<Vec<LocalSkillParam>>,
}
3012
/// References a container created with the /v1/containers endpoint.
///
/// Request-side counterpart of [`ContainerReferenceResource`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ContainerReferenceParam {
    /// The ID of the referenced container.
    pub container_id: String,
}
3019
/// A resource reference to a container by ID.
///
/// Response-side counterpart of [`ContainerReferenceParam`]; the two are kept
/// as distinct types to mirror the API's param/resource schema split even
/// though their fields currently coincide.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ContainerReferenceResource {
    /// The ID of the referenced container.
    pub container_id: String,
}
3026
/// The execution environment for a shell tool — container or local.
///
/// Internally tagged: serialized with a `"type"` field of `"container_auto"`,
/// `"local"`, or `"container_reference"` (variant names in snake_case), with
/// the payload's fields appearing alongside the tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellEnvironment {
    /// Automatically creates a container for this request.
    ContainerAuto(ContainerAutoParam),
    /// Use a local computer environment.
    Local(LocalEnvironmentParam),
    /// Reference an existing container by ID.
    ContainerReference(ContainerReferenceParam),
}
3038
/// Parameters for the shell function tool.
///
/// The derived `Default` leaves `environment` unset, which is omitted from
/// the serialized JSON (the service then chooses the environment).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct FunctionShellToolParam {
    /// The execution environment for the shell tool.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub environment: Option<FunctionShellEnvironment>,
}
3046
/// Context management configuration.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ContextManagementParam {
    /// The context management strategy type.
    // NOTE(review): stringly-typed because the set of strategies is open-ended;
    // serialized as `"type"` (renamed since `type` is a Rust keyword). If the
    // API fixes the set of strategies, consider promoting this to an enum.
    #[serde(rename = "type")]
    pub type_: String,
    /// Minimum number of tokens to retain before compacting.
    /// Omitted from the serialized JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compact_threshold: Option<u32>,
}