
async_openai/types/responses/response.rs

1use crate::error::OpenAIError;
2use crate::types::mcp::{MCPListToolsTool, MCPTool};
3use crate::types::responses::{
4    CustomGrammarFormatParam, Filter, ImageDetail, ReasoningEffort, ResponseFormatJsonSchema,
5    ResponseUsage, SummaryTextContent,
6};
7use derive_builder::Builder;
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10
11/// Role of messages in the API.
12#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
13#[serde(rename_all = "lowercase")]
14pub enum Role {
15    #[default]
16    User,
17    Assistant,
18    System,
19    Developer,
20}
21
22/// Status of input/output items.
23#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
24#[serde(rename_all = "snake_case")]
25pub enum OutputStatus {
26    InProgress,
27    Completed,
28    Incomplete,
29}
30
31#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
32#[serde(untagged)]
33pub enum InputParam {
34    ///  A text input to the model, equivalent to a text input with the
35    /// `user` role.
36    Text(String),
37    /// A list of one or many input items to the model, containing
38    /// different content types.
39    Items(Vec<InputItem>),
40}
41
42/// Content item used to generate a response.
43///
44/// This is a properly discriminated union based on the `type` field, using Rust's
45/// type-safe enum with serde's tag attribute for efficient deserialization.
46///
47/// # OpenAPI Specification
48/// Corresponds to the `Item` schema in the OpenAPI spec with a `type` discriminator.
49#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
50#[serde(tag = "type", rename_all = "snake_case")]
51pub enum Item {
52    /// A message (type: "message").
53    /// Can represent InputMessage (user/system/developer) or OutputMessage (assistant).
54    ///
55    /// InputMessage:
56    ///     A message input to the model with a role indicating instruction following hierarchy.
57    ///     Instructions given with the developer or system role take precedence over instructions given with the user role.
58    /// OutputMessage:
59    ///     A message output from the model.
60    Message(MessageItem),
61
62    /// The results of a file search tool call. See the
63    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
64    FileSearchCall(FileSearchToolCall),
65
66    /// A tool call to a computer use tool. See the
67    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
68    ComputerCall(ComputerToolCall),
69
70    /// The output of a computer tool call.
71    ComputerCallOutput(ComputerCallOutputItemParam),
72
73    /// The results of a web search tool call. See the
74    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
75    WebSearchCall(WebSearchToolCall),
76
    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
80    FunctionCall(FunctionToolCall),
81
82    /// The output of a function tool call.
83    FunctionCallOutput(FunctionCallOutputItemParam),
84
85    /// A description of the chain of thought used by a reasoning model while generating
86    /// a response. Be sure to include these items in your `input` to the Responses API
87    /// for subsequent turns of a conversation if you are manually
88    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
89    Reasoning(ReasoningItem),
90
91    /// A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
92    Compaction(CompactionSummaryItemParam),
93
94    /// An image generation request made by the model.
95    ImageGenerationCall(ImageGenToolCall),
96
97    /// A tool call to run code.
98    CodeInterpreterCall(CodeInterpreterToolCall),
99
100    /// A tool call to run a command on the local shell.
101    LocalShellCall(LocalShellToolCall),
102
103    /// The output of a local shell tool call.
104    LocalShellCallOutput(LocalShellToolCallOutput),
105
    /// A tool call representing a request to execute one or more shell commands.
107    ShellCall(FunctionShellCallItemParam),
108
109    /// The streamed output items emitted by a shell tool call.
110    ShellCallOutput(FunctionShellCallOutputItemParam),
111
112    /// A tool call representing a request to create, delete, or update files using diff patches.
113    ApplyPatchCall(ApplyPatchToolCallItemParam),
114
115    /// The streamed output emitted by an apply patch tool call.
116    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
117
118    /// A list of tools available on an MCP server.
119    McpListTools(MCPListTools),
120
121    /// A request for human approval of a tool invocation.
122    McpApprovalRequest(MCPApprovalRequest),
123
124    /// A response to an MCP approval request.
125    McpApprovalResponse(MCPApprovalResponse),
126
127    /// An invocation of a tool on an MCP server.
128    McpCall(MCPToolCall),
129
130    /// The output of a custom tool call from your code, being sent back to the model.
131    CustomToolCallOutput(CustomToolCallOutput),
132
133    /// A call to a custom tool created by the model.
134    CustomToolCall(CustomToolCall),
135}
136
137/// Input item that can be used in the context for generating a response.
138///
139/// This represents the OpenAPI `InputItem` schema which is an `anyOf`:
140/// 1. `EasyInputMessage` - Simple, user-friendly message input (can use string content)
141/// 2. `Item` - Structured items with proper type discrimination (including InputMessage, OutputMessage, tool calls)
142/// 3. `ItemReferenceParam` - Reference to an existing item by ID (type can be null)
143///
144/// Uses untagged deserialization because these types overlap in structure.
145/// Order matters: more specific structures are tried first.
146///
147/// # OpenAPI Specification
148/// Corresponds to the `InputItem` schema: `anyOf[EasyInputMessage, Item, ItemReferenceParam]`
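///
/// # Example
///
/// A small deserialization sketch showing the variant ordering (illustrative; it
/// assumes these types are re-exported from `async_openai::types::responses`):
///
/// ```no_run
/// use async_openai::types::responses::InputItem;
///
/// // An object carrying only an `id` (plus an optional `"type": "item_reference"`)
/// // matches the most minimal variant, `ItemReference`, which is tried first.
/// let item: InputItem = serde_json::from_value(serde_json::json!({
///     "type": "item_reference",
///     "id": "msg_123"
/// }))
/// .unwrap();
/// assert!(matches!(item, InputItem::ItemReference(_)));
/// ```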
149#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
150#[serde(untagged)]
151pub enum InputItem {
152    /// A reference to an existing item by ID.
153    /// Has a required `id` field and optional `type` (can be "item_reference" or null).
154    /// Must be tried first as it's the most minimal structure.
155    ItemReference(ItemReference),
156
157    /// All structured items with proper type discrimination.
158    /// Includes InputMessage, OutputMessage, and all tool calls/outputs.
159    /// Uses the discriminated `Item` enum for efficient, type-safe deserialization.
160    Item(Item),
161
162    /// A simple, user-friendly message input (EasyInputMessage).
163    /// Supports string content and can include assistant role for previous responses.
164    /// Must be tried last as it's the most flexible structure.
165    ///
166    /// A message input to the model with a role indicating instruction following
167    /// hierarchy. Instructions given with the `developer` or `system` role take
168    /// precedence over instructions given with the `user` role. Messages with the
169    /// `assistant` role are presumed to have been generated by the model in previous
170    /// interactions.
171    EasyMessage(EasyInputMessage),
172}
173
174/// A message item used within the `Item` enum.
175///
176/// Both InputMessage and OutputMessage have `type: "message"`, so we use an untagged
177/// enum to distinguish them based on their structure:
178/// - OutputMessage: role=assistant, required id & status fields
179/// - InputMessage: role=user/system/developer, content is `Vec<ContentType>`, optional id/status
180///
181/// Note: EasyInputMessage is NOT included here - it's a separate variant in `InputItem`,
182/// not part of the structured `Item` enum.
183#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
184#[serde(untagged)]
185pub enum MessageItem {
186    /// An output message from the model (role: assistant, has required id & status).
187    /// This must come first as it has the most specific structure (required id and status fields).
188    Output(OutputMessage),
189
190    /// A structured input message (role: user/system/developer, content is `Vec<ContentType>`).
191    /// Has structured content list and optional id/status fields.
192    ///
193    /// A message input to the model with a role indicating instruction following hierarchy.
194    /// Instructions given with the `developer` or `system` role take precedence over instructions
195    /// given with the `user` role.
196    Input(InputMessage),
197}
198
199/// A reference to an existing item by ID.
200#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
201pub struct ItemReference {
202    /// The type of item to reference. Can be "item_reference" or null.
203    #[serde(skip_serializing_if = "Option::is_none")]
204    pub r#type: Option<ItemReferenceType>,
205    /// The ID of the item to reference.
206    pub id: String,
207}
208
209#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
210#[serde(rename_all = "snake_case")]
211pub enum ItemReferenceType {
212    ItemReference,
213}
214
215/// Output from a function call that you're providing back to the model.
216#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
217pub struct FunctionCallOutputItemParam {
218    /// The unique ID of the function tool call generated by the model.
219    pub call_id: String,
220    /// Text, image, or file output of the function tool call.
221    pub output: FunctionCallOutput,
222    /// The unique ID of the function tool call output.
223    /// Populated when this item is returned via API.
224    #[serde(skip_serializing_if = "Option::is_none")]
225    pub id: Option<String>,
226    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
227    /// Populated when items are returned via API.
228    #[serde(skip_serializing_if = "Option::is_none")]
229    pub status: Option<OutputStatus>,
230}
231
232#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
233#[serde(untagged)]
234pub enum FunctionCallOutput {
235    /// A JSON string of the output of the function tool call.
236    Text(String),
237    Content(Vec<InputContent>), // TODO use shape which allows null from OpenAPI spec?
238}
239
240#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
241pub struct ComputerCallOutputItemParam {
242    /// The ID of the computer tool call that produced the output.
243    pub call_id: String,
244    /// A computer screenshot image used with the computer use tool.
245    pub output: ComputerScreenshotImage,
246    /// The safety checks reported by the API that have been acknowledged by the developer.
247    #[serde(skip_serializing_if = "Option::is_none")]
248    pub acknowledged_safety_checks: Option<Vec<ComputerCallSafetyCheckParam>>,
249    /// The unique ID of the computer tool call output. Optional when creating.
250    #[serde(skip_serializing_if = "Option::is_none")]
251    pub id: Option<String>,
252    /// The status of the message input. One of `in_progress`, `completed`, or `incomplete`.
253    /// Populated when input items are returned via API.
254    #[serde(skip_serializing_if = "Option::is_none")]
255    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
256}
257
258#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
259#[serde(rename_all = "snake_case")]
260pub enum ComputerScreenshotImageType {
261    ComputerScreenshot,
262}
263
264/// A computer screenshot image used with the computer use tool.
265#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
266pub struct ComputerScreenshotImage {
267    /// Specifies the event type. For a computer screenshot, this property is always
268    /// set to `computer_screenshot`.
269    pub r#type: ComputerScreenshotImageType,
270    /// The identifier of an uploaded file that contains the screenshot.
271    #[serde(skip_serializing_if = "Option::is_none")]
272    pub file_id: Option<String>,
273    /// The URL of the screenshot image.
274    #[serde(skip_serializing_if = "Option::is_none")]
275    pub image_url: Option<String>,
276}
277
278/// Output from a local shell tool call that you're providing back to the model.
279#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
280pub struct LocalShellToolCallOutput {
281    /// The unique ID of the local shell tool call generated by the model.
282    pub id: String,
283
284    /// A JSON string of the output of the local shell tool call.
285    pub output: String,
286
287    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
288    #[serde(skip_serializing_if = "Option::is_none")]
289    pub status: Option<OutputStatus>,
290}
291
292/// Output from a local shell command execution.
293#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
294pub struct LocalShellOutput {
295    /// The stdout output from the command.
296    #[serde(skip_serializing_if = "Option::is_none")]
297    pub stdout: Option<String>,
298
299    /// The stderr output from the command.
300    #[serde(skip_serializing_if = "Option::is_none")]
301    pub stderr: Option<String>,
302
303    /// The exit code of the command.
304    #[serde(skip_serializing_if = "Option::is_none")]
305    pub exit_code: Option<i32>,
306}
307
308/// An MCP approval response that you're providing back to the model.
309#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
310pub struct MCPApprovalResponse {
311    /// The ID of the approval request being answered.
312    pub approval_request_id: String,
313
314    /// Whether the request was approved.
315    pub approve: bool,
316
    /// The unique ID of the approval response.
318    #[serde(skip_serializing_if = "Option::is_none")]
319    pub id: Option<String>,
320
321    /// Optional reason for the decision.
322    #[serde(skip_serializing_if = "Option::is_none")]
323    pub reason: Option<String>,
324}
325
326#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
327#[serde(untagged)]
328pub enum CustomToolCallOutputOutput {
329    /// A string of the output of the custom tool call.
330    Text(String),
331    /// Text, image, or file output of the custom tool call.
332    List(Vec<InputContent>),
333}
334
335#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
336pub struct CustomToolCallOutput {
337    /// The call ID, used to map this custom tool call output to a custom tool call.
338    pub call_id: String,
339
340    /// The output from the custom tool call generated by your code.
    /// Can be a string or a list of output content.
342    pub output: CustomToolCallOutputOutput,
343
344    /// The unique ID of the custom tool call output in the OpenAI platform.
345    #[serde(skip_serializing_if = "Option::is_none")]
346    pub id: Option<String>,
347}
348
349/// A simplified message input to the model (EasyInputMessage in the OpenAPI spec).
350///
351/// This is the most user-friendly way to provide messages, supporting both simple
352/// string content and structured content. Role can include `assistant` for providing
353/// previous assistant responses.
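///
/// # Example
///
/// A minimal construction sketch using the generated builder (illustrative; it
/// assumes these types are re-exported from `async_openai::types::responses`):
///
/// ```no_run
/// use async_openai::types::responses::{EasyInputContent, EasyInputMessageArgs, Role};
///
/// // Build a simple user message with plain string content.
/// let message = EasyInputMessageArgs::default()
///     .role(Role::User)
///     .content(EasyInputContent::Text("Hello!".to_string()))
///     .build()
///     .unwrap();
/// ```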
354#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
355#[builder(
356    name = "EasyInputMessageArgs",
357    pattern = "mutable",
358    setter(into, strip_option),
359    default
360)]
361#[builder(build_fn(error = "OpenAIError"))]
362pub struct EasyInputMessage {
363    /// The type of the message input. Always set to `message`.
364    pub r#type: MessageType,
365    /// The role of the message input. One of `user`, `assistant`, `system`, or `developer`.
366    pub role: Role,
367    /// Text, image, or audio input to the model, used to generate a response.
368    /// Can also contain previous assistant responses.
369    pub content: EasyInputContent,
370}
371
372/// A structured message input to the model (InputMessage in the OpenAPI spec).
373///
374/// This variant requires structured content (not a simple string) and does not support
/// the `assistant` role (use `OutputMessage` for that). The `status` field is populated when items are returned via the API.
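///
/// # Example
///
/// A minimal construction sketch (illustrative; it assumes these types are
/// re-exported from `async_openai::types::responses`):
///
/// ```no_run
/// use async_openai::types::responses::{
///     InputContent, InputMessageArgs, InputRole, InputTextContent,
/// };
///
/// // Structured content is always a list of typed parts.
/// let message = InputMessageArgs::default()
///     .role(InputRole::User)
///     .content(vec![InputContent::InputText(InputTextContent {
///         text: "Summarize this document.".to_string(),
///     })])
///     .build()
///     .unwrap();
/// ```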
376#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
377#[builder(
378    name = "InputMessageArgs",
379    pattern = "mutable",
380    setter(into, strip_option),
381    default
382)]
383#[builder(build_fn(error = "OpenAIError"))]
384pub struct InputMessage {
385    /// A list of one or many input items to the model, containing different content types.
386    pub content: Vec<InputContent>,
387    /// The role of the message input. One of `user`, `system`, or `developer`.
388    /// Note: `assistant` is NOT allowed here; use OutputMessage instead.
389    pub role: InputRole,
390    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
391    /// Populated when items are returned via API.
392    #[serde(skip_serializing_if = "Option::is_none")]
393    pub status: Option<OutputStatus>,
394    /////The type of the message input. Always set to `message`.
395    //pub r#type: MessageType,
396}
397
398/// The role for an input message - can only be `user`, `system`, or `developer`.
399/// This type ensures type safety by excluding the `assistant` role (use OutputMessage for that).
400#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
401#[serde(rename_all = "lowercase")]
402pub enum InputRole {
403    #[default]
404    User,
405    System,
406    Developer,
407}
408
409/// Content for EasyInputMessage - can be a simple string or structured list.
410#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
411#[serde(untagged)]
412pub enum EasyInputContent {
413    /// A text input to the model.
414    Text(String),
415    /// A list of one or many input items to the model, containing different content types.
416    ContentList(Vec<InputContent>),
417}
418
419/// Parts of a message: text, image, file, or audio.
420#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
421#[serde(tag = "type", rename_all = "snake_case")]
422pub enum InputContent {
423    /// A text input to the model.
424    InputText(InputTextContent),
425    /// An image input to the model. Learn about
426    /// [image inputs](https://platform.openai.com/docs/guides/vision).
427    InputImage(InputImageContent),
428    /// A file input to the model.
429    InputFile(InputFileContent),
430}
431
432#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
433pub struct InputTextContent {
434    /// The text input to the model.
435    pub text: String,
436}
437
438#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
439#[builder(
440    name = "InputImageArgs",
441    pattern = "mutable",
442    setter(into, strip_option),
443    default
444)]
445#[builder(build_fn(error = "OpenAIError"))]
446pub struct InputImageContent {
447    /// The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`.
448    /// Defaults to `auto`.
449    pub detail: ImageDetail,
450    /// The ID of the file to be sent to the model.
451    #[serde(skip_serializing_if = "Option::is_none")]
452    pub file_id: Option<String>,
453    /// The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image
454    /// in a data URL.
455    #[serde(skip_serializing_if = "Option::is_none")]
456    pub image_url: Option<String>,
457}
458
459#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
460#[builder(
461    name = "InputFileArgs",
462    pattern = "mutable",
463    setter(into, strip_option),
464    default
465)]
466#[builder(build_fn(error = "OpenAIError"))]
467pub struct InputFileContent {
468    /// The content of the file to be sent to the model.
469    #[serde(skip_serializing_if = "Option::is_none")]
470    file_data: Option<String>,
471    /// The ID of the file to be sent to the model.
472    #[serde(skip_serializing_if = "Option::is_none")]
473    file_id: Option<String>,
474    /// The URL of the file to be sent to the model.
475    #[serde(skip_serializing_if = "Option::is_none")]
476    file_url: Option<String>,
477    /// The name of the file to be sent to the model.
478    #[serde(skip_serializing_if = "Option::is_none")]
479    filename: Option<String>,
480}
481
482/// The conversation that this response belonged to. Input items and output items from this
483/// response were automatically added to this conversation.
484#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
485pub struct Conversation {
486    /// The unique ID of the conversation that this response was associated with.
487    pub id: String,
488}
489
490#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
491#[serde(untagged)]
492pub enum ConversationParam {
493    /// The unique ID of the conversation.
494    ConversationID(String),
495    /// The conversation that this response belongs to.
496    Object(Conversation),
497}
498
499#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
500pub enum IncludeEnum {
501    #[serde(rename = "file_search_call.results")]
502    FileSearchCallResults,
503    #[serde(rename = "web_search_call.results")]
504    WebSearchCallResults,
505    #[serde(rename = "web_search_call.action.sources")]
506    WebSearchCallActionSources,
507    #[serde(rename = "message.input_image.image_url")]
508    MessageInputImageImageUrl,
509    #[serde(rename = "computer_call_output.output.image_url")]
510    ComputerCallOutputOutputImageUrl,
511    #[serde(rename = "code_interpreter_call.outputs")]
512    CodeInterpreterCallOutputs,
513    #[serde(rename = "reasoning.encrypted_content")]
514    ReasoningEncryptedContent,
515    #[serde(rename = "message.output_text.logprobs")]
516    MessageOutputTextLogprobs,
517}
518
519#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
520pub struct ResponseStreamOptions {
521    /// When true, stream obfuscation will be enabled. Stream obfuscation adds
522    /// random characters to an `obfuscation` field on streaming delta events to
523    /// normalize payload sizes as a mitigation to certain side-channel attacks.
524    /// These obfuscation fields are included by default, but add a small amount
525    /// of overhead to the data stream. You can set `include_obfuscation` to
526    /// false to optimize for bandwidth if you trust the network links between
527    /// your application and the OpenAI API.
528    #[serde(skip_serializing_if = "Option::is_none")]
529    pub include_obfuscation: Option<bool>,
530}
531
532/// Builder for a Responses API request.
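///
/// # Example
///
/// A minimal request sketch using the generated builder (illustrative; it assumes
/// these types are re-exported from `async_openai::types::responses`, and the model
/// name is a placeholder):
///
/// ```no_run
/// use async_openai::types::responses::{CreateResponseArgs, InputParam};
///
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let request = CreateResponseArgs::default()
///     .model("gpt-4o")
///     // Plain text input; use `InputParam::Items(..)` for structured content.
///     .input(InputParam::Text("Write a haiku about Rust.".to_string()))
///     .max_output_tokens(256u32)
///     .build()?;
/// # Ok(())
/// # }
/// ```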
533#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
534#[builder(
535    name = "CreateResponseArgs",
536    pattern = "mutable",
537    setter(into, strip_option),
538    default
539)]
540#[builder(build_fn(error = "OpenAIError"))]
541pub struct CreateResponse {
542    /// Whether to run the model response in the background.
543    /// [Learn more](https://platform.openai.com/docs/guides/background).
544    #[serde(skip_serializing_if = "Option::is_none")]
545    pub background: Option<bool>,
546
547    /// The conversation that this response belongs to. Items from this conversation are prepended to
548    ///  `input_items` for this response request.
549    ///
550    /// Input items and output items from this response are automatically added to this conversation after
551    /// this response completes.
552    #[serde(skip_serializing_if = "Option::is_none")]
553    pub conversation: Option<ConversationParam>,
554
555    /// Specify additional output data to include in the model response. Currently supported
556    /// values are:
557    ///
558    /// - `web_search_call.action.sources`: Include the sources of the web search tool call.
559    ///
560    /// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
561    ///   interpreter tool call items.
562    ///
563    /// - `computer_call_output.output.image_url`: Include image urls from the computer call
564    ///   output.
565    ///
566    /// - `file_search_call.results`: Include the search results of the file search tool call.
567    ///
568    /// - `message.input_image.image_url`: Include image urls from the input message.
569    ///
570    /// - `message.output_text.logprobs`: Include logprobs with assistant messages.
571    ///
572    /// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
573    ///   reasoning item outputs. This enables reasoning items to be used in multi-turn
574    ///   conversations when using the Responses API statelessly (like when the `store` parameter is
575    ///   set to `false`, or when an organization is enrolled in the zero data retention program).
576    #[serde(skip_serializing_if = "Option::is_none")]
577    pub include: Option<Vec<IncludeEnum>>,
578
579    /// Text, image, or file inputs to the model, used to generate a response.
580    ///
581    /// Learn more:
582    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
583    /// - [Image inputs](https://platform.openai.com/docs/guides/images)
584    /// - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
585    /// - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
586    /// - [Function calling](https://platform.openai.com/docs/guides/function-calling)
587    pub input: InputParam,
588
589    /// A system (or developer) message inserted into the model's context.
590    ///
591    /// When using along with `previous_response_id`, the instructions from a previous
592    /// response will not be carried over to the next response. This makes it simple
593    /// to swap out system (or developer) messages in new responses.
594    #[serde(skip_serializing_if = "Option::is_none")]
595    pub instructions: Option<String>,
596
597    /// An upper bound for the number of tokens that can be generated for a response, including
598    /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
599    #[serde(skip_serializing_if = "Option::is_none")]
600    pub max_output_tokens: Option<u32>,
601
602    /// The maximum number of total calls to built-in tools that can be processed in a response. This
603    /// maximum number applies across all built-in tool calls, not per individual tool. Any further
604    /// attempts to call a tool by the model will be ignored.
605    #[serde(skip_serializing_if = "Option::is_none")]
606    pub max_tool_calls: Option<u32>,
607
608    /// Set of 16 key-value pairs that can be attached to an object. This can be
609    /// useful for storing additional information about the object in a structured
610    /// format, and querying for objects via API or the dashboard.
611    ///
612    /// Keys are strings with a maximum length of 64 characters. Values are
613    /// strings with a maximum length of 512 characters.
614    #[serde(skip_serializing_if = "Option::is_none")]
615    pub metadata: Option<HashMap<String, String>>,
616
617    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
618    /// offers a wide range of models with different capabilities, performance
619    /// characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
620    /// to browse and compare available models.
621    #[serde(skip_serializing_if = "Option::is_none")]
622    pub model: Option<String>,
623
624    /// Whether to allow the model to run tool calls in parallel.
625    #[serde(skip_serializing_if = "Option::is_none")]
626    pub parallel_tool_calls: Option<bool>,
627
628    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
629    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
630    /// Cannot be used in conjunction with `conversation`.
631    #[serde(skip_serializing_if = "Option::is_none")]
632    pub previous_response_id: Option<String>,
633
634    /// Reference to a prompt template and its variables.
635    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
636    #[serde(skip_serializing_if = "Option::is_none")]
637    pub prompt: Option<Prompt>,
638
639    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
640    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
641    #[serde(skip_serializing_if = "Option::is_none")]
642    pub prompt_cache_key: Option<String>,
643
644    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
645    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
646    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
647    #[serde(skip_serializing_if = "Option::is_none")]
648    pub prompt_cache_retention: Option<PromptCacheRetention>,
649
650    /// **gpt-5 and o-series models only**
651    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
652    #[serde(skip_serializing_if = "Option::is_none")]
653    pub reasoning: Option<Reasoning>,
654
655    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
656    /// usage policies.
657    ///
658    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
659    /// or email address, in order to avoid sending us any identifying information. [Learn
660    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
661    #[serde(skip_serializing_if = "Option::is_none")]
662    pub safety_identifier: Option<String>,
663
664    /// Specifies the processing type used for serving the request.
665    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
666    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
667    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
668    /// - When not set, the default behavior is 'auto'.
669    ///
670    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
671    #[serde(skip_serializing_if = "Option::is_none")]
672    pub service_tier: Option<ServiceTier>,
673
674    /// Whether to store the generated model response for later retrieval via API.
675    #[serde(skip_serializing_if = "Option::is_none")]
676    pub store: Option<bool>,
677
678    /// If set to true, the model response data will be streamed to the client
679    /// as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
680    /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
681    /// for more information.
682    #[serde(skip_serializing_if = "Option::is_none")]
683    pub stream: Option<bool>,
684
685    /// Options for streaming responses. Only set this when you set `stream: true`.
686    #[serde(skip_serializing_if = "Option::is_none")]
687    pub stream_options: Option<ResponseStreamOptions>,
688
689    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8
690    /// will make the output more random, while lower values like 0.2 will make it
691    /// more focused and deterministic. We generally recommend altering this or
692    /// `top_p` but not both.
693    #[serde(skip_serializing_if = "Option::is_none")]
694    pub temperature: Option<f32>,
695
696    /// Configuration options for a text response from the model. Can be plain
697    /// text or structured JSON data. Learn more:
698    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
699    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
700    #[serde(skip_serializing_if = "Option::is_none")]
701    pub text: Option<ResponseTextParam>,
702
703    /// How the model should select which tool (or tools) to use when generating
704    /// a response. See the `tools` parameter to see how to specify which tools
705    /// the model can call.
706    #[serde(skip_serializing_if = "Option::is_none")]
707    pub tool_choice: Option<ToolChoiceParam>,
708
709    /// An array of tools the model may call while generating a response. You
710    /// can specify which tool to use by setting the `tool_choice` parameter.
711    ///
712    /// We support the following categories of tools:
713    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
714    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
715    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
716    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
717    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
718    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
719    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
720    /// - **Function calls (custom tools)**: Functions that are defined by you,
721    ///   enabling the model to call your own code with strongly typed arguments
722    ///   and outputs. Learn more about
723    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
724    ///   custom tools to call your own code.
725    #[serde(skip_serializing_if = "Option::is_none")]
726    pub tools: Option<Vec<Tool>>,
727
728    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
729    /// token position, each with an associated log probability.
730    #[serde(skip_serializing_if = "Option::is_none")]
731    pub top_logprobs: Option<u8>,
732
733    /// An alternative to sampling with temperature, called nucleus sampling,
734    /// where the model considers the results of the tokens with top_p probability
735    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
736    /// are considered.
737    ///
738    /// We generally recommend altering this or `temperature` but not both.
739    #[serde(skip_serializing_if = "Option::is_none")]
740    pub top_p: Option<f32>,
741
    /// The truncation strategy to use for the model response.
743    /// - `auto`: If the input to this Response exceeds
744    ///   the model's context window size, the model will truncate the
745    ///   response to fit the context window by dropping items from the beginning of the conversation.
746    /// - `disabled` (default): If the input size will exceed the context window
747    ///   size for a model, the request will fail with a 400 error.
748    #[serde(skip_serializing_if = "Option::is_none")]
749    pub truncation: Option<Truncation>,
750}
751
752#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
753#[serde(untagged)]
754pub enum ResponsePromptVariables {
755    String(String),
756    Content(InputContent),
757    Custom(serde_json::Value),
758}
759
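/// Reference to a prompt template and its variables.
///
/// # Example
///
/// A minimal sketch (illustrative; it assumes these types are re-exported from
/// `async_openai::types::responses`, and the prompt ID is a placeholder):
///
/// ```no_run
/// use async_openai::types::responses::{Prompt, ResponsePromptVariables};
///
/// let prompt = Prompt {
///     id: "pmpt_123".to_string(),
///     version: None,
///     // Arbitrary JSON substitution values go through the `Custom` variant.
///     variables: Some(ResponsePromptVariables::Custom(serde_json::json!({
///         "customer_name": "Jane Doe"
///     }))),
/// };
/// ```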
760#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
761pub struct Prompt {
762    /// The unique identifier of the prompt template to use.
763    pub id: String,
764
765    /// Optional version of the prompt template.
766    #[serde(skip_serializing_if = "Option::is_none")]
767    pub version: Option<String>,
768
769    /// Optional map of values to substitute in for variables in your
770    /// prompt. The substitution values can either be strings, or other
771    /// Response input types like images or files.
772    #[serde(skip_serializing_if = "Option::is_none")]
773    pub variables: Option<ResponsePromptVariables>,
774}
775
776#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
777#[serde(rename_all = "lowercase")]
778pub enum ServiceTier {
779    #[default]
780    Auto,
781    Default,
782    Flex,
783    Scale,
784    Priority,
785}
786
787/// Truncation strategies.
788#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
789#[serde(rename_all = "lowercase")]
790pub enum Truncation {
791    Auto,
792    Disabled,
793}
794
795#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
796pub struct Billing {
797    pub payer: String,
798}
799
800/// o-series reasoning settings.
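///
/// # Example
///
/// A minimal construction sketch using the generated builder (illustrative; it
/// assumes `ReasoningEffort` exposes a `Medium` variant and that these types are
/// re-exported from `async_openai::types::responses`):
///
/// ```no_run
/// use async_openai::types::responses::{ReasoningArgs, ReasoningEffort, ReasoningSummary};
///
/// let reasoning = ReasoningArgs::default()
///     .effort(ReasoningEffort::Medium) // assumed variant name
///     .summary(ReasoningSummary::Auto)
///     .build()
///     .unwrap();
/// ```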
801#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
802#[builder(
803    name = "ReasoningArgs",
804    pattern = "mutable",
805    setter(into, strip_option),
806    default
807)]
808#[builder(build_fn(error = "OpenAIError"))]
809pub struct Reasoning {
810    /// Constrains effort on reasoning for
811    /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
812    /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
813    /// reasoning effort can result in faster responses and fewer tokens used
814    /// on reasoning in a response.
815    ///
816    /// Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
817    #[serde(skip_serializing_if = "Option::is_none")]
818    pub effort: Option<ReasoningEffort>,
819    /// A summary of the reasoning performed by the model. This can be
820    /// useful for debugging and understanding the model's reasoning process.
821    /// One of `auto`, `concise`, or `detailed`.
822    ///
823    /// `concise` is supported for `computer-use-preview` models and all reasoning models after
824    /// `gpt-5`.
825    #[serde(skip_serializing_if = "Option::is_none")]
826    pub summary: Option<ReasoningSummary>,
827}
828
/// Constrains the verbosity of the model's response. One of `low`, `medium`, or `high`.
830#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
831#[serde(rename_all = "lowercase")]
832pub enum Verbosity {
833    Low,
834    Medium,
835    High,
836}
837
838#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
839#[serde(rename_all = "lowercase")]
840pub enum ReasoningSummary {
841    Auto,
842    Concise,
843    Detailed,
844}
845
846/// The retention policy for the prompt cache.
847#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
848pub enum PromptCacheRetention {
849    #[serde(rename = "in_memory")]
850    InMemory,
851    #[serde(rename = "24h")]
852    Hours24,
853}
854
855/// Configuration for text response format.
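///
/// # Example
///
/// A minimal sketch selecting the default plain-text format (illustrative; it
/// assumes these types are re-exported from `async_openai::types::responses`):
///
/// ```no_run
/// use async_openai::types::responses::{ResponseTextParam, TextResponseFormatConfiguration};
///
/// let text = ResponseTextParam {
///     format: TextResponseFormatConfiguration::Text,
///     verbosity: None,
/// };
/// // Serializes as `{"format":{"type":"text"}}`.
/// let _json = serde_json::to_string(&text).unwrap();
/// ```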
856#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
857pub struct ResponseTextParam {
858    /// An object specifying the format that the model must output.
859    ///
860    /// Configuring `{ "type": "json_schema" }` enables Structured Outputs,
861    /// which ensures the model will match your supplied JSON schema. Learn more in the
862    /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
863    ///
864    /// The default format is `{ "type": "text" }` with no additional options.
865    ///
866    /// **Not recommended for gpt-4o and newer models:**
867    ///
868    /// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
869    /// ensures the message the model generates is valid JSON. Using `json_schema`
870    /// is preferred for models that support it.
871    pub format: TextResponseFormatConfiguration,
872
873    /// Constrains the verbosity of the model's response. Lower values will result in
874    /// more concise responses, while higher values will result in more verbose responses.
875    ///
876    /// Currently supported values are `low`, `medium`, and `high`.
877    #[serde(skip_serializing_if = "Option::is_none")]
878    pub verbosity: Option<Verbosity>,
879}
880
881#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
882#[serde(tag = "type", rename_all = "snake_case")]
883pub enum TextResponseFormatConfiguration {
884    /// Default response format. Used to generate text responses.
885    Text,
886    /// JSON object response format. An older method of generating JSON responses.
887    /// Using `json_schema` is recommended for models that support it.
888    /// Note that the model will not generate JSON without a system or user message
889    /// instructing it to do so.
890    JsonObject,
891    /// JSON Schema response format. Used to generate structured JSON responses.
892    /// Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
893    JsonSchema(ResponseFormatJsonSchema),
894}
895
896/// Definitions for model-callable tools.
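///
/// # Example
///
/// A minimal sketch defining a function tool (illustrative; it assumes these types
/// are re-exported from `async_openai::types::responses`, and the function name and
/// JSON schema are placeholders):
///
/// ```no_run
/// use async_openai::types::responses::{FunctionToolArgs, Tool};
///
/// let tool = Tool::Function(
///     FunctionToolArgs::default()
///         .name("get_weather")
///         .description("Get the current weather for a location.")
///         .parameters(serde_json::json!({
///             "type": "object",
///             "properties": { "location": { "type": "string" } },
///             "required": ["location"]
///         }))
///         .strict(true)
///         .build()
///         .unwrap(),
/// );
/// ```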
897#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
898#[serde(tag = "type", rename_all = "snake_case")]
899pub enum Tool {
900    /// Defines a function in your own code the model can choose to call. Learn more about [function
901    /// calling](https://platform.openai.com/docs/guides/tools).
902    Function(FunctionTool),
903    /// A tool that searches for relevant content from uploaded files. Learn more about the [file search
904    /// tool](https://platform.openai.com/docs/guides/tools-file-search).
905    FileSearch(FileSearchTool),
906    /// A tool that controls a virtual computer. Learn more about the [computer
907    /// use tool](https://platform.openai.com/docs/guides/tools-computer-use).
908    ComputerUsePreview(ComputerUsePreviewTool),
909    /// Search the Internet for sources related to the prompt. Learn more about the
910    /// [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
911    WebSearch(WebSearchTool),
912    /// type: web_search_2025_08_26
913    #[serde(rename = "web_search_2025_08_26")]
914    WebSearch20250826(WebSearchTool),
915    /// Give the model access to additional tools via remote Model Context Protocol
916    /// (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
917    Mcp(MCPTool),
918    /// A tool that runs Python code to help generate a response to a prompt.
919    CodeInterpreter(CodeInterpreterTool),
920    /// A tool that generates images using a model like `gpt-image-1`.
921    ImageGeneration(ImageGenTool),
922    /// A tool that allows the model to execute shell commands in a local environment.
923    LocalShell,
924    /// A tool that allows the model to execute shell commands.
925    Shell(FunctionShellToolParam),
    /// A custom tool that processes input using a specified format. Learn more about [custom
    /// tools](https://platform.openai.com/docs/guides/function-calling#custom-tools).
928    Custom(CustomToolParam),
929    /// This tool searches the web for relevant results to use in a response. Learn more about the [web search
    /// tool](https://platform.openai.com/docs/guides/tools-web-search).
931    WebSearchPreview(WebSearchTool),
932    /// type: web_search_preview_2025_03_11
933    #[serde(rename = "web_search_preview_2025_03_11")]
934    WebSearchPreview20250311(WebSearchTool),
935    /// Allows the assistant to create, delete, or update files using unified diffs.
936    ApplyPatch,
937}
938
939#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
940pub struct CustomToolParam {
941    /// The name of the custom tool, used to identify it in tool calls.
942    pub name: String,
943    /// Optional description of the custom tool, used to provide more context.
944    pub description: Option<String>,
945    /// The input format for the custom tool. Default is unconstrained text.
946    pub format: CustomToolParamFormat,
947}
948
949#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
950#[serde(tag = "type", rename_all = "lowercase")]
951pub enum CustomToolParamFormat {
952    /// Unconstrained free-form text.
953    #[default]
954    Text,
955    /// A grammar defined by the user.
956    Grammar(CustomGrammarFormatParam),
957}
958
959#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
960#[builder(
961    name = "FileSearchToolArgs",
962    pattern = "mutable",
963    setter(into, strip_option),
964    default
965)]
966#[builder(build_fn(error = "OpenAIError"))]
967pub struct FileSearchTool {
968    /// The IDs of the vector stores to search.
969    pub vector_store_ids: Vec<String>,
970    /// The maximum number of results to return. This number should be between 1 and 50 inclusive.
971    #[serde(skip_serializing_if = "Option::is_none")]
972    pub max_num_results: Option<u32>,
973    /// A filter to apply.
974    #[serde(skip_serializing_if = "Option::is_none")]
975    pub filters: Option<Filter>,
976    /// Ranking options for search.
977    #[serde(skip_serializing_if = "Option::is_none")]
978    pub ranking_options: Option<RankingOptions>,
979}
980
981#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
982#[builder(
983    name = "FunctionToolArgs",
984    pattern = "mutable",
985    setter(into, strip_option),
986    default
987)]
988pub struct FunctionTool {
989    /// The name of the function to call.
990    pub name: String,
991    /// A JSON schema object describing the parameters of the function.
992    #[serde(skip_serializing_if = "Option::is_none")]
993    pub parameters: Option<serde_json::Value>,
994    /// Whether to enforce strict parameter validation. Default `true`.
995    #[serde(skip_serializing_if = "Option::is_none")]
996    pub strict: Option<bool>,
997    /// A description of the function. Used by the model to determine whether or not to call the
998    /// function.
999    #[serde(skip_serializing_if = "Option::is_none")]
1000    pub description: Option<String>,
1001}
1002
1003#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1004pub struct WebSearchToolFilters {
1005    /// Allowed domains for the search. If not provided, all domains are allowed.
1006    /// Subdomains of the provided domains are allowed as well.
1007    ///
1008    /// Example: `["pubmed.ncbi.nlm.nih.gov"]`
1009    #[serde(skip_serializing_if = "Option::is_none")]
1010    pub allowed_domains: Option<Vec<String>>,
1011}
1012
1013#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1014#[builder(
1015    name = "WebSearchToolArgs",
1016    pattern = "mutable",
1017    setter(into, strip_option),
1018    default
1019)]
1020pub struct WebSearchTool {
1021    /// Filters for the search.
1022    #[serde(skip_serializing_if = "Option::is_none")]
1023    pub filters: Option<WebSearchToolFilters>,
1024    /// The approximate location of the user.
1025    #[serde(skip_serializing_if = "Option::is_none")]
1026    pub user_location: Option<WebSearchApproximateLocation>,
1027    /// High level guidance for the amount of context window space to use for the search. One of `low`,
1028    /// `medium`, or `high`. `medium` is the default.
1029    #[serde(skip_serializing_if = "Option::is_none")]
1030    pub search_context_size: Option<WebSearchToolSearchContextSize>,
1031}
1032
1033#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
1034#[serde(rename_all = "lowercase")]
1035pub enum WebSearchToolSearchContextSize {
1036    Low,
1037    #[default]
1038    Medium,
1039    High,
1040}
1041
1042#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
1043#[serde(rename_all = "lowercase")]
1044pub enum ComputerEnvironment {
1045    Windows,
1046    Mac,
1047    Linux,
1048    Ubuntu,
1049    #[default]
1050    Browser,
1051}
1052
1053#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1054#[builder(
1055    name = "ComputerUsePreviewToolArgs",
1056    pattern = "mutable",
1057    setter(into, strip_option),
1058    default
1059)]
1060pub struct ComputerUsePreviewTool {
1061    /// The type of computer environment to control.
1062    environment: ComputerEnvironment,
1063    /// The width of the computer display.
1064    display_width: u32,
1065    /// The height of the computer display.
1066    display_height: u32,
1067}
1068
1069#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
1070pub enum RankVersionType {
1071    #[serde(rename = "auto")]
1072    Auto,
1073    #[serde(rename = "default-2024-11-15")]
1074    Default20241115,
1075}
1076
1077#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1078pub struct HybridSearch {
1079    /// The weight of the embedding in the reciprocal ranking fusion.
1080    pub embedding_weight: f32,
1081    /// The weight of the text in the reciprocal ranking fusion.
1082    pub text_weight: f32,
1083}
1084
1085/// Options for search result ranking.
1086#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1087pub struct RankingOptions {
1088    /// Weights that control how reciprocal rank fusion balances semantic embedding matches versus
1089    /// sparse keyword matches when hybrid search is enabled.
1090    #[serde(skip_serializing_if = "Option::is_none")]
1091    pub hybrid_search: Option<HybridSearch>,
1092    /// The ranker to use for the file search.
1093    pub ranker: RankVersionType,
1094    /// The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will
1095    /// attempt to return only the most relevant results, but may return fewer results.
1096    #[serde(skip_serializing_if = "Option::is_none")]
1097    pub score_threshold: Option<f32>,
1098}
1099
1100#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
1101#[serde(rename_all = "lowercase")]
1102pub enum WebSearchApproximateLocationType {
1103    #[default]
1104    Approximate,
1105}
1106
1107/// Approximate user location for web search.
1108#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1109#[builder(
1110    name = "WebSearchApproximateLocationArgs",
1111    pattern = "mutable",
1112    setter(into, strip_option),
1113    default
1114)]
1115#[builder(build_fn(error = "OpenAIError"))]
1116pub struct WebSearchApproximateLocation {
1117    /// The type of location approximation. Always `approximate`.
1118    pub r#type: WebSearchApproximateLocationType,
1119    /// Free text input for the city of the user, e.g. `San Francisco`.
1120    #[serde(skip_serializing_if = "Option::is_none")]
1121    pub city: Option<String>,
1122    /// The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,
1123    /// e.g. `US`.
1124    #[serde(skip_serializing_if = "Option::is_none")]
1125    pub country: Option<String>,
1126    /// Free text input for the region of the user, e.g. `California`.
1127    #[serde(skip_serializing_if = "Option::is_none")]
1128    pub region: Option<String>,
1129    /// The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g.
1130    /// `America/Los_Angeles`.
1131    #[serde(skip_serializing_if = "Option::is_none")]
1132    pub timezone: Option<String>,
1133}
1134
1135/// Container configuration for a code interpreter.
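///
/// # Example
///
/// A minimal sketch of the two container forms (illustrative; it assumes these
/// types are re-exported from `async_openai::types::responses`, and the IDs are
/// placeholders):
///
/// ```no_run
/// use async_openai::types::responses::{CodeInterpreterContainerAuto, CodeInterpreterToolContainer};
///
/// // Serializes as `{"type":"auto","file_ids":["file-abc"]}`.
/// let auto = CodeInterpreterToolContainer::Auto(CodeInterpreterContainerAuto {
///     file_ids: Some(vec!["file-abc".to_string()]),
///     memory_limit: None,
/// });
///
/// // Serializes as a bare string: `"cntr_123"`.
/// let by_id = CodeInterpreterToolContainer::ContainerID("cntr_123".to_string());
/// ```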
1136#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1137#[serde(tag = "type", rename_all = "snake_case")]
1138pub enum CodeInterpreterToolContainer {
1139    /// Configuration for a code interpreter container. Optionally specify the IDs of the
1140    /// files to run the code on.
1141    Auto(CodeInterpreterContainerAuto),
1142
1143    /// The container ID.
1144    #[serde(untagged)]
1145    ContainerID(String),
1146}
1147
1148/// Auto configuration for code interpreter container.
1149#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1150pub struct CodeInterpreterContainerAuto {
1151    /// An optional list of uploaded files to make available to your code.
1152    #[serde(skip_serializing_if = "Option::is_none")]
1153    pub file_ids: Option<Vec<String>>,
1154
1155    #[serde(skip_serializing_if = "Option::is_none")]
1156    pub memory_limit: Option<u64>,
1157}
1158
1159#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1160#[builder(
1161    name = "CodeInterpreterToolArgs",
1162    pattern = "mutable",
1163    setter(into, strip_option),
1164    default
1165)]
1166#[builder(build_fn(error = "OpenAIError"))]
1167pub struct CodeInterpreterTool {
1168    /// The code interpreter container. Can be a container ID or an object that
1169    /// specifies uploaded file IDs to make available to your code, along with an
1170    /// optional `memory_limit` setting.
1171    pub container: CodeInterpreterToolContainer,
1172}
1173
1174#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1175pub struct ImageGenToolInputImageMask {
1176    /// Base64-encoded mask image.
1177    #[serde(skip_serializing_if = "Option::is_none")]
1178    pub image_url: Option<String>,
1179    /// File ID for the mask image.
1180    #[serde(skip_serializing_if = "Option::is_none")]
1181    pub file_id: Option<String>,
1182}
1183
1184#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1185#[serde(rename_all = "lowercase")]
1186pub enum InputFidelity {
1187    #[default]
1188    High,
1189    Low,
1190}
1191
1192#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1193#[serde(rename_all = "lowercase")]
1194pub enum ImageGenToolModeration {
1195    #[default]
1196    Auto,
1197    Low,
1198}
1199
1200/// Whether to generate a new image or edit an existing image.
1201#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1202#[serde(rename_all = "lowercase")]
1203pub enum ImageGenActionEnum {
1204    /// Generate a new image.
1205    Generate,
1206    /// Edit an existing image.
1207    Edit,
1208    /// Automatically determine whether to generate or edit.
1209    #[default]
1210    Auto,
1211}
1212
1213/// Image generation tool definition.
1214#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1215#[builder(
1216    name = "ImageGenerationArgs",
1217    pattern = "mutable",
1218    setter(into, strip_option),
1219    default
1220)]
1221#[builder(build_fn(error = "OpenAIError"))]
1222pub struct ImageGenTool {
1223    /// Background type for the generated image. One of `transparent`,
1224    /// `opaque`, or `auto`. Default: `auto`.
1225    #[serde(skip_serializing_if = "Option::is_none")]
1226    pub background: Option<ImageGenToolBackground>,
1227    /// Control how much effort the model will exert to match the style and features, especially facial features,
1228    /// of input images. This parameter is only supported for `gpt-image-1`. Unsupported
1229    /// for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
1230    #[serde(skip_serializing_if = "Option::is_none")]
1231    pub input_fidelity: Option<InputFidelity>,
1232    /// Optional mask for inpainting. Contains `image_url`
1233    /// (string, optional) and `file_id` (string, optional).
1234    #[serde(skip_serializing_if = "Option::is_none")]
1235    pub input_image_mask: Option<ImageGenToolInputImageMask>,
1236    /// The image generation model to use. Default: `gpt-image-1`.
1237    #[serde(skip_serializing_if = "Option::is_none")]
1238    pub model: Option<String>,
1239    /// Moderation level for the generated image. Default: `auto`.
1240    #[serde(skip_serializing_if = "Option::is_none")]
1241    pub moderation: Option<ImageGenToolModeration>,
1242    /// Compression level for the output image. Default: 100.
1243    #[serde(skip_serializing_if = "Option::is_none")]
1244    pub output_compression: Option<u8>,
1245    /// The output format of the generated image. One of `png`, `webp`, or
1246    /// `jpeg`. Default: `png`.
1247    #[serde(skip_serializing_if = "Option::is_none")]
1248    pub output_format: Option<ImageGenToolOutputFormat>,
1249    /// Number of partial images to generate in streaming mode, from 0 (default value) to 3.
1250    #[serde(skip_serializing_if = "Option::is_none")]
1251    pub partial_images: Option<u8>,
1252    /// The quality of the generated image. One of `low`, `medium`, `high`,
1253    /// or `auto`. Default: `auto`.
1254    #[serde(skip_serializing_if = "Option::is_none")]
1255    pub quality: Option<ImageGenToolQuality>,
1256    /// The size of the generated image. One of `1024x1024`, `1024x1536`,
1257    /// `1536x1024`, or `auto`. Default: `auto`.
1258    #[serde(skip_serializing_if = "Option::is_none")]
1259    pub size: Option<ImageGenToolSize>,
1260    /// Whether to generate a new image or edit an existing image. Default: `auto`.
1261    #[serde(skip_serializing_if = "Option::is_none")]
1262    pub action: Option<ImageGenActionEnum>,
1263}
1264
1265#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1266#[serde(rename_all = "lowercase")]
1267pub enum ImageGenToolBackground {
1268    Transparent,
1269    Opaque,
1270    #[default]
1271    Auto,
1272}
1273
1274#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1275#[serde(rename_all = "lowercase")]
1276pub enum ImageGenToolOutputFormat {
1277    #[default]
1278    Png,
1279    Webp,
1280    Jpeg,
1281}
1282
1283#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1284#[serde(rename_all = "lowercase")]
1285pub enum ImageGenToolQuality {
1286    Low,
1287    Medium,
1288    High,
1289    #[default]
1290    Auto,
1291}
1292
1293#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1294#[serde(rename_all = "lowercase")]
1295pub enum ImageGenToolSize {
1296    #[default]
1297    Auto,
1298    #[serde(rename = "1024x1024")]
1299    Size1024x1024,
1300    #[serde(rename = "1024x1536")]
1301    Size1024x1536,
1302    #[serde(rename = "1536x1024")]
1303    Size1536x1024,
1304}
1305
1306#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1307#[serde(rename_all = "lowercase")]
1308pub enum ToolChoiceAllowedMode {
1309    Auto,
1310    Required,
1311}
1312
1313#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1314pub struct ToolChoiceAllowed {
1315    /// Constrains the tools available to the model to a pre-defined set.
1316    ///
1317    /// `auto` allows the model to pick from among the allowed tools and generate a
1318    /// message.
1319    ///
1320    /// `required` requires the model to call one or more of the allowed tools.
1321    pub mode: ToolChoiceAllowedMode,
1322    /// A list of tool definitions that the model should be allowed to call.
1323    ///
1324    /// For the Responses API, the list of tool definitions might look like:
1325    /// ```json
1326    /// [
1327    ///   { "type": "function", "name": "get_weather" },
1328    ///   { "type": "mcp", "server_label": "deepwiki" },
1329    ///   { "type": "image_generation" }
1330    /// ]
1331    /// ```
1332    pub tools: Vec<serde_json::Value>,
1333}
1334
/// The type of hosted tool the model should use. Learn more about
1336/// [built-in tools](https://platform.openai.com/docs/guides/tools).
1337#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1338#[serde(tag = "type", rename_all = "snake_case")]
1339pub enum ToolChoiceTypes {
1340    FileSearch,
1341    WebSearchPreview,
1342    ComputerUsePreview,
1343    CodeInterpreter,
1344    ImageGeneration,
1345}
1346
1347#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1348pub struct ToolChoiceFunction {
1349    /// The name of the function to call.
1350    pub name: String,
1351}
1352
1353#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1354pub struct ToolChoiceMCP {
1355    /// The name of the tool to call on the server.
1356    pub name: String,
1357    /// The label of the MCP server to use.
1358    pub server_label: String,
1359}
1360
1361#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1362pub struct ToolChoiceCustom {
1363    /// The name of the custom tool to call.
1364    pub name: String,
1365}
1366
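/// Controls how the model selects tools when generating a response.
///
/// As an illustration of the serialized forms (the tagged variants carry a
/// `type` field, while the untagged `Hosted` and `Mode` variants serialize to
/// their inner representation), forcing a specific function and letting the
/// model choose freely might look like:
///
/// ```json
/// { "type": "function", "name": "get_weather" }
/// ```
///
/// ```json
/// "auto"
/// ```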
1367#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1368#[serde(tag = "type", rename_all = "snake_case")]
1369pub enum ToolChoiceParam {
1370    /// Constrains the tools available to the model to a pre-defined set.
1371    AllowedTools(ToolChoiceAllowed),
1372
1373    /// Use this option to force the model to call a specific function.
1374    Function(ToolChoiceFunction),
1375
1376    /// Use this option to force the model to call a specific tool on a remote MCP server.
1377    Mcp(ToolChoiceMCP),
1378
1379    /// Use this option to force the model to call a custom tool.
1380    Custom(ToolChoiceCustom),
1381
1382    /// Forces the model to call the apply_patch tool when executing a tool call.
1383    ApplyPatch,
1384
1385    /// Forces the model to call the function shell tool when a tool call is required.
1386    Shell,
1387
1388    /// Indicates that the model should use a built-in tool to generate a response.
1389    /// [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
1390    #[serde(untagged)]
1391    Hosted(ToolChoiceTypes),
1392
1393    /// Controls which (if any) tool is called by the model.
1394    ///
1395    /// `none` means the model will not call any tool and instead generates a message.
1396    ///
1397    /// `auto` means the model can pick between generating a message or calling one or
1398    /// more tools.
1399    ///
1400    /// `required` means the model must call one or more tools.
1401    #[serde(untagged)]
1402    Mode(ToolChoiceOptions),
1403}
1404
1405#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
1406#[serde(rename_all = "lowercase")]
1407pub enum ToolChoiceOptions {
1408    None,
1409    Auto,
1410    Required,
1411}
1412
1413/// An error that occurred while generating the response.
1414#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1415pub struct ErrorObject {
1416    /// A machine-readable error code that was returned.
1417    pub code: String,
1418    /// A human-readable description of the error that was returned.
1419    pub message: String,
1420}
1421
1422/// Details about an incomplete response.
1423#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1424pub struct IncompleteDetails {
1425    /// The reason why the response is incomplete.
1426    pub reason: String,
1427}
1428
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TopLogProb {
    /// The bytes that make up this token.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LogProb {
    /// The bytes that make up this token.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The text token.
    pub token: String,
    /// The most likely tokens at this position and their log probabilities.
    pub top_logprobs: Vec<TopLogProb>,
}
1443
1444#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1445pub struct ResponseTopLobProb {
1446    /// The log probability of this token.
1447    pub logprob: f64,
1448    /// A possible text token.
1449    pub token: String,
1450}
1451
1452#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1453pub struct ResponseLogProb {
1454    /// The log probability of this token.
1455    pub logprob: f64,
1456    /// A possible text token.
1457    pub token: String,
1458    /// The log probability of the top 20 most likely tokens.
1459    pub top_logprobs: Vec<ResponseTopLobProb>,
1460}
1461
1462/// A simple text output from the model.
1463#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1464pub struct OutputTextContent {
1465    /// The annotations of the text output.
1466    pub annotations: Vec<Annotation>,
    /// The log probabilities of the output tokens, when requested.
    pub logprobs: Option<Vec<LogProb>>,
1468    /// The text output from the model.
1469    pub text: String,
1470}
1471
1472/// An annotation that applies to a span of output text.
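///
/// Annotations are tagged by `type`. A URL citation, for instance, might look
/// something like this (the title and URL are placeholders):
///
/// ```json
/// {
///   "type": "url_citation",
///   "start_index": 0,
///   "end_index": 42,
///   "title": "Example page",
///   "url": "https://example.com"
/// }
/// ```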
1473#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1474#[serde(tag = "type", rename_all = "snake_case")]
1475pub enum Annotation {
1476    /// A citation to a file.
1477    FileCitation(FileCitationBody),
1478    /// A citation for a web resource used to generate a model response.
1479    UrlCitation(UrlCitationBody),
1480    /// A citation for a container file used to generate a model response.
1481    ContainerFileCitation(ContainerFileCitationBody),
1482    /// A path to a file.
1483    FilePath(FilePath),
1484}
1485
1486#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1487pub struct FileCitationBody {
    /// The ID of the file.
    pub file_id: String,
    /// The filename of the file cited.
    pub filename: String,
    /// The index of the file in the list of files.
    pub index: u32,
1494}
1495
1496#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1497pub struct UrlCitationBody {
    /// The index of the last character of the URL citation in the message.
    pub end_index: u32,
    /// The index of the first character of the URL citation in the message.
    pub start_index: u32,
    /// The title of the web resource.
    pub title: String,
    /// The URL of the web resource.
    pub url: String,
1506}
1507
1508#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1509pub struct ContainerFileCitationBody {
    /// The ID of the container file.
    pub container_id: String,
    /// The index of the last character of the container file citation in the message.
    pub end_index: u32,
    /// The ID of the file.
    pub file_id: String,
    /// The filename of the container file cited.
    pub filename: String,
    /// The index of the first character of the container file citation in the message.
    pub start_index: u32,
1520}
1521
1522#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1523pub struct FilePath {
    /// The ID of the file.
    pub file_id: String,
    /// The index of the file in the list of files.
    pub index: u32,
1528}
1529
1530/// A refusal explanation from the model.
1531#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1532pub struct RefusalContent {
1533    /// The refusal explanation from the model.
1534    pub refusal: String,
1535}
1536
1537/// A message generated by the model.
1538#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1539pub struct OutputMessage {
1540    /// The content of the output message.
1541    pub content: Vec<OutputMessageContent>,
1542    /// The unique ID of the output message.
1543    pub id: String,
1544    /// The role of the output message. Always `assistant`.
1545    pub role: AssistantRole,
    /// The status of the message. One of `in_progress`, `completed`, or
    /// `incomplete`. Populated when items are returned via API.
1548    pub status: OutputStatus,
1549    ///// The type of the output message. Always `message`.
1550    //pub r#type: MessageType,
1551}
1552
1553#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1554#[serde(rename_all = "lowercase")]
1555pub enum MessageType {
1556    #[default]
1557    Message,
1558}
1559
1560/// The role for an output message - always `assistant`.
1561/// This type ensures type safety by only allowing the assistant role.
1562#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
1563#[serde(rename_all = "lowercase")]
1564pub enum AssistantRole {
1565    #[default]
1566    Assistant,
1567}
1568
1569#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1570#[serde(tag = "type", rename_all = "snake_case")]
1571pub enum OutputMessageContent {
1572    /// A text output from the model.
1573    OutputText(OutputTextContent),
1574    /// A refusal from the model.
1575    Refusal(RefusalContent),
1576}
1577
1578#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1579#[serde(tag = "type", rename_all = "snake_case")]
1580pub enum OutputContent {
1581    /// A text output from the model.
1582    OutputText(OutputTextContent),
1583    /// A refusal from the model.
1584    Refusal(RefusalContent),
1585    /// Reasoning text from the model.
1586    ReasoningText(ReasoningTextContent),
1587}
1588
1589#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1590pub struct ReasoningTextContent {
1591    /// The reasoning text from the model.
1592    pub text: String,
1593}
1594
1595/// A reasoning item representing the model's chain of thought, including summary paragraphs.
1596#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1597pub struct ReasoningItem {
1598    /// Unique identifier of the reasoning content.
1599    pub id: String,
1600    /// Reasoning summary content.
1601    pub summary: Vec<SummaryPart>,
1602    /// Reasoning text content.
1603    #[serde(skip_serializing_if = "Option::is_none")]
1604    pub content: Option<Vec<ReasoningTextContent>>,
1605    /// The encrypted content of the reasoning item - populated when a response is generated with
1606    /// `reasoning.encrypted_content` in the `include` parameter.
1607    #[serde(skip_serializing_if = "Option::is_none")]
1608    pub encrypted_content: Option<String>,
1609    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
1610    /// Populated when items are returned via API.
1611    #[serde(skip_serializing_if = "Option::is_none")]
1612    pub status: Option<OutputStatus>,
1613}
1614
1615#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1616#[serde(tag = "type", rename_all = "snake_case")]
1617pub enum SummaryPart {
1618    SummaryText(SummaryTextContent),
1619}
1620
1621/// File search tool call output.
1622#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1623pub struct FileSearchToolCall {
1624    /// The unique ID of the file search tool call.
1625    pub id: String,
1626    /// The queries used to search for files.
1627    pub queries: Vec<String>,
1628    /// The status of the file search tool call. One of `in_progress`, `searching`,
1629    /// `incomplete`,`failed`, or `completed`.
1630    pub status: FileSearchToolCallStatus,
1631    /// The results of the file search tool call.
1632    #[serde(skip_serializing_if = "Option::is_none")]
1633    pub results: Option<Vec<FileSearchToolCallResult>>,
1634}
1635
1636#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1637#[serde(rename_all = "snake_case")]
1638pub enum FileSearchToolCallStatus {
1639    InProgress,
1640    Searching,
1641    Incomplete,
1642    Failed,
1643    Completed,
1644}
1645
1646/// A single result from a file search.
1647#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1648pub struct FileSearchToolCallResult {
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
    /// additional information about the object in a structured format, and querying for objects
    /// via API or the dashboard. Keys are strings with a maximum length of 64 characters.
    /// Values are strings with a maximum length of 512 characters, booleans, or numbers.
1653    pub attributes: HashMap<String, serde_json::Value>,
1654    /// The unique ID of the file.
1655    pub file_id: String,
1656    /// The name of the file.
1657    pub filename: String,
1658    /// The relevance score of the file - a value between 0 and 1.
1659    pub score: f32,
1660    /// The text that was retrieved from the file.
1661    pub text: String,
1662}
1663
1664#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1665pub struct ComputerCallSafetyCheckParam {
1666    /// The ID of the pending safety check.
1667    pub id: String,
1668    /// The type of the pending safety check.
1669    #[serde(skip_serializing_if = "Option::is_none")]
1670    pub code: Option<String>,
1671    /// Details about the pending safety check.
1672    #[serde(skip_serializing_if = "Option::is_none")]
1673    pub message: Option<String>,
1674}
1675
1676#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1677#[serde(rename_all = "snake_case")]
1678pub enum WebSearchToolCallStatus {
1679    InProgress,
1680    Searching,
1681    Completed,
1682    Failed,
1683}
1684
1685#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1686pub struct WebSearchActionSearchSource {
1687    /// The type of source. Always `url`.
1688    pub r#type: String,
1689    /// The URL of the source.
1690    pub url: String,
1691}
1692
1693#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1694pub struct WebSearchActionSearch {
1695    /// The search query.
1696    pub query: String,
1697    /// The sources used in the search.
1698    pub sources: Option<Vec<WebSearchActionSearchSource>>,
1699}
1700
1701#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1702pub struct WebSearchActionOpenPage {
1703    /// The URL opened by the model.
1704    pub url: Option<String>,
1705}
1706
1707#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1708pub struct WebSearchActionFind {
1709    /// The URL of the page searched for the pattern.
1710    pub url: String,
1711    /// The pattern or text to search for within the page.
1712    pub pattern: String,
1713}
1714
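/// An action performed as part of a web search tool call.
///
/// Actions are tagged by `type`; a search action, for instance, might look
/// roughly like this (the query is a placeholder):
///
/// ```json
/// { "type": "search", "query": "rust serde tagged enums", "sources": null }
/// ```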
1715#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1716#[serde(tag = "type", rename_all = "snake_case")]
1717pub enum WebSearchToolCallAction {
1718    /// Action type "search" - Performs a web search query.
1719    Search(WebSearchActionSearch),
1720    /// Action type "open_page" - Opens a specific URL from search results.
1721    OpenPage(WebSearchActionOpenPage),
    /// Action type "find" - Searches for a pattern within a loaded page.
    Find(WebSearchActionFind),
    /// Action type "find_in_page" - Searches for a pattern within a loaded page. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search#output-and-citations)
    /// for more information.
    FindInPage(WebSearchActionFind),
1726}
1727
1728/// Web search tool call output.
1729#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1730pub struct WebSearchToolCall {
1731    /// An object describing the specific action taken in this web search call. Includes
1732    /// details on how the model used the web (search, open_page, find, find_in_page).
1733    pub action: WebSearchToolCallAction,
1734    /// The unique ID of the web search tool call.
1735    pub id: String,
1736    /// The status of the web search tool call.
1737    pub status: WebSearchToolCallStatus,
1738}
1739
/// A tool call to a computer use tool.
1741#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1742pub struct ComputerToolCall {
    /// The action to be performed by the computer use tool.
    pub action: ComputerAction,
1744    /// An identifier used when responding to the tool call with output.
1745    pub call_id: String,
1746    /// The unique ID of the computer call.
1747    pub id: String,
1748    /// The pending safety checks for the computer call.
1749    pub pending_safety_checks: Vec<ComputerCallSafetyCheckParam>,
1750    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
1751    /// Populated when items are returned via API.
1752    pub status: OutputStatus,
1753}
1754
1755/// An x/y coordinate pair.
1756#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1757pub struct CoordParam {
1758    /// The x-coordinate.
1759    pub x: i32,
1760    /// The y-coordinate.
1761    pub y: i32,
1762}
1763
/// An action to be performed on the computer by the computer use tool.
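///
/// Actions are tagged by `type`. A left click at (100, 200), for example,
/// would serialize roughly as:
///
/// ```json
/// { "type": "click", "button": "left", "x": 100, "y": 200 }
/// ```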
1765#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1766#[serde(tag = "type", rename_all = "snake_case")]
1767pub enum ComputerAction {
1768    /// A click action.
1769    Click(ClickParam),
1770
1771    /// A double click action.
1772    DoubleClick(DoubleClickAction),
1773
1774    /// A drag action.
1775    Drag(DragParam),
1776
1777    /// A collection of keypresses the model would like to perform.
1778    Keypress(KeyPressAction),
1779
1780    /// A mouse move action.
1781    Move(MoveParam),
1782
1783    /// A screenshot action.
1784    Screenshot,
1785
1786    /// A scroll action.
1787    Scroll(ScrollParam),
1788
1789    /// An action to type in text.
1790    Type(TypeParam),
1791
1792    /// A wait action.
1793    Wait,
1794}
1795
1796#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1797#[serde(rename_all = "lowercase")]
1798pub enum ClickButtonType {
1799    Left,
1800    Right,
1801    Wheel,
1802    Back,
1803    Forward,
1804}
1805
1806/// A click action.
1807#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1808pub struct ClickParam {
1809    /// Indicates which mouse button was pressed during the click. One of `left`,
1810    /// `right`, `wheel`, `back`, or `forward`.
1811    pub button: ClickButtonType,
1812    /// The x-coordinate where the click occurred.
1813    pub x: i32,
1814    /// The y-coordinate where the click occurred.
1815    pub y: i32,
1816}
1817
1818/// A double click action.
1819#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1820pub struct DoubleClickAction {
1821    /// The x-coordinate where the double click occurred.
1822    pub x: i32,
1823    /// The y-coordinate where the double click occurred.
1824    pub y: i32,
1825}
1826
1827/// A drag action.
1828#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1829pub struct DragParam {
1830    /// An array of coordinates representing the path of the drag action.
1831    pub path: Vec<CoordParam>,
1832}
1833
1834/// A keypress action.
1835#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1836pub struct KeyPressAction {
1837    /// The combination of keys the model is requesting to be pressed.
1838    /// This is an array of strings, each representing a key.
1839    pub keys: Vec<String>,
1840}
1841
1842/// A mouse move action.
1843#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1844pub struct MoveParam {
1845    /// The x-coordinate to move to.
1846    pub x: i32,
1847    /// The y-coordinate to move to.
1848    pub y: i32,
1849}
1850
1851/// A scroll action.
1852#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1853pub struct ScrollParam {
1854    /// The horizontal scroll distance.
1855    pub scroll_x: i32,
1856    /// The vertical scroll distance.
1857    pub scroll_y: i32,
1858    /// The x-coordinate where the scroll occurred.
1859    pub x: i32,
1860    /// The y-coordinate where the scroll occurred.
1861    pub y: i32,
1862}
1863
1864/// A typing (text entry) action.
1865#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1866pub struct TypeParam {
1867    /// The text to type.
1868    pub text: String,
1869}
1870
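/// A tool call to run a function defined by the caller.
///
/// On its own this struct carries no `type` field; when wrapped in [`Item`] or
/// [`OutputItem`] the enum tag adds `"type": "function_call"`. A serialized
/// call might look roughly like this (the name, arguments, and IDs are
/// placeholders):
///
/// ```json
/// {
///   "call_id": "call_123",
///   "name": "get_weather",
///   "arguments": "{\"location\":\"Paris\"}",
///   "status": "completed"
/// }
/// ```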
1871#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1872pub struct FunctionToolCall {
1873    /// A JSON string of the arguments to pass to the function.
1874    pub arguments: String,
1875    /// The unique ID of the function tool call generated by the model.
1876    pub call_id: String,
1877    /// The name of the function to run.
1878    pub name: String,
1879    /// The unique ID of the function tool call.
1880    #[serde(skip_serializing_if = "Option::is_none")]
1881    pub id: Option<String>,
1882    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
1883    /// Populated when items are returned via API.
1884    #[serde(skip_serializing_if = "Option::is_none")]
1885    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
1886}
1887
1888#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1889#[serde(rename_all = "snake_case")]
1890pub enum ImageGenToolCallStatus {
1891    InProgress,
1892    Completed,
1893    Generating,
1894    Failed,
1895}
1896
1897#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1898pub struct ImageGenToolCall {
1899    /// The unique ID of the image generation call.
1900    pub id: String,
1901    /// The generated image encoded in base64.
1902    pub result: Option<String>,
1903    /// The status of the image generation call.
1904    pub status: ImageGenToolCallStatus,
1905}
1906
1907#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1908#[serde(rename_all = "snake_case")]
1909pub enum CodeInterpreterToolCallStatus {
1910    InProgress,
1911    Completed,
1912    Incomplete,
1913    Interpreting,
1914    Failed,
1915}
1916
1917/// Output of a code interpreter request.
1918#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1919pub struct CodeInterpreterToolCall {
1920    /// The code to run, or null if not available.
1921    #[serde(skip_serializing_if = "Option::is_none")]
1922    pub code: Option<String>,
1923    /// ID of the container used to run the code.
1924    pub container_id: String,
1925    /// The unique ID of the code interpreter tool call.
1926    pub id: String,
1927    /// The outputs generated by the code interpreter, such as logs or images.
1928    /// Can be null if no outputs are available.
1929    #[serde(skip_serializing_if = "Option::is_none")]
1930    pub outputs: Option<Vec<CodeInterpreterToolCallOutput>>,
1931    /// The status of the code interpreter tool call.
1932    /// Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`.
1933    pub status: CodeInterpreterToolCallStatus,
1934}
1935
/// Individual output from a code interpreter tool call: either logs or an image.
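///
/// For example, a logs output serializes roughly as:
///
/// ```json
/// { "type": "logs", "logs": "hello from the sandbox\n" }
/// ```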
1937#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1938#[serde(tag = "type", rename_all = "snake_case")]
1939pub enum CodeInterpreterToolCallOutput {
1940    /// Code interpreter output logs
1941    Logs(CodeInterpreterOutputLogs),
1942    /// Code interpreter output image
1943    Image(CodeInterpreterOutputImage),
1944}
1945
1946#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1947pub struct CodeInterpreterOutputLogs {
1948    /// The logs output from the code interpreter.
1949    pub logs: String,
1950}
1951
1952#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1953pub struct CodeInterpreterOutputImage {
1954    /// The URL of the image output from the code interpreter.
1955    pub url: String,
1956}
1957
1958#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1959pub struct CodeInterpreterFile {
    /// The ID of the file.
    pub file_id: String,
    /// The MIME type of the file.
    pub mime_type: String,
1964}
1965
1966#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1967pub struct LocalShellToolCall {
1968    /// Execute a shell command on the server.
1969    pub action: LocalShellExecAction,
1970    /// The unique ID of the local shell tool call generated by the model.
1971    pub call_id: String,
1972    /// The unique ID of the local shell call.
1973    pub id: String,
1974    /// The status of the local shell call.
1975    pub status: OutputStatus,
1976}
1977
1978/// Define the shape of a local shell action (exec).
1979#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1980pub struct LocalShellExecAction {
1981    /// The command to run.
1982    pub command: Vec<String>,
1983    /// Environment variables to set for the command.
1984    pub env: HashMap<String, String>,
1985    /// Optional timeout in milliseconds for the command.
1986    pub timeout_ms: Option<u64>,
1987    /// Optional user to run the command as.
1988    pub user: Option<String>,
1989    /// Optional working directory to run the command in.
1990    pub working_directory: Option<String>,
1991}
1992
1993/// Commands and limits describing how to run the shell tool call.
1994#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1995pub struct FunctionShellActionParam {
1996    /// Ordered shell commands for the execution environment to run.
1997    pub commands: Vec<String>,
1998    /// Maximum wall-clock time in milliseconds to allow the shell commands to run.
1999    #[serde(skip_serializing_if = "Option::is_none")]
2000    pub timeout_ms: Option<u64>,
2001    /// Maximum number of UTF-8 characters to capture from combined stdout and stderr output.
2002    #[serde(skip_serializing_if = "Option::is_none")]
2003    pub max_output_length: Option<u64>,
2004}
2005
2006/// Status values reported for shell tool calls.
2007#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2008#[serde(rename_all = "snake_case")]
2009pub enum FunctionShellCallItemStatus {
2010    InProgress,
2011    Completed,
2012    Incomplete,
2013}
2014
2015/// The environment for a shell call item (request side).
2016#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2017#[serde(tag = "type", rename_all = "snake_case")]
2018pub enum FunctionShellCallItemEnvironment {
2019    /// Use a local computer environment.
2020    Local(LocalEnvironmentParam),
2021    /// Reference an existing container by ID.
2022    ContainerReference(ContainerReferenceParam),
2023}
2024
2025/// A tool representing a request to execute one or more shell commands.
2026#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2027pub struct FunctionShellCallItemParam {
2028    /// The unique ID of the shell tool call. Populated when this item is returned via API.
2029    #[serde(skip_serializing_if = "Option::is_none")]
2030    pub id: Option<String>,
2031    /// The unique ID of the shell tool call generated by the model.
2032    pub call_id: String,
2033    /// The shell commands and limits that describe how to run the tool call.
2034    pub action: FunctionShellActionParam,
2035    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
2036    #[serde(skip_serializing_if = "Option::is_none")]
2037    pub status: Option<FunctionShellCallItemStatus>,
2038    /// The environment to execute the shell commands in.
2039    #[serde(skip_serializing_if = "Option::is_none")]
2040    pub environment: Option<FunctionShellCallItemEnvironment>,
2041}
2042
2043/// Indicates that the shell commands finished and returned an exit code.
2044#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2045pub struct FunctionShellCallOutputExitOutcomeParam {
2046    /// The exit code returned by the shell process.
2047    pub exit_code: i32,
2048}
2049
2050/// The exit or timeout outcome associated with this chunk.
2051#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2052#[serde(tag = "type", rename_all = "snake_case")]
2053pub enum FunctionShellCallOutputOutcomeParam {
2054    Timeout,
2055    Exit(FunctionShellCallOutputExitOutcomeParam),
2056}
2057
2058/// Captured stdout and stderr for a portion of a shell tool call output.
2059#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2060pub struct FunctionShellCallOutputContentParam {
2061    /// Captured stdout output for this chunk of the shell call.
2062    pub stdout: String,
2063    /// Captured stderr output for this chunk of the shell call.
2064    pub stderr: String,
2065    /// The exit or timeout outcome associated with this chunk.
2066    pub outcome: FunctionShellCallOutputOutcomeParam,
2067}
2068
2069/// The streamed output items emitted by a shell tool call.
2070#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2071pub struct FunctionShellCallOutputItemParam {
2072    /// The unique ID of the shell tool call output. Populated when this item is returned via API.
2073    #[serde(skip_serializing_if = "Option::is_none")]
2074    pub id: Option<String>,
2075    /// The unique ID of the shell tool call generated by the model.
2076    pub call_id: String,
2077    /// Captured chunks of stdout and stderr output, along with their associated outcomes.
2078    pub output: Vec<FunctionShellCallOutputContentParam>,
2079    /// The maximum number of UTF-8 characters captured for this shell call's combined output.
2080    #[serde(skip_serializing_if = "Option::is_none")]
2081    pub max_output_length: Option<u64>,
2082}
2083
2084/// Status values reported for apply_patch tool calls.
2085#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2086#[serde(rename_all = "snake_case")]
2087pub enum ApplyPatchCallStatusParam {
2088    InProgress,
2089    Completed,
2090}
2091
2092/// Instruction for creating a new file via the apply_patch tool.
2093#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2094pub struct ApplyPatchCreateFileOperationParam {
2095    /// Path of the file to create relative to the workspace root.
2096    pub path: String,
2097    /// Unified diff content to apply when creating the file.
2098    pub diff: String,
2099}
2100
2101/// Instruction for deleting an existing file via the apply_patch tool.
2102#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2103pub struct ApplyPatchDeleteFileOperationParam {
2104    /// Path of the file to delete relative to the workspace root.
2105    pub path: String,
2106}
2107
2108/// Instruction for updating an existing file via the apply_patch tool.
2109#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2110pub struct ApplyPatchUpdateFileOperationParam {
2111    /// Path of the file to update relative to the workspace root.
2112    pub path: String,
2113    /// Unified diff content to apply to the existing file.
2114    pub diff: String,
2115}
2116
2117/// One of the create_file, delete_file, or update_file operations supplied to the apply_patch tool.
2118#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2119#[serde(tag = "type", rename_all = "snake_case")]
2120pub enum ApplyPatchOperationParam {
2121    CreateFile(ApplyPatchCreateFileOperationParam),
2122    DeleteFile(ApplyPatchDeleteFileOperationParam),
2123    UpdateFile(ApplyPatchUpdateFileOperationParam),
2124}
2125
2126/// A tool call representing a request to create, delete, or update files using diff patches.
2127#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2128pub struct ApplyPatchToolCallItemParam {
2129    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
2130    #[serde(skip_serializing_if = "Option::is_none")]
2131    pub id: Option<String>,
2132    /// The unique ID of the apply patch tool call generated by the model.
2133    pub call_id: String,
2134    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
2135    pub status: ApplyPatchCallStatusParam,
2136    /// The specific create, delete, or update instruction for the apply_patch tool call.
2137    pub operation: ApplyPatchOperationParam,
2138}
2139
2140/// Outcome values reported for apply_patch tool call outputs.
2141#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2142#[serde(rename_all = "snake_case")]
2143pub enum ApplyPatchCallOutputStatusParam {
2144    Completed,
2145    Failed,
2146}
2147
2148/// The streamed output emitted by an apply patch tool call.
2149#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2150pub struct ApplyPatchToolCallOutputItemParam {
2151    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
2152    #[serde(skip_serializing_if = "Option::is_none")]
2153    pub id: Option<String>,
2154    /// The unique ID of the apply patch tool call generated by the model.
2155    pub call_id: String,
2156    /// The status of the apply patch tool call output. One of `completed` or `failed`.
2157    pub status: ApplyPatchCallOutputStatusParam,
2158    /// Optional human-readable log text from the apply patch tool (e.g., patch results or errors).
2159    #[serde(skip_serializing_if = "Option::is_none")]
2160    pub output: Option<String>,
2161}
2162
/// A shell exec action: executes one or more shell commands.
2165#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2166pub struct FunctionShellAction {
2167    /// A list of commands to run.
2168    pub commands: Vec<String>,
2169    /// Optional timeout in milliseconds for the commands.
2170    pub timeout_ms: Option<u64>,
2171    /// Optional maximum number of characters to return from each command.
2172    pub max_output_length: Option<u64>,
2173}
2174
2175/// Status values reported for function shell tool calls.
2176#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2177#[serde(rename_all = "snake_case")]
2178pub enum LocalShellCallStatus {
2179    InProgress,
2180    Completed,
2181    Incomplete,
2182}
2183
2184/// The environment for a shell call (response side).
2185#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2186#[serde(tag = "type", rename_all = "snake_case")]
2187pub enum FunctionShellCallEnvironment {
2188    /// A local computer environment.
2189    Local,
2190    /// A referenced container.
2191    ContainerReference(ContainerReferenceResource),
2192}
2193
2194/// A tool call that executes one or more shell commands in a managed environment.
2195#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2196pub struct FunctionShellCall {
2197    /// The unique ID of the function shell tool call. Populated when this item is returned via API.
2198    pub id: String,
2199    /// The unique ID of the function shell tool call generated by the model.
2200    pub call_id: String,
2201    /// The shell commands and limits that describe how to run the tool call.
2202    pub action: FunctionShellAction,
2203    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
2204    pub status: LocalShellCallStatus,
2205    /// The environment in which the shell commands were executed.
2206    pub environment: Option<FunctionShellCallEnvironment>,
2207    /// The ID of the entity that created this tool call.
2208    #[serde(skip_serializing_if = "Option::is_none")]
2209    pub created_by: Option<String>,
2210}
2211
2212/// The content of a shell tool call output that was emitted.
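///
/// Because `outcome` is flattened, its tag and fields appear at the top level
/// of the JSON object. A chunk whose command exited with code 0 might look
/// roughly like:
///
/// ```json
/// { "stdout": "hello\n", "stderr": "", "type": "exit", "exit_code": 0 }
/// ```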
2213#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2214pub struct FunctionShellCallOutputContent {
2215    /// The standard output that was captured.
2216    pub stdout: String,
2217    /// The standard error output that was captured.
2218    pub stderr: String,
2219    /// Represents either an exit outcome (with an exit code) or a timeout outcome for a shell call output chunk.
2220    #[serde(flatten)]
2221    pub outcome: FunctionShellCallOutputOutcome,
2222    /// The identifier of the actor that created the item.
2223    #[serde(skip_serializing_if = "Option::is_none")]
2224    pub created_by: Option<String>,
2225}
2226
2227/// Function shell call outcome
2228#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2229#[serde(tag = "type", rename_all = "snake_case")]
2230pub enum FunctionShellCallOutputOutcome {
2231    Timeout,
2232    Exit(FunctionShellCallOutputExitOutcome),
2233}
2234
2235/// Indicates that the shell commands finished and returned an exit code.
2236#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2237pub struct FunctionShellCallOutputExitOutcome {
2238    /// Exit code from the shell process.
2239    pub exit_code: i32,
2240}
2241
2242/// The output of a shell tool call that was emitted.
2243#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2244pub struct FunctionShellCallOutput {
2245    /// The unique ID of the shell call output. Populated when this item is returned via API.
2246    pub id: String,
2247    /// The unique ID of the shell tool call generated by the model.
2248    pub call_id: String,
    /// An array of shell call output contents.
2250    pub output: Vec<FunctionShellCallOutputContent>,
2251    /// The maximum length of the shell command output. This is generated by the model and should be
2252    /// passed back with the raw output.
2253    pub max_output_length: Option<u64>,
2254    /// The identifier of the actor that created the item.
2255    #[serde(skip_serializing_if = "Option::is_none")]
2256    pub created_by: Option<String>,
2257}
2258
2259/// Status values reported for apply_patch tool calls.
2260#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2261#[serde(rename_all = "snake_case")]
2262pub enum ApplyPatchCallStatus {
2263    InProgress,
2264    Completed,
2265}
2266
2267/// Instruction describing how to create a file via the apply_patch tool.
2268#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2269pub struct ApplyPatchCreateFileOperation {
2270    /// Path of the file to create.
2271    pub path: String,
2272    /// Diff to apply.
2273    pub diff: String,
2274}
2275
2276/// Instruction describing how to delete a file via the apply_patch tool.
2277#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2278pub struct ApplyPatchDeleteFileOperation {
2279    /// Path of the file to delete.
2280    pub path: String,
2281}
2282
2283/// Instruction describing how to update a file via the apply_patch tool.
2284#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2285pub struct ApplyPatchUpdateFileOperation {
2286    /// Path of the file to update.
2287    pub path: String,
2288    /// Diff to apply.
2289    pub diff: String,
2290}
2291
2292/// One of the create_file, delete_file, or update_file operations applied via apply_patch.
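///
/// For example, an update operation serializes along these lines (the path and
/// diff are placeholders):
///
/// ```json
/// { "type": "update_file", "path": "src/lib.rs", "diff": "@@ ... @@" }
/// ```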
2293#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2294#[serde(tag = "type", rename_all = "snake_case")]
2295pub enum ApplyPatchOperation {
2296    CreateFile(ApplyPatchCreateFileOperation),
2297    DeleteFile(ApplyPatchDeleteFileOperation),
2298    UpdateFile(ApplyPatchUpdateFileOperation),
2299}
2300
2301/// A tool call that applies file diffs by creating, deleting, or updating files.
2302#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2303pub struct ApplyPatchToolCall {
2304    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
2305    pub id: String,
2306    /// The unique ID of the apply patch tool call generated by the model.
2307    pub call_id: String,
2308    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
2309    pub status: ApplyPatchCallStatus,
2310    /// One of the create_file, delete_file, or update_file operations applied via apply_patch.
2311    pub operation: ApplyPatchOperation,
2312    /// The ID of the entity that created this tool call.
2313    #[serde(skip_serializing_if = "Option::is_none")]
2314    pub created_by: Option<String>,
2315}
2316
2317/// Outcome values reported for apply_patch tool call outputs.
2318#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2319#[serde(rename_all = "snake_case")]
2320pub enum ApplyPatchCallOutputStatus {
2321    Completed,
2322    Failed,
2323}
2324
2325/// The output emitted by an apply patch tool call.
2326#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2327pub struct ApplyPatchToolCallOutput {
2328    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
2329    pub id: String,
2330    /// The unique ID of the apply patch tool call generated by the model.
2331    pub call_id: String,
2332    /// The status of the apply patch tool call output. One of `completed` or `failed`.
2333    pub status: ApplyPatchCallOutputStatus,
2334    /// Optional textual output returned by the apply patch tool.
2335    pub output: Option<String>,
2336    /// The ID of the entity that created this tool call output.
2337    #[serde(skip_serializing_if = "Option::is_none")]
2338    pub created_by: Option<String>,
2339}
2340
/// An invocation of a tool on an MCP server.
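///
/// A completed call might be shaped roughly like this (IDs, names, and values
/// are placeholders; optional fields may be `null` or omitted):
///
/// ```json
/// {
///   "id": "mcp_123",
///   "name": "search_docs",
///   "server_label": "deepwiki",
///   "arguments": "{\"query\":\"weather\"}",
///   "output": "Sunny, 22C",
///   "status": "completed"
/// }
/// ```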
2342#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2343pub struct MCPToolCall {
2344    /// A JSON string of the arguments passed to the tool.
2345    pub arguments: String,
2346    /// The unique ID of the tool call.
2347    pub id: String,
2348    /// The name of the tool that was run.
2349    pub name: String,
2350    /// The label of the MCP server running the tool.
2351    pub server_label: String,
2352    /// Unique identifier for the MCP tool call approval request. Include this value
2353    /// in a subsequent `mcp_approval_response` input to approve or reject the corresponding
2354    /// tool call.
2355    pub approval_request_id: Option<String>,
2356    /// Error message from the call, if any.
2357    pub error: Option<String>,
2358    /// The output from the tool call.
2359    pub output: Option<String>,
2360    /// The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
2361    /// `calling`, or `failed`.
2362    pub status: Option<MCPToolCallStatus>,
2363}
2364
2365#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2366#[serde(rename_all = "snake_case")]
2367pub enum MCPToolCallStatus {
2368    InProgress,
2369    Completed,
2370    Incomplete,
2371    Calling,
2372    Failed,
2373}
2374
2375#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2376pub struct MCPListTools {
2377    /// The unique ID of the list.
2378    pub id: String,
2379    /// The label of the MCP server.
2380    pub server_label: String,
2381    /// The tools available on the server.
2382    pub tools: Vec<MCPListToolsTool>,
2383    /// Error message if listing failed.
2384    #[serde(skip_serializing_if = "Option::is_none")]
2385    pub error: Option<String>,
2386}
2387
2388#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2389pub struct MCPApprovalRequest {
2390    /// JSON string of arguments for the tool.
2391    pub arguments: String,
2392    /// The unique ID of the approval request.
2393    pub id: String,
2394    /// The name of the tool to run.
2395    pub name: String,
2396    /// The label of the MCP server making the request.
2397    pub server_label: String,
2398}
2399
2400#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2401#[serde(untagged)]
2402pub enum Instructions {
2403    /// A text input to the model, equivalent to a text input with the `developer` role.
2404    Text(String),
2405    /// A list of one or many input items to the model, containing different content types.
2406    Array(Vec<InputItem>),
2407}
2408
2409/// The complete response returned by the Responses API.
2410#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2411pub struct Response {
2412    /// Whether to run the model response in the background.
2413    /// [Learn more](https://platform.openai.com/docs/guides/background).
2414    #[serde(skip_serializing_if = "Option::is_none")]
2415    pub background: Option<bool>,
2416
2417    /// Billing information for the response.
2418    #[serde(skip_serializing_if = "Option::is_none")]
2419    pub billing: Option<Billing>,
2420
2421    /// The conversation that this response belongs to. Input items and output
2422    /// items from this response are automatically added to this conversation.
2423    #[serde(skip_serializing_if = "Option::is_none")]
2424    pub conversation: Option<Conversation>,
2425
2426    /// Unix timestamp (in seconds) when this Response was created.
2427    pub created_at: u64,
2428
2429    /// Unix timestamp (in seconds) of when this Response was completed.
2430    /// Only present when the status is `completed`.
2431    #[serde(skip_serializing_if = "Option::is_none")]
2432    pub completed_at: Option<u64>,
2433
2434    /// An error object returned when the model fails to generate a Response.
2435    #[serde(skip_serializing_if = "Option::is_none")]
2436    pub error: Option<ErrorObject>,
2437
2438    /// Unique identifier for this response.
2439    pub id: String,
2440
2441    /// Details about why the response is incomplete, if any.
2442    #[serde(skip_serializing_if = "Option::is_none")]
2443    pub incomplete_details: Option<IncompleteDetails>,
2444
2445    /// A system (or developer) message inserted into the model's context.
2446    ///
    /// When used along with `previous_response_id`, the instructions from a previous response
2448    /// will not be carried over to the next response. This makes it simple to swap out
2449    /// system (or developer) messages in new responses.
2450    #[serde(skip_serializing_if = "Option::is_none")]
2451    pub instructions: Option<Instructions>,
2452
2453    /// An upper bound for the number of tokens that can be generated for a response,
2454    /// including visible output tokens and
2455    /// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
2456    #[serde(skip_serializing_if = "Option::is_none")]
2457    pub max_output_tokens: Option<u32>,
2458
2459    /// Set of 16 key-value pairs that can be attached to an object. This can be
2460    /// useful for storing additional information about the object in a structured
2461    /// format, and querying for objects via API or the dashboard.
2462    ///
2463    /// Keys are strings with a maximum length of 64 characters. Values are strings
2464    /// with a maximum length of 512 characters.
2465    #[serde(skip_serializing_if = "Option::is_none")]
2466    pub metadata: Option<HashMap<String, String>>,
2467
2468    /// Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a
2469    /// wide range of models with different capabilities, performance characteristics,
2470    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
2471    pub model: String,
2472
2473    /// The object type of this resource - always set to `response`.
2474    pub object: String,
2475
2476    /// An array of content items generated by the model.
2477    ///
2478    /// - The length and order of items in the output array is dependent on the model's response.
2479    /// - Rather than accessing the first item in the output array and assuming it's an assistant
2480    ///   message with the content generated by the model, you might consider using
2481    ///   the `output_text` property where supported in SDKs.
2482    pub output: Vec<OutputItem>,
2483
    // SDK-only convenience property that contains the aggregated text output from all
    // `output_text` items in the `output` array, if any are present.
    // Supported in the Python and JavaScript SDKs.
2487    // #[serde(skip_serializing_if = "Option::is_none")]
2488    // pub output_text: Option<String>,
2489
2490    /// Whether to allow the model to run tool calls in parallel.
2491    #[serde(skip_serializing_if = "Option::is_none")]
2492    pub parallel_tool_calls: Option<bool>,
2493
2494    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
2495    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
2496    /// Cannot be used in conjunction with `conversation`.
2497    #[serde(skip_serializing_if = "Option::is_none")]
2498    pub previous_response_id: Option<String>,
2499
2500    /// Reference to a prompt template and its variables.
2501    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
2502    #[serde(skip_serializing_if = "Option::is_none")]
2503    pub prompt: Option<Prompt>,
2504
2505    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
2506    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
2507    #[serde(skip_serializing_if = "Option::is_none")]
2508    pub prompt_cache_key: Option<String>,
2509
2510    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
2511    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
2512    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
2513    #[serde(skip_serializing_if = "Option::is_none")]
2514    pub prompt_cache_retention: Option<PromptCacheRetention>,
2515
2516    /// **gpt-5 and o-series models only**
2517    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
2518    #[serde(skip_serializing_if = "Option::is_none")]
2519    pub reasoning: Option<Reasoning>,
2520
2521    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
2522    /// usage policies.
2523    ///
2524    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
2525    /// or email address, in order to avoid sending us any identifying information. [Learn
2526    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
2527    #[serde(skip_serializing_if = "Option::is_none")]
2528    pub safety_identifier: Option<String>,
2529
2530    /// Specifies the processing type used for serving the request.
2531    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
2532    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
2533    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
2534    /// - When not set, the default behavior is 'auto'.
2535    ///
2536    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
2537    #[serde(skip_serializing_if = "Option::is_none")]
2538    pub service_tier: Option<ServiceTier>,
2539
2540    /// The status of the response generation.
2541    /// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or `incomplete`.
2542    pub status: Status,
2543
2544    /// What sampling temperature was used, between 0 and 2. Higher values like 0.8 make
2545    /// outputs more random, lower values like 0.2 make output more focused and deterministic.
2546    ///
2547    /// We generally recommend altering this or `top_p` but not both.
2548    #[serde(skip_serializing_if = "Option::is_none")]
2549    pub temperature: Option<f32>,
2550
2551    /// Configuration options for a text response from the model. Can be plain
2552    /// text or structured JSON data. Learn more:
2553    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
2554    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
2555    #[serde(skip_serializing_if = "Option::is_none")]
2556    pub text: Option<ResponseTextParam>,
2557
2558    /// How the model should select which tool (or tools) to use when generating
2559    /// a response. See the `tools` parameter to see how to specify which tools
2560    /// the model can call.
2561    #[serde(skip_serializing_if = "Option::is_none")]
2562    pub tool_choice: Option<ToolChoiceParam>,
2563
2564    /// An array of tools the model may call while generating a response. You
2565    /// can specify which tool to use by setting the `tool_choice` parameter.
2566    ///
2567    /// We support the following categories of tools:
2568    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
2569    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
2570    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
2571    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
2572    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
2573    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
2574    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
2575    /// - **Function calls (custom tools)**: Functions that are defined by you,
2576    ///   enabling the model to call your own code with strongly typed arguments
2577    ///   and outputs. Learn more about
2578    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
2579    ///   custom tools to call your own code.
2580    #[serde(skip_serializing_if = "Option::is_none")]
2581    pub tools: Option<Vec<Tool>>,
2582
2583    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
2584    /// token position, each with an associated log probability.
2585    #[serde(skip_serializing_if = "Option::is_none")]
2586    pub top_logprobs: Option<u8>,
2587
2588    /// An alternative to sampling with temperature, called nucleus sampling,
2589    /// where the model considers the results of the tokens with top_p probability
2590    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
2591    /// are considered.
2592    ///
2593    /// We generally recommend altering this or `temperature` but not both.
2594    #[serde(skip_serializing_if = "Option::is_none")]
2595    pub top_p: Option<f32>,
2596
    /// The truncation strategy to use for the model response.
2598    /// - `auto`: If the input to this Response exceeds
2599    ///   the model's context window size, the model will truncate the
2600    ///   response to fit the context window by dropping items from the beginning of the conversation.
2601    /// - `disabled` (default): If the input size will exceed the context window
2602    ///   size for a model, the request will fail with a 400 error.
2603    #[serde(skip_serializing_if = "Option::is_none")]
2604    pub truncation: Option<Truncation>,
2605
2606    /// Represents token usage details including input tokens, output tokens,
2607    /// a breakdown of output tokens, and the total tokens used.
2608    #[serde(skip_serializing_if = "Option::is_none")]
2609    pub usage: Option<ResponseUsage>,
2610}
2611
2612#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2613#[serde(rename_all = "snake_case")]
2614pub enum Status {
2615    Completed,
2616    Failed,
2617    InProgress,
2618    Cancelled,
2619    Queued,
2620    Incomplete,
2621}
2622
2623/// An output item generated by the model as part of a response.
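///
/// A sketch of dispatching on output items after a response completes. Only the
/// custom tool call variant's fields are read here; the import path is assumed
/// from this module's location.
///
/// ```no_run
/// use async_openai::types::responses::OutputItem;
///
/// fn handle_output(items: &[OutputItem]) {
///     for item in items {
///         match item {
///             OutputItem::CustomToolCall(call) => {
///                 // `name` and `input` are fields of `CustomToolCall`, defined below.
///                 println!("custom tool `{}` called with input: {}", call.name, call.input);
///             }
///             other => println!("other output item: {other:?}"),
///         }
///     }
/// }
/// ```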
2624#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2625#[serde(tag = "type")]
2626#[serde(rename_all = "snake_case")]
2627pub enum OutputItem {
2628    /// An output message from the model.
2629    Message(OutputMessage),
2630    /// The results of a file search tool call. See the
2631    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
2632    /// for more information.
2633    FileSearchCall(FileSearchToolCall),
2634    /// A tool call to run a function. See the
2635    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling)
2636    /// for more information.
2637    FunctionCall(FunctionToolCall),
2638    /// The results of a web search tool call. See the
2639    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search)
2640    /// for more information.
2641    WebSearchCall(WebSearchToolCall),
2642    /// A tool call to a computer use tool. See the
2643    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
2644    /// for more information.
2645    ComputerCall(ComputerToolCall),
2646    /// A description of the chain of thought used by a reasoning model while generating
2647    /// a response. Be sure to include these items in your `input` to the Responses API for
2648    /// subsequent turns of a conversation if you are manually
2649    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
2650    Reasoning(ReasoningItem),
2651    /// A compaction item generated by the [`/v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
2652    Compaction(CompactionBody),
2653    /// An image generation request made by the model.
2654    ImageGenerationCall(ImageGenToolCall),
2655    /// A tool call to run code.
2656    CodeInterpreterCall(CodeInterpreterToolCall),
2657    /// A tool call to run a command on the local shell.
2658    LocalShellCall(LocalShellToolCall),
2659    /// A tool call that executes one or more shell commands in a managed environment.
2660    ShellCall(FunctionShellCall),
2661    /// The output of a shell tool call.
2662    ShellCallOutput(FunctionShellCallOutput),
2663    /// A tool call that applies file diffs by creating, deleting, or updating files.
2664    ApplyPatchCall(ApplyPatchToolCall),
2665    /// The output emitted by an apply patch tool call.
2666    ApplyPatchCallOutput(ApplyPatchToolCallOutput),
2667    /// An invocation of a tool on an MCP server.
2668    McpCall(MCPToolCall),
2669    /// A list of tools available on an MCP server.
2670    McpListTools(MCPListTools),
2671    /// A request for human approval of a tool invocation.
2672    McpApprovalRequest(MCPApprovalRequest),
2673    /// A call to a custom tool created by the model.
2674    CustomToolCall(CustomToolCall),
2675}
2676
2677#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2678#[non_exhaustive]
2679pub struct CustomToolCall {
2680    /// An identifier used to map this custom tool call to a tool call output.
2681    pub call_id: String,
2682    /// The input for the custom tool call generated by the model.
2683    pub input: String,
2684    /// The name of the custom tool being called.
2685    pub name: String,
2686    /// The unique ID of the custom tool call in the OpenAI platform.
2687    pub id: String,
2688}
2689
2690#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2691pub struct DeleteResponse {
2692    pub object: String,
2693    pub deleted: bool,
2694    pub id: String,
2695}
2696
2697#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2698pub struct AnyItemReference {
2699    pub r#type: Option<String>,
2700    pub id: String,
2701}
2702
2703#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2704#[serde(tag = "type", rename_all = "snake_case")]
2705pub enum ItemResourceItem {
2706    Message(MessageItem),
2707    FileSearchCall(FileSearchToolCall),
2708    ComputerCall(ComputerToolCall),
2709    ComputerCallOutput(ComputerCallOutputItemParam),
2710    WebSearchCall(WebSearchToolCall),
2711    FunctionCall(FunctionToolCall),
2712    FunctionCallOutput(FunctionCallOutputItemParam),
2713    ImageGenerationCall(ImageGenToolCall),
2714    CodeInterpreterCall(CodeInterpreterToolCall),
2715    LocalShellCall(LocalShellToolCall),
2716    LocalShellCallOutput(LocalShellToolCallOutput),
2717    ShellCall(FunctionShellCallItemParam),
2718    ShellCallOutput(FunctionShellCallOutputItemParam),
2719    ApplyPatchCall(ApplyPatchToolCallItemParam),
2720    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
2721    McpListTools(MCPListTools),
2722    McpApprovalRequest(MCPApprovalRequest),
2723    McpApprovalResponse(MCPApprovalResponse),
2724    McpCall(MCPToolCall),
2725}
2726
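/// Either a bare reference to an item by ID or a fully materialized item.
///
/// Note that `#[serde(untagged)]` tries the variants in declaration order when
/// deserializing and uses the first one that matches.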
2727#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2728#[serde(untagged)]
2729pub enum ItemResource {
2730    Item(ItemResourceItem),
2731    ItemReference(AnyItemReference),
2732}
2733
2734/// A list of Response items.
2735#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2736pub struct ResponseItemList {
2737    /// The type of object returned, must be `list`.
2738    pub object: String,
2739    /// The ID of the first item in the list.
2740    pub first_id: Option<String>,
2741    /// The ID of the last item in the list.
2742    pub last_id: Option<String>,
2743    /// Whether there are more items in the list.
2744    pub has_more: bool,
2745    /// The list of items.
2746    pub data: Vec<ItemResource>,
2747}
2748
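/// Parameters used to compute an input token count for a Response request.
///
/// A minimal builder sketch; the module path, model name, and input text are
/// illustrative placeholders.
///
/// ```no_run
/// use async_openai::types::responses::{InputParam, TokenCountsBodyArgs};
///
/// let body = TokenCountsBodyArgs::default()
///     .model("gpt-4o")
///     .input(InputParam::Text("How many tokens is this sentence?".to_string()))
///     .build()
///     .expect("valid token count parameters");
/// println!("{body:?}");
/// ```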
2749#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
2750#[builder(
2751    name = "TokenCountsBodyArgs",
2752    pattern = "mutable",
2753    setter(into, strip_option),
2754    default
2755)]
2756#[builder(build_fn(error = "OpenAIError"))]
2757pub struct TokenCountsBody {
2758    /// The conversation that this response belongs to. Items from this
2759    /// conversation are prepended to `input_items` for this response request.
2760    /// Input items and output items from this response are automatically added to this
2761    /// conversation after this response completes.
2762    #[serde(skip_serializing_if = "Option::is_none")]
2763    pub conversation: Option<ConversationParam>,
2764
2765    /// Text, image, or file inputs to the model, used to generate a response
2766    #[serde(skip_serializing_if = "Option::is_none")]
2767    pub input: Option<InputParam>,
2768
2769    /// A system (or developer) message inserted into the model's context.
2770    ///
2771    /// When used along with `previous_response_id`, the instructions from a previous response will
2772    /// not be carried over to the next response. This makes it simple to swap out system (or
2773    /// developer) messages in new responses.
2774    #[serde(skip_serializing_if = "Option::is_none")]
2775    pub instructions: Option<String>,
2776
2777    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
2778    /// wide range of models with different capabilities, performance characteristics,
2779    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
2780    /// to browse and compare available models.
2781    #[serde(skip_serializing_if = "Option::is_none")]
2782    pub model: Option<String>,
2783
2784    /// Whether to allow the model to run tool calls in parallel.
2785    #[serde(skip_serializing_if = "Option::is_none")]
2786    pub parallel_tool_calls: Option<bool>,
2787
2788    /// The unique ID of the previous response to the model. Use this to create multi-turn
2789    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
2790    /// Cannot be used in conjunction with `conversation`.
2791    #[serde(skip_serializing_if = "Option::is_none")]
2792    pub previous_response_id: Option<String>,
2793
2794    /// **gpt-5 and o-series models only**
2795    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
2796    #[serde(skip_serializing_if = "Option::is_none")]
2797    pub reasoning: Option<Reasoning>,
2798
2799    /// Configuration options for a text response from the model. Can be plain
2800    /// text or structured JSON data. Learn more:
2801    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
2802    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
2803    #[serde(skip_serializing_if = "Option::is_none")]
2804    pub text: Option<ResponseTextParam>,
2805
2806    /// How the model should select which tool (or tools) to use when generating
2807    /// a response. See the `tools` parameter to see how to specify which tools
2808    /// the model can call.
2809    #[serde(skip_serializing_if = "Option::is_none")]
2810    pub tool_choice: Option<ToolChoiceParam>,
2811
2812    /// An array of tools the model may call while generating a response. You can specify which tool
2813    /// to use by setting the `tool_choice` parameter.
2814    #[serde(skip_serializing_if = "Option::is_none")]
2815    pub tools: Option<Vec<Tool>>,
2816
2817    /// The truncation strategy to use for the model response.
2818    /// - `auto`: If the input to this Response exceeds
2819    ///   the model's context window size, the model will truncate the
2820    ///   response to fit the context window by dropping items from the beginning of the conversation.
2821    /// - `disabled` (default): If the input size will exceed the context window
2822    ///   size for a model, the request will fail with a 400 error.
2823    #[serde(skip_serializing_if = "Option::is_none")]
2824    pub truncation: Option<Truncation>,
2825}
2826
2827#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2828pub struct TokenCountsResource {
2829    pub object: String,
2830    pub input_tokens: u32,
2831}
2832
2833/// A compaction summary item generated by the `/v1/responses/compact` API.
2834#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2835pub struct CompactionSummaryItemParam {
2836    /// The ID of the compaction item.
2837    #[serde(skip_serializing_if = "Option::is_none")]
2838    pub id: Option<String>,
2839    /// The encrypted content of the compaction summary.
2840    pub encrypted_content: String,
2841}
2842
2843/// A compaction item generated by the `/v1/responses/compact` API.
2844#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2845pub struct CompactionBody {
2846    /// The unique ID of the compaction item.
2847    pub id: String,
2848    /// The encrypted content that was produced by compaction.
2849    pub encrypted_content: String,
2850    /// The identifier of the actor that created the item.
2851    #[serde(skip_serializing_if = "Option::is_none")]
2852    pub created_by: Option<String>,
2853}
2854
2855/// Request to compact a conversation.
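///
/// A minimal builder sketch; the model name and previous response ID are
/// placeholder values, and the import path is assumed from this module's
/// location.
///
/// ```no_run
/// use async_openai::types::responses::CompactResponseRequestArgs;
///
/// let request = CompactResponseRequestArgs::default()
///     .model("gpt-5")
///     .previous_response_id("resp_123")
///     .build()
///     .expect("valid compaction request");
/// println!("{request:?}");
/// ```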
2856#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
2857#[builder(name = "CompactResponseRequestArgs")]
2858#[builder(pattern = "mutable")]
2859#[builder(setter(into, strip_option), default)]
2860#[builder(derive(Debug))]
2861#[builder(build_fn(error = "OpenAIError"))]
2862pub struct CompactResponseRequest {
2863    /// Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wide range of models
2864    /// with different capabilities, performance characteristics, and price points. Refer to the
2865    /// [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
2866    pub model: String,
2867
2868    /// Text, image, or file inputs to the model, used to generate a response
2869    #[serde(skip_serializing_if = "Option::is_none")]
2870    pub input: Option<InputParam>,
2871
2872    /// The unique ID of the previous response to the model. Use this to create multi-turn
2873    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
2874    /// Cannot be used in conjunction with `conversation`.
2875    #[serde(skip_serializing_if = "Option::is_none")]
2876    pub previous_response_id: Option<String>,
2877
2878    /// A system (or developer) message inserted into the model's context.
2879    ///
2880    /// When used along with `previous_response_id`, the instructions from a previous response will
2881    /// not be carried over to the next response. This makes it simple to swap out system (or
2882    /// developer) messages in new responses.
2883    #[serde(skip_serializing_if = "Option::is_none")]
2884    pub instructions: Option<String>,
2885}
2886
2887/// The compacted response object.
2888#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2889pub struct CompactResource {
2890    /// The unique identifier for the compacted response.
2891    pub id: String,
2892    /// The object type. Always `response.compaction`.
2893    pub object: String,
2894    /// The compacted list of output items. This is a list of all user messages,
2895    /// followed by a single compaction item.
2896    pub output: Vec<OutputItem>,
2897    /// Unix timestamp (in seconds) when the compacted conversation was created.
2898    pub created_at: u64,
2899    /// Token accounting for the compaction pass, including cached, reasoning, and total tokens.
2900    pub usage: ResponseUsage,
2901}
2902
2903// ============================================================
2904// Container / Environment Types
2905// ============================================================
2906
2907/// A domain-scoped secret injected for allowlisted domains.
2908#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2909pub struct ContainerNetworkPolicyDomainSecretParam {
2910    /// The domain associated with the secret.
2911    pub domain: String,
2912    /// The name of the secret to inject for the domain.
2913    pub name: String,
2914    /// The secret value to inject for the domain.
2915    pub value: String,
2916}
2917
2918/// Details for an allowlist network policy.
2919#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
2920pub struct ContainerNetworkPolicyAllowlistDetails {
2921    /// A list of allowed domains.
2922    pub allowed_domains: Vec<String>,
2923    /// Optional domain-scoped secrets for allowlisted domains.
2924    #[serde(skip_serializing_if = "Option::is_none")]
2925    pub domain_secrets: Option<Vec<ContainerNetworkPolicyDomainSecretParam>>,
2926}
2927
2928/// Network access policy for a container.
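///
/// Because this enum is internally tagged with `type`, an allowlist policy
/// serializes with its fields inlined next to the tag. A sketch; the domain is a
/// placeholder and the import paths are assumed.
///
/// ```no_run
/// use async_openai::types::responses::{
///     ContainerNetworkPolicy, ContainerNetworkPolicyAllowlistDetails,
/// };
///
/// let policy = ContainerNetworkPolicy::Allowlist(ContainerNetworkPolicyAllowlistDetails {
///     allowed_domains: vec!["api.example.com".to_string()],
///     domain_secrets: None,
/// });
/// // Expected wire form: {"type":"allowlist","allowed_domains":["api.example.com"]}
/// println!("{}", serde_json::to_string(&policy).unwrap());
/// ```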
2929#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2930#[serde(tag = "type", rename_all = "snake_case")]
2931pub enum ContainerNetworkPolicy {
2932    /// Disable all outbound network access.
2933    Disabled,
2934    /// Allow access only to specified domains.
2935    Allowlist(ContainerNetworkPolicyAllowlistDetails),
2936}
2937
2938/// A skill referenced by ID.
2939#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
2940pub struct SkillReferenceParam {
2941    /// The ID of the skill to reference.
2942    pub skill_id: String,
2943    /// An optional specific version to use.
2944    #[serde(skip_serializing_if = "Option::is_none")]
2945    pub version: Option<String>,
2946}
2947
2948/// An inline skill source (base64-encoded zip).
2949#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2950pub struct InlineSkillSourceParam {
2951    /// The media type. Always `"application/zip"`.
2952    pub media_type: String,
2953    /// The base64-encoded skill data.
2954    pub data: String,
2955}
2956
2957/// An inline skill definition.
2958#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2959pub struct InlineSkillParam {
2960    /// The name of the skill.
2961    pub name: String,
2962    /// The description of the skill.
2963    pub description: String,
2964    /// The inline source for the skill.
2965    pub source: InlineSkillSourceParam,
2966}
2967
2968/// A skill parameter — either a reference or inline definition.
2969#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2970#[serde(tag = "type", rename_all = "snake_case")]
2971pub enum SkillParam {
2972    /// Reference a skill by ID.
2973    SkillReference(SkillReferenceParam),
2974    /// Provide an inline skill definition.
2975    Inline(InlineSkillParam),
2976}
2977
2978/// Automatically creates a container for the request.
2979#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
2980pub struct ContainerAutoParam {
2981    /// An optional list of uploaded file IDs to make available in the container.
2982    #[serde(skip_serializing_if = "Option::is_none")]
2983    pub file_ids: Option<Vec<String>>,
2984    /// Network access policy for the container.
2985    #[serde(skip_serializing_if = "Option::is_none")]
2986    pub network_policy: Option<ContainerNetworkPolicy>,
2987    /// An optional list of skills to make available in the container.
2988    #[serde(skip_serializing_if = "Option::is_none")]
2989    pub skills: Option<Vec<SkillParam>>,
2990}
2991
2992/// A local skill available in a local environment.
2993#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2994pub struct LocalSkillParam {
2995    /// The name of the skill.
2996    pub name: String,
2997    /// The description of the skill.
2998    pub description: String,
2999    /// The path to the directory containing the skill.
3000    pub path: String,
3001}
3002
3003/// Uses a local computer environment.
3004#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
3005pub struct LocalEnvironmentParam {
3006    /// An optional list of local skills.
3007    #[serde(skip_serializing_if = "Option::is_none")]
3008    pub skills: Option<Vec<LocalSkillParam>>,
3009}
3010
3011/// References a container created with the `/v1/containers` endpoint.
3012#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
3013pub struct ContainerReferenceParam {
3014    /// The ID of the referenced container.
3015    pub container_id: String,
3016}
3017
3018/// A resource reference to a container by ID.
3019#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
3020pub struct ContainerReferenceResource {
3021    /// The ID of the referenced container.
3022    pub container_id: String,
3023}
3024
3025/// The execution environment for a shell tool — container or local.
3026#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
3027#[serde(tag = "type", rename_all = "snake_case")]
3028pub enum FunctionShellEnvironment {
3029    /// Automatically creates a container for this request.
3030    ContainerAuto(ContainerAutoParam),
3031    /// Use a local computer environment.
3032    Local(LocalEnvironmentParam),
3033    /// Reference an existing container by ID.
3034    ContainerReference(ContainerReferenceParam),
3035}
3036
3037/// Parameters for the shell function tool.
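///
/// A sketch of configuring a shell tool with an automatically created container,
/// outbound network access disabled, and one referenced skill. The skill ID is a
/// placeholder and the import paths are assumed.
///
/// ```no_run
/// use async_openai::types::responses::{
///     ContainerAutoParam, ContainerNetworkPolicy, FunctionShellEnvironment,
///     FunctionShellToolParam, SkillParam, SkillReferenceParam,
/// };
///
/// let shell_tool = FunctionShellToolParam {
///     environment: Some(FunctionShellEnvironment::ContainerAuto(ContainerAutoParam {
///         file_ids: None,
///         network_policy: Some(ContainerNetworkPolicy::Disabled),
///         skills: Some(vec![SkillParam::SkillReference(SkillReferenceParam {
///             skill_id: "skill_123".to_string(),
///             version: None,
///         })]),
///     })),
/// };
/// println!("{shell_tool:?}");
/// ```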
3038#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
3039pub struct FunctionShellToolParam {
3040    /// The execution environment for the shell tool.
3041    #[serde(skip_serializing_if = "Option::is_none")]
3042    pub environment: Option<FunctionShellEnvironment>,
3043}
3044
3045/// Context management configuration.
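///
/// The `type_` field maps to the wire key `type`. A sketch; the strategy name and
/// threshold are placeholder values and the import path is assumed.
///
/// ```no_run
/// use async_openai::types::responses::ContextManagementParam;
///
/// let context_management = ContextManagementParam {
///     type_: "compaction".to_string(),
///     compact_threshold: Some(16_000),
/// };
/// // Expected wire form: {"type":"compaction","compact_threshold":16000}
/// println!("{}", serde_json::to_string(&context_management).unwrap());
/// ```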
3046#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
3047pub struct ContextManagementParam {
3048    /// The context management strategy type.
3049    #[serde(rename = "type")]
3050    pub type_: String,
3051    /// Minimum number of tokens to retain before compacting.
3052    #[serde(skip_serializing_if = "Option::is_none")]
3053    pub compact_threshold: Option<u32>,
3054}