async_openai/types/responses/
response.rs

1use crate::error::OpenAIError;
2use crate::types::mcp::{MCPListToolsTool, MCPTool};
3use crate::types::responses::{
4    CustomGrammarFormatParam, Filter, ImageDetail, ReasoningEffort, ResponseFormatJsonSchema,
5    ResponseUsage,
6};
7use derive_builder::Builder;
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10
/// Role of messages in the API.
///
/// Serialized in lowercase on the wire (`"user"`, `"assistant"`, `"system"`,
/// `"developer"`); defaults to `User`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    #[default]
    User,
    Assistant,
    System,
    Developer,
}
21
/// Status of input/output items.
///
/// Serialized in snake_case on the wire: `in_progress`, `completed`, or `incomplete`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OutputStatus {
    InProgress,
    Completed,
    Incomplete,
}
30
/// The `input` payload of a response request: either a plain string or a
/// list of structured input items.
///
/// Untagged: a JSON string deserializes to `Text`, a JSON array to `Items`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputParam {
    /// A text input to the model, equivalent to a text input with the
    /// `user` role.
    Text(String),
    /// A list of one or many input items to the model, containing
    /// different content types.
    Items(Vec<InputItem>),
}
41
/// Content item used to generate a response.
///
/// This is a properly discriminated union based on the `type` field, using Rust's
/// type-safe enum with serde's tag attribute for efficient deserialization.
/// Variant names are converted to snake_case to produce the wire `type` value
/// (e.g. `FileSearchCall` ↔ `"file_search_call"`).
///
/// # OpenAPI Specification
/// Corresponds to the `Item` schema in the OpenAPI spec with a `type` discriminator.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Item {
    /// A message (type: "message").
    /// Can represent InputMessage (user/system/developer) or OutputMessage (assistant).
    ///
    /// InputMessage:
    ///     A message input to the model with a role indicating instruction following hierarchy.
    ///     Instructions given with the developer or system role take precedence over instructions given with the user role.
    /// OutputMessage:
    ///     A message output from the model.
    Message(MessageItem),

    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
    FileSearchCall(FileSearchToolCall),

    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
    ComputerCall(ComputerToolCall),

    /// The output of a computer tool call.
    ComputerCallOutput(ComputerCallOutputItemParam),

    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
    WebSearchCall(WebSearchToolCall),

    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
    FunctionCall(FunctionToolCall),

    /// The output of a function tool call.
    FunctionCallOutput(FunctionCallOutputItemParam),

    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API
    /// for subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),

    /// A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
    Compaction(CompactionSummaryItemParam),

    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),

    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),

    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),

    /// The output of a local shell tool call.
    LocalShellCallOutput(LocalShellToolCallOutput),

    /// A tool representing a request to execute one or more shell commands.
    ShellCall(FunctionShellCallItemParam),

    /// The streamed output items emitted by a shell tool call.
    ShellCallOutput(FunctionShellCallOutputItemParam),

    /// A tool call representing a request to create, delete, or update files using diff patches.
    ApplyPatchCall(ApplyPatchToolCallItemParam),

    /// The streamed output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),

    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),

    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),

    /// A response to an MCP approval request.
    McpApprovalResponse(MCPApprovalResponse),

    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),

    /// The output of a custom tool call from your code, being sent back to the model.
    CustomToolCallOutput(CustomToolCallOutput),

    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
136
/// Input item that can be used in the context for generating a response.
///
/// This represents the OpenAPI `InputItem` schema which is an `anyOf`:
/// 1. `EasyInputMessage` - Simple, user-friendly message input (can use string content)
/// 2. `Item` - Structured items with proper type discrimination (including InputMessage, OutputMessage, tool calls)
/// 3. `ItemReferenceParam` - Reference to an existing item by ID (type can be null)
///
/// Uses untagged deserialization because these types overlap in structure.
/// Order matters: more specific structures are tried first, so do not reorder
/// the variants below without re-checking deserialization of overlapping payloads.
///
/// # OpenAPI Specification
/// Corresponds to the `InputItem` schema: `anyOf[EasyInputMessage, Item, ItemReferenceParam]`
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputItem {
    /// A reference to an existing item by ID.
    /// Has a required `id` field and optional `type` (can be "item_reference" or null).
    /// Must be tried first as it's the most minimal structure.
    ItemReference(ItemReference),

    /// All structured items with proper type discrimination.
    /// Includes InputMessage, OutputMessage, and all tool calls/outputs.
    /// Uses the discriminated `Item` enum for efficient, type-safe deserialization.
    Item(Item),

    /// A simple, user-friendly message input (EasyInputMessage).
    /// Supports string content and can include assistant role for previous responses.
    /// Must be tried last as it's the most flexible structure.
    ///
    /// A message input to the model with a role indicating instruction following
    /// hierarchy. Instructions given with the `developer` or `system` role take
    /// precedence over instructions given with the `user` role. Messages with the
    /// `assistant` role are presumed to have been generated by the model in previous
    /// interactions.
    EasyMessage(EasyInputMessage),
}
173
/// A message item used within the `Item` enum.
///
/// Both InputMessage and OutputMessage have `type: "message"`, so we use an untagged
/// enum to distinguish them based on their structure:
/// - OutputMessage: role=assistant, required id & status fields
/// - InputMessage: role=user/system/developer, content is `Vec<ContentType>`, optional id/status
///
/// Variant order matters for untagged deserialization: the more constrained
/// `Output` shape is tried before the looser `Input` shape.
///
/// Note: EasyInputMessage is NOT included here - it's a separate variant in `InputItem`,
/// not part of the structured `Item` enum.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum MessageItem {
    /// An output message from the model (role: assistant, has required id & status).
    /// This must come first as it has the most specific structure (required id and status fields).
    Output(OutputMessage),

    /// A structured input message (role: user/system/developer, content is `Vec<ContentType>`).
    /// Has structured content list and optional id/status fields.
    ///
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the `developer` or `system` role take precedence over instructions
    /// given with the `user` role.
    Input(InputMessage),
}
198
/// A reference to an existing item by ID.
///
/// Corresponds to `ItemReferenceParam` in the OpenAPI spec.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ItemReference {
    /// The type of item to reference. Can be "item_reference" or null.
    /// Omitted from the serialized payload when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub r#type: Option<ItemReferenceType>,
    /// The ID of the item to reference.
    pub id: String,
}
208
/// The `type` discriminator for an [`ItemReference`].
/// Serializes as the string `"item_reference"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ItemReferenceType {
    ItemReference,
}
214
/// Output from a function call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionCallOutputItemParam {
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// Text, image, or file output of the function tool call.
    pub output: FunctionCallOutput,
    /// The unique ID of the function tool call output.
    /// Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
231
/// The `output` payload of a function tool call: either a raw JSON string or
/// a list of structured content parts.
///
/// Untagged: a JSON string deserializes to `Text`, a JSON array to `Content`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum FunctionCallOutput {
    /// A JSON string of the output of the function tool call.
    Text(String),
    Content(Vec<InputContent>), // TODO use shape which allows null from OpenAPI spec?
}
239
/// The output of a computer tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallOutputItemParam {
    /// The ID of the computer tool call that produced the output.
    pub call_id: String,
    /// A computer screenshot image used with the computer use tool.
    pub output: ComputerScreenshotImage,
    /// The safety checks reported by the API that have been acknowledged by the developer.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acknowledged_safety_checks: Option<Vec<ComputerCallSafetyCheckParam>>,
    /// The unique ID of the computer tool call output. Optional when creating.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the message input. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when input items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
257
/// The `type` discriminator for a [`ComputerScreenshotImage`].
/// Serializes as the string `"computer_screenshot"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ComputerScreenshotImageType {
    ComputerScreenshot,
}
263
/// A computer screenshot image used with the computer use tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerScreenshotImage {
    /// Specifies the event type. For a computer screenshot, this property is always
    /// set to `computer_screenshot`.
    pub r#type: ComputerScreenshotImageType,
    /// The identifier of an uploaded file that contains the screenshot.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the screenshot image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
277
/// Output from a local shell tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCallOutput {
    /// The unique ID of the local shell tool call generated by the model.
    pub id: String,

    /// A JSON string of the output of the local shell tool call.
    pub output: String,

    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
291
/// Output from a local shell command execution.
///
/// All fields are optional and omitted from the payload when `None`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellOutput {
    /// The stdout output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stdout: Option<String>,

    /// The stderr output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stderr: Option<String>,

    /// The exit code of the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exit_code: Option<i32>,
}
307
/// An MCP approval response that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalResponse {
    /// The ID of the approval request being answered.
    pub approval_request_id: String,

    /// Whether the request was approved.
    pub approve: bool,

    /// The unique ID of the approval response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,

    /// Optional reason for the decision.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
325
/// The `output` payload of a custom tool call: either a plain string or a
/// list of structured content parts.
///
/// Untagged: a JSON string deserializes to `Text`, a JSON array to `List`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum CustomToolCallOutputOutput {
    /// A string of the output of the custom tool call.
    Text(String),
    /// Text, image, or file output of the custom tool call.
    List(Vec<InputContent>),
}
334
/// The output of a custom tool call from your code, being sent back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CustomToolCallOutput {
    /// The call ID, used to map this custom tool call output to a custom tool call.
    pub call_id: String,

    /// The output from the custom tool call generated by your code.
    /// Can be a string or a list of output content.
    pub output: CustomToolCallOutputOutput,

    /// The unique ID of the custom tool call output in the OpenAI platform.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
348
/// A simplified message input to the model (EasyInputMessage in the OpenAPI spec).
///
/// This is the most user-friendly way to provide messages, supporting both simple
/// string content and structured content. Role can include `assistant` for providing
/// previous assistant responses.
///
/// Construct via [`EasyInputMessageArgs`] (generated by `derive_builder`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "EasyInputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct EasyInputMessage {
    /// The type of the message input. Always set to `message`.
    pub r#type: MessageType,
    /// The role of the message input. One of `user`, `assistant`, `system`, or `developer`.
    pub role: Role,
    /// Text, image, or audio input to the model, used to generate a response.
    /// Can also contain previous assistant responses.
    pub content: EasyInputContent,
}
371
/// A structured message input to the model (InputMessage in the OpenAPI spec).
///
/// This variant requires structured content (not a simple string) and does not support
/// the `assistant` role (use OutputMessage for that). status is populated when items are returned via API.
///
/// Construct via [`InputMessageArgs`] (generated by `derive_builder`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputMessage {
    /// A list of one or many input items to the model, containing different content types.
    pub content: Vec<InputContent>,
    /// The role of the message input. One of `user`, `system`, or `developer`.
    /// Note: `assistant` is NOT allowed here; use OutputMessage instead.
    pub role: InputRole,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
    // NOTE: the OpenAPI spec also defines `type` (always "message") on this
    // schema; it is intentionally omitted here because the discriminator is
    // carried by the enclosing `Item::Message` tag.
}
397
/// The role for an input message - can only be `user`, `system`, or `developer`.
/// This type ensures type safety by excluding the `assistant` role (use OutputMessage for that).
///
/// Serialized in lowercase on the wire; defaults to `User`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum InputRole {
    #[default]
    User,
    System,
    Developer,
}
408
/// Content for EasyInputMessage - can be a simple string or structured list.
///
/// Untagged: a JSON string deserializes to `Text`, a JSON array to `ContentList`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum EasyInputContent {
    /// A text input to the model.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    ContentList(Vec<InputContent>),
}
418
/// Parts of a message: text, image, file, or audio.
///
/// Discriminated by the `type` field (`input_text`, `input_image`, `input_file`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    /// A text input to the model.
    InputText(InputTextContent),
    /// An image input to the model. Learn about
    /// [image inputs](https://platform.openai.com/docs/guides/vision).
    InputImage(InputImageContent),
    /// A file input to the model.
    InputFile(InputFileContent),
}
431
/// A text input to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct InputTextContent {
    /// The text input to the model.
    pub text: String,
}
437
/// An image input to the model, provided either by uploaded-file ID or by URL.
///
/// Construct via [`InputImageArgs`] (generated by `derive_builder`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputImageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputImageContent {
    /// The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`.
    /// Defaults to `auto`.
    pub detail: ImageDetail,
    /// The ID of the file to be sent to the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image
    /// in a data URL.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
458
459#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
460#[builder(
461    name = "InputFileArgs",
462    pattern = "mutable",
463    setter(into, strip_option),
464    default
465)]
466#[builder(build_fn(error = "OpenAIError"))]
467pub struct InputFileContent {
468    /// The content of the file to be sent to the model.
469    #[serde(skip_serializing_if = "Option::is_none")]
470    file_data: Option<String>,
471    /// The ID of the file to be sent to the model.
472    #[serde(skip_serializing_if = "Option::is_none")]
473    file_id: Option<String>,
474    /// The URL of the file to be sent to the model.
475    #[serde(skip_serializing_if = "Option::is_none")]
476    file_url: Option<String>,
477    /// The name of the file to be sent to the model.
478    #[serde(skip_serializing_if = "Option::is_none")]
479    filename: Option<String>,
480}
481
/// A conversation object, referenced by its ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Conversation {
    /// The unique ID of the conversation.
    pub id: String,
}
487
/// The conversation a response belongs to: either a bare conversation ID
/// string or a [`Conversation`] object.
///
/// Untagged: a JSON string deserializes to `ConversationID`, an object to `Object`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ConversationParam {
    /// The unique ID of the conversation.
    ConversationID(String),
    /// The conversation that this response belongs to.
    Object(Conversation),
}
496
/// Additional output data that can be requested in a model response via the
/// `include` request parameter. Each variant serializes to its dotted wire
/// name given in the `#[serde(rename = ...)]` attribute.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum IncludeEnum {
    /// Include the search results of the file search tool call.
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    /// Include the results of the web search tool call.
    #[serde(rename = "web_search_call.results")]
    WebSearchCallResults,
    /// Include the sources of the web search tool call.
    #[serde(rename = "web_search_call.action.sources")]
    WebSearchCallActionSources,
    /// Include image urls from the input message.
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    /// Include image urls from the computer call output.
    #[serde(rename = "computer_call_output.output.image_url")]
    ComputerCallOutputOutputImageUrl,
    /// Include the outputs of python code execution in code interpreter tool call items.
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
    /// Include an encrypted version of reasoning tokens in reasoning item outputs.
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    /// Include logprobs with assistant messages.
    #[serde(rename = "message.output_text.logprobs")]
    MessageOutputTextLogprobs,
}
516
/// Options for streaming responses. Only set this when `stream: true`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseStreamOptions {
    /// When true, stream obfuscation will be enabled. Stream obfuscation adds
    /// random characters to an `obfuscation` field on streaming delta events to
    /// normalize payload sizes as a mitigation to certain side-channel attacks.
    /// These obfuscation fields are included by default, but add a small amount
    /// of overhead to the data stream. You can set `include_obfuscation` to
    /// false to optimize for bandwidth if you trust the network links between
    /// your application and the OpenAI API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_obfuscation: Option<bool>,
}
529
530/// Builder for a Responses API request.
531#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
532#[builder(
533    name = "CreateResponseArgs",
534    pattern = "mutable",
535    setter(into, strip_option),
536    default
537)]
538#[builder(build_fn(error = "OpenAIError"))]
539pub struct CreateResponse {
540    /// Whether to run the model response in the background.
541    /// [Learn more](https://platform.openai.com/docs/guides/background).
542    #[serde(skip_serializing_if = "Option::is_none")]
543    pub background: Option<bool>,
544
545    /// The conversation that this response belongs to. Items from this conversation are prepended to
546    ///  `input_items` for this response request.
547    ///
548    /// Input items and output items from this response are automatically added to this conversation after
549    /// this response completes.
550    #[serde(skip_serializing_if = "Option::is_none")]
551    pub conversation: Option<ConversationParam>,
552
553    /// Specify additional output data to include in the model response. Currently supported
554    /// values are:
555    ///
556    /// - `web_search_call.action.sources`: Include the sources of the web search tool call.
557    ///
558    /// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
559    ///   interpreter tool call items.
560    ///
561    /// - `computer_call_output.output.image_url`: Include image urls from the computer call
562    ///   output.
563    ///
564    /// - `file_search_call.results`: Include the search results of the file search tool call.
565    ///
566    /// - `message.input_image.image_url`: Include image urls from the input message.
567    ///
568    /// - `message.output_text.logprobs`: Include logprobs with assistant messages.
569    ///
570    /// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
571    ///   reasoning item outputs. This enables reasoning items to be used in multi-turn
572    ///   conversations when using the Responses API statelessly (like when the `store` parameter is
573    ///   set to `false`, or when an organization is enrolled in the zero data retention program).
574    #[serde(skip_serializing_if = "Option::is_none")]
575    pub include: Option<Vec<IncludeEnum>>,
576
577    /// Text, image, or file inputs to the model, used to generate a response.
578    ///
579    /// Learn more:
580    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
581    /// - [Image inputs](https://platform.openai.com/docs/guides/images)
582    /// - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
583    /// - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
584    /// - [Function calling](https://platform.openai.com/docs/guides/function-calling)
585    pub input: InputParam,
586
587    /// A system (or developer) message inserted into the model's context.
588    ///
589    /// When using along with `previous_response_id`, the instructions from a previous
590    /// response will not be carried over to the next response. This makes it simple
591    /// to swap out system (or developer) messages in new responses.
592    #[serde(skip_serializing_if = "Option::is_none")]
593    pub instructions: Option<String>,
594
595    /// An upper bound for the number of tokens that can be generated for a response, including
596    /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
597    #[serde(skip_serializing_if = "Option::is_none")]
598    pub max_output_tokens: Option<u32>,
599
600    /// The maximum number of total calls to built-in tools that can be processed in a response. This
601    /// maximum number applies across all built-in tool calls, not per individual tool. Any further
602    /// attempts to call a tool by the model will be ignored.
603    #[serde(skip_serializing_if = "Option::is_none")]
604    pub max_tool_calls: Option<u32>,
605
606    /// Set of 16 key-value pairs that can be attached to an object. This can be
607    /// useful for storing additional information about the object in a structured
608    /// format, and querying for objects via API or the dashboard.
609    ///
610    /// Keys are strings with a maximum length of 64 characters. Values are
611    /// strings with a maximum length of 512 characters.
612    #[serde(skip_serializing_if = "Option::is_none")]
613    pub metadata: Option<HashMap<String, String>>,
614
615    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
616    /// offers a wide range of models with different capabilities, performance
617    /// characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
618    /// to browse and compare available models.
619    #[serde(skip_serializing_if = "Option::is_none")]
620    pub model: Option<String>,
621
622    /// Whether to allow the model to run tool calls in parallel.
623    #[serde(skip_serializing_if = "Option::is_none")]
624    pub parallel_tool_calls: Option<bool>,
625
626    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
627    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
628    /// Cannot be used in conjunction with `conversation`.
629    #[serde(skip_serializing_if = "Option::is_none")]
630    pub previous_response_id: Option<String>,
631
632    /// Reference to a prompt template and its variables.
633    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
634    #[serde(skip_serializing_if = "Option::is_none")]
635    pub prompt: Option<Prompt>,
636
637    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
638    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
639    #[serde(skip_serializing_if = "Option::is_none")]
640    pub prompt_cache_key: Option<String>,
641
642    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
643    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
644    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).    
645    #[serde(skip_serializing_if = "Option::is_none")]
646    pub prompt_cache_retention: Option<PromptCacheRetention>,
647
648    /// **gpt-5 and o-series models only**
649    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
650    #[serde(skip_serializing_if = "Option::is_none")]
651    pub reasoning: Option<Reasoning>,
652
653    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
654    /// usage policies.
655    ///
656    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
657    /// or email address, in order to avoid sending us any identifying information. [Learn
658    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
659    #[serde(skip_serializing_if = "Option::is_none")]
660    pub safety_identifier: Option<String>,
661
662    /// Specifies the processing type used for serving the request.
663    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
664    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
665    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
666    /// - When not set, the default behavior is 'auto'.
667    ///
668    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
669    #[serde(skip_serializing_if = "Option::is_none")]
670    pub service_tier: Option<ServiceTier>,
671
672    /// Whether to store the generated model response for later retrieval via API.
673    #[serde(skip_serializing_if = "Option::is_none")]
674    pub store: Option<bool>,
675
676    /// If set to true, the model response data will be streamed to the client
677    /// as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
678    /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
679    /// for more information.
680    #[serde(skip_serializing_if = "Option::is_none")]
681    pub stream: Option<bool>,
682
683    /// Options for streaming responses. Only set this when you set `stream: true`.
684    #[serde(skip_serializing_if = "Option::is_none")]
685    pub stream_options: Option<ResponseStreamOptions>,
686
687    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8
688    /// will make the output more random, while lower values like 0.2 will make it
689    /// more focused and deterministic. We generally recommend altering this or
690    /// `top_p` but not both.
691    #[serde(skip_serializing_if = "Option::is_none")]
692    pub temperature: Option<f32>,
693
694    /// Configuration options for a text response from the model. Can be plain
695    /// text or structured JSON data. Learn more:
696    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
697    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
698    #[serde(skip_serializing_if = "Option::is_none")]
699    pub text: Option<ResponseTextParam>,
700
701    /// How the model should select which tool (or tools) to use when generating
702    /// a response. See the `tools` parameter to see how to specify which tools
703    /// the model can call.
704    #[serde(skip_serializing_if = "Option::is_none")]
705    pub tool_choice: Option<ToolChoiceParam>,
706
707    /// An array of tools the model may call while generating a response. You
708    /// can specify which tool to use by setting the `tool_choice` parameter.
709    ///
710    /// We support the following categories of tools:
711    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
712    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
713    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
714    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
715    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
716    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
717    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
718    /// - **Function calls (custom tools)**: Functions that are defined by you,
719    ///   enabling the model to call your own code with strongly typed arguments
720    ///   and outputs. Learn more about
721    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
722    ///   custom tools to call your own code.
723    #[serde(skip_serializing_if = "Option::is_none")]
724    pub tools: Option<Vec<Tool>>,
725
726    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
727    /// token position, each with an associated log probability.
728    #[serde(skip_serializing_if = "Option::is_none")]
729    pub top_logprobs: Option<u8>,
730
731    /// An alternative to sampling with temperature, called nucleus sampling,
732    /// where the model considers the results of the tokens with top_p probability
733    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
734    /// are considered.
735    ///
736    /// We generally recommend altering this or `temperature` but not both.
737    #[serde(skip_serializing_if = "Option::is_none")]
738    pub top_p: Option<f32>,
739
740    ///The truncation strategy to use for the model response.
741    /// - `auto`: If the input to this Response exceeds
742    ///   the model's context window size, the model will truncate the
743    ///   response to fit the context window by dropping items from the beginning of the conversation.
744    /// - `disabled` (default): If the input size will exceed the context window
745    ///   size for a model, the request will fail with a 400 error.
746    #[serde(skip_serializing_if = "Option::is_none")]
747    pub truncation: Option<Truncation>,
748}
749
/// A value substituted for a variable in a [`Prompt`] template.
///
/// Serialized untagged: the JSON shape alone determines the variant.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ResponsePromptVariables {
    /// A plain text substitution value.
    String(String),
    /// A Response input content part, such as an image or file.
    Content(InputContent),
    /// Any other JSON value.
    Custom(serde_json::Value),
}
757
/// Reference to a prompt template and the variables to substitute into it.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Prompt {
    /// The unique identifier of the prompt template to use.
    pub id: String,

    /// Optional version of the prompt template.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,

    /// Optional map of values to substitute in for variables in your
    /// prompt. The substitution values can either be strings, or other
    /// Response input types like images or files.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variables: Option<ResponsePromptVariables>,
}
773
/// Processing tier used to serve a request. See the `service_tier` request
/// parameter documentation for the meaning of each value.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ServiceTier {
    /// Process with the service tier configured in the Project settings.
    #[default]
    Auto,
    /// Standard pricing and performance for the selected model.
    Default,
    /// Flex processing.
    Flex,
    /// Scale tier processing.
    Scale,
    /// Priority processing.
    Priority,
}
784
/// Truncation strategies for the model response.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Truncation {
    /// If the input exceeds the model's context window, truncate by dropping
    /// items from the beginning of the conversation.
    Auto,
    /// If the input exceeds the context window, fail with a 400 error.
    Disabled,
}
792
/// Billing information for a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Billing {
    // The party billed for the request. Possible values are not shown in
    // this file — confirm against the API reference.
    pub payer: String,
}
797
/// o-series reasoning settings.
///
/// Both fields are optional; unset fields are omitted from the serialized
/// request. Build with [`ReasoningArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ReasoningArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct Reasoning {
    /// Constrains effort on reasoning for
    /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
    /// reasoning effort can result in faster responses and fewer tokens used
    /// on reasoning in a response.
    ///
    /// Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option<ReasoningEffort>,
    /// A summary of the reasoning performed by the model. This can be
    /// useful for debugging and understanding the model's reasoning process.
    /// One of `auto`, `concise`, or `detailed`.
    ///
    /// `concise` is supported for `computer-use-preview` models and all reasoning models after
    /// `gpt-5`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummary>,
}
826
/// Constrains the verbosity of the model's response. Lower values produce
/// more concise responses; higher values produce more verbose responses.
/// (The previous doc comment was copied from the reasoning settings.)
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Verbosity {
    Low,
    Medium,
    High,
}
835
/// Level of detail for the reasoning summary returned by the model.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ReasoningSummary {
    /// Let the model choose a summary style.
    Auto,
    /// A shorter summary.
    Concise,
    /// A longer, more detailed summary.
    Detailed,
}
843
/// The retention policy for the prompt cache.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum PromptCacheRetention {
    /// Keep the cached prompt in memory only (serialized as `in-memory`).
    #[serde(rename = "in-memory")]
    InMemory,
    /// Retain the cached prompt for 24 hours (serialized as `24h`).
    #[serde(rename = "24h")]
    Hours24,
}
852
/// Configuration for a text response from the model: the output format and
/// an optional verbosity constraint.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTextParam {
    /// An object specifying the format that the model must output.
    ///
    /// Configuring `{ "type": "json_schema" }` enables Structured Outputs,
    /// which ensures the model will match your supplied JSON schema. Learn more in the
    /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
    ///
    /// The default format is `{ "type": "text" }` with no additional options.
    ///
    /// **Not recommended for gpt-4o and newer models:**
    ///
    /// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
    /// ensures the message the model generates is valid JSON. Using `json_schema`
    /// is preferred for models that support it.
    pub format: TextResponseFormatConfiguration,

    /// Constrains the verbosity of the model's response. Lower values will result in
    /// more concise responses, while higher values will result in more verbose responses.
    ///
    /// Currently supported values are `low`, `medium`, and `high`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<Verbosity>,
}
878
/// The output format the model must produce, discriminated by the `type`
/// field (`text`, `json_object`, or `json_schema`).
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum TextResponseFormatConfiguration {
    /// Default response format. Used to generate text responses.
    Text,
    /// JSON object response format. An older method of generating JSON responses.
    /// Using `json_schema` is recommended for models that support it.
    /// Note that the model will not generate JSON without a system or user message
    /// instructing it to do so.
    JsonObject,
    /// JSON Schema response format. Used to generate structured JSON responses.
    /// Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
    JsonSchema(ResponseFormatJsonSchema),
}
893
/// Definitions for model-callable tools.
///
/// Discriminated by the `type` field; variant names map to snake_case type
/// strings unless explicitly renamed.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Tool {
    /// Defines a function in your own code the model can choose to call. Learn more about [function
    /// calling](https://platform.openai.com/docs/guides/tools).
    Function(FunctionTool),
    /// A tool that searches for relevant content from uploaded files. Learn more about the [file search
    /// tool](https://platform.openai.com/docs/guides/tools-file-search).
    FileSearch(FileSearchTool),
    /// A tool that controls a virtual computer. Learn more about the [computer
    /// use tool](https://platform.openai.com/docs/guides/tools-computer-use).
    ComputerUsePreview(ComputerUsePreviewTool),
    /// Search the Internet for sources related to the prompt. Learn more about the
    /// [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearch(WebSearchTool),
    /// Date-pinned web search variant (type: `web_search_2025_08_26`).
    #[serde(rename = "web_search_2025_08_26")]
    WebSearch20250826(WebSearchTool),
    /// Give the model access to additional tools via remote Model Context Protocol
    /// (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
    Mcp(MCPTool),
    /// A tool that runs Python code to help generate a response to a prompt.
    CodeInterpreter(CodeInterpreterTool),
    /// A tool that generates images using a model like `gpt-image-1`.
    ImageGeneration(ImageGenTool),
    /// A tool that allows the model to execute shell commands in a local environment.
    LocalShell,
    /// A tool that allows the model to execute shell commands.
    Shell,
    /// A custom tool that processes input using a specified format. Learn more about [custom
    /// tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
    Custom(CustomToolParam),
    /// This tool searches the web for relevant results to use in a response. Learn more about the [web search
    /// tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearchPreview(WebSearchTool),
    /// Date-pinned preview web search variant (type: `web_search_preview_2025_03_11`).
    #[serde(rename = "web_search_preview_2025_03_11")]
    WebSearchPreview20250311(WebSearchTool),
    /// Allows the assistant to create, delete, or update files using unified diffs.
    ApplyPatch,
}
936
937#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
938pub struct CustomToolParam {
939    /// The name of the custom tool, used to identify it in tool calls.
940    pub name: String,
941    /// Optional description of the custom tool, used to provide more context.
942    pub description: Option<String>,
943    /// The input format for the custom tool. Default is unconstrained text.
944    pub format: CustomToolParamFormat,
945}
946
/// The input format for a custom tool, discriminated by the `type` field
/// (`text` or `grammar`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum CustomToolParamFormat {
    /// Unconstrained free-form text.
    #[default]
    Text,
    /// A grammar defined by the user.
    Grammar(CustomGrammarFormatParam),
}
956
/// A tool that searches for relevant content from uploaded files.
/// Build with [`FileSearchToolArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "FileSearchToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct FileSearchTool {
    /// The IDs of the vector stores to search.
    pub vector_store_ids: Vec<String>,
    /// The maximum number of results to return. This number should be between 1 and 50 inclusive.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_num_results: Option<u32>,
    /// A filter to apply.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filters: Option<Filter>,
    /// Ranking options for search.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ranking_options: Option<RankingOptions>,
}
978
979#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
980#[builder(
981    name = "FunctionToolArgs",
982    pattern = "mutable",
983    setter(into, strip_option),
984    default
985)]
986pub struct FunctionTool {
987    /// The name of the function to call.
988    pub name: String,
989    /// A JSON schema object describing the parameters of the function.
990    #[serde(skip_serializing_if = "Option::is_none")]
991    pub parameters: Option<serde_json::Value>,
992    /// Whether to enforce strict parameter validation. Default `true`.
993    #[serde(skip_serializing_if = "Option::is_none")]
994    pub strict: Option<bool>,
995    /// A description of the function. Used by the model to determine whether or not to call the
996    /// function.
997    #[serde(skip_serializing_if = "Option::is_none")]
998    pub description: Option<String>,
999}
1000
/// Domain filters applied to a web search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolFilters {
    /// Allowed domains for the search. If not provided, all domains are allowed.
    /// Subdomains of the provided domains are allowed as well.
    ///
    /// Example: `["pubmed.ncbi.nlm.nih.gov"]`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allowed_domains: Option<Vec<String>>,
}
1010
1011#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1012#[builder(
1013    name = "WebSearchToolArgs",
1014    pattern = "mutable",
1015    setter(into, strip_option),
1016    default
1017)]
1018pub struct WebSearchTool {
1019    /// Filters for the search.
1020    #[serde(skip_serializing_if = "Option::is_none")]
1021    pub filters: Option<WebSearchToolFilters>,
1022    /// The approximate location of the user.
1023    #[serde(skip_serializing_if = "Option::is_none")]
1024    pub user_location: Option<WebSearchApproximateLocation>,
1025    /// High level guidance for the amount of context window space to use for the search. One of `low`,
1026    /// `medium`, or `high`. `medium` is the default.
1027    #[serde(skip_serializing_if = "Option::is_none")]
1028    pub search_context_size: Option<WebSearchToolSearchContextSize>,
1029}
1030
/// Amount of context window space to use for a web search.
/// `Medium` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchToolSearchContextSize {
    Low,
    #[default]
    Medium,
    High,
}
1039
/// The type of computer environment a computer-use tool controls.
/// `Browser` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ComputerEnvironment {
    Windows,
    Mac,
    Linux,
    Ubuntu,
    #[default]
    Browser,
}
1050
1051#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1052#[builder(
1053    name = "ComputerUsePreviewToolArgs",
1054    pattern = "mutable",
1055    setter(into, strip_option),
1056    default
1057)]
1058pub struct ComputerUsePreviewTool {
1059    /// The type of computer environment to control.
1060    environment: ComputerEnvironment,
1061    /// The width of the computer display.
1062    display_width: u32,
1063    /// The height of the computer display.
1064    display_height: u32,
1065}
1066
/// The ranker version used for file search result ranking.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum RankVersionType {
    /// Let the service pick the ranker.
    #[serde(rename = "auto")]
    Auto,
    /// The ranker pinned to the 2024-11-15 default (serialized as
    /// `default-2024-11-15`).
    #[serde(rename = "default-2024-11-15")]
    Default20241115,
}
1074
/// Weights for reciprocal rank fusion when hybrid search is enabled.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HybridSearch {
    /// The weight of the embedding in the reciprocal ranking fusion.
    pub embedding_weight: f32,
    /// The weight of the text in the reciprocal ranking fusion.
    pub text_weight: f32,
}
1082
/// Options for search result ranking.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RankingOptions {
    /// Weights that control how reciprocal rank fusion balances semantic embedding matches versus
    /// sparse keyword matches when hybrid search is enabled.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hybrid_search: Option<HybridSearch>,
    /// The ranker to use for the file search.
    pub ranker: RankVersionType,
    /// The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will
    /// attempt to return only the most relevant results, but may return fewer results.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub score_threshold: Option<f32>,
}
1097
/// Discriminator for [`WebSearchApproximateLocation`]; the only value is
/// `approximate`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchApproximateLocationType {
    #[default]
    Approximate,
}
1104
/// Approximate user location for web search.
/// Build with [`WebSearchApproximateLocationArgs`]; all location fields are
/// optional free text.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "WebSearchApproximateLocationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct WebSearchApproximateLocation {
    /// The type of location approximation. Always `approximate`.
    pub r#type: WebSearchApproximateLocationType,
    /// Free text input for the city of the user, e.g. `San Francisco`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub city: Option<String>,
    /// The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,
    /// e.g. `US`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub country: Option<String>,
    /// Free text input for the region of the user, e.g. `California`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub region: Option<String>,
    /// The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g.
    /// `America/Los_Angeles`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timezone: Option<String>,
}
1132
1133/// Container configuration for a code interpreter.
1134#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1135#[serde(tag = "type", rename_all = "snake_case")]
1136pub enum CodeInterpreterToolContainer {
1137    /// Configuration for a code interpreter container. Optionally specify the IDs of the
1138    /// files to run the code on.
1139    Auto(CodeInterpreterContainerAuto),
1140
1141    /// The container ID.
1142    #[serde(untagged)]
1143    ContainerID(String),
1144}
1145
/// Auto configuration for code interpreter container.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct CodeInterpreterContainerAuto {
    /// An optional list of uploaded files to make available to your code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_ids: Option<Vec<String>>,

    /// Optional memory limit for the container.
    // NOTE(review): units are not specified here (presumably bytes) —
    // confirm against the API reference.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_limit: Option<u64>,
}
1156
/// A tool that runs Python code to help generate a response to a prompt.
/// Build with [`CodeInterpreterToolArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "CodeInterpreterToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CodeInterpreterTool {
    /// The code interpreter container. Can be a container ID or an object that
    /// specifies uploaded file IDs to make available to your code, along with an
    /// optional `memory_limit` setting.
    pub container: CodeInterpreterToolContainer,
}
1171
/// Optional mask for inpainting with the image generation tool. Provide the
/// mask either inline (`image_url`) or by reference (`file_id`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolInputImageMask {
    /// Base64-encoded mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
    /// File ID for the mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
}
1181
1182#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1183#[serde(rename_all = "lowercase")]
1184pub enum InputFidelity {
1185    #[default]
1186    High,
1187    Low,
1188}
1189
/// Moderation level for generated images. `Auto` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolModeration {
    #[default]
    Auto,
    Low,
}
1197
/// Image generation tool definition. Build with [`ImageGenerationArgs`];
/// every field is optional and omitted from the request when unset.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ImageGenerationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct ImageGenTool {
    /// Background type for the generated image. One of `transparent`,
    /// `opaque`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<ImageGenToolBackground>,
    /// Control how much effort the model will exert to match the style and features, especially facial features,
    /// of input images. This parameter is only supported for `gpt-image-1`. Unsupported
    /// for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_fidelity: Option<InputFidelity>,
    /// Optional mask for inpainting. Contains `image_url`
    /// (string, optional) and `file_id` (string, optional).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_image_mask: Option<ImageGenToolInputImageMask>,
    /// The image generation model to use. Default: `gpt-image-1`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Moderation level for the generated image. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub moderation: Option<ImageGenToolModeration>,
    /// Compression level for the output image. Default: 100.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_compression: Option<u8>,
    /// The output format of the generated image. One of `png`, `webp`, or
    /// `jpeg`. Default: `png`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_format: Option<ImageGenToolOutputFormat>,
    /// Number of partial images to generate in streaming mode, from 0 (default value) to 3.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub partial_images: Option<u8>,
    /// The quality of the generated image. One of `low`, `medium`, `high`,
    /// or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quality: Option<ImageGenToolQuality>,
    /// The size of the generated image. One of `1024x1024`, `1024x1536`,
    /// `1536x1024`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<ImageGenToolSize>,
}
1246
/// Background type for a generated image. `Auto` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolBackground {
    Transparent,
    Opaque,
    #[default]
    Auto,
}
1255
/// Output format for a generated image. `Png` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolOutputFormat {
    #[default]
    Png,
    Webp,
    Jpeg,
}
1264
/// Quality of a generated image. `Auto` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolQuality {
    Low,
    Medium,
    High,
    #[default]
    Auto,
}
1274
/// Size of a generated image. `Auto` is the default; fixed sizes serialize
/// as `WIDTHxHEIGHT` strings.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolSize {
    #[default]
    Auto,
    #[serde(rename = "1024x1024")]
    Size1024x1024,
    #[serde(rename = "1024x1536")]
    Size1024x1536,
    #[serde(rename = "1536x1024")]
    Size1536x1024,
}
1287
/// Mode for a constrained tool set: `auto` lets the model pick among the
/// allowed tools or generate a message; `required` forces it to call one or
/// more of them.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceAllowedMode {
    Auto,
    Required,
}
1294
/// Constrains the tools available to the model to a pre-defined set.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceAllowed {
    /// Constrains the tools available to the model to a pre-defined set.
    ///
    /// `auto` allows the model to pick from among the allowed tools and generate a
    /// message.
    ///
    /// `required` requires the model to call one or more of the allowed tools.
    pub mode: ToolChoiceAllowedMode,
    /// A list of tool definitions that the model should be allowed to call.
    ///
    /// For the Responses API, the list of tool definitions might look like:
    /// ```json
    /// [
    ///   { "type": "function", "name": "get_weather" },
    ///   { "type": "mcp", "server_label": "deepwiki" },
    ///   { "type": "image_generation" }
    /// ]
    /// ```
    pub tools: Vec<serde_json::Value>,
}
1316
/// The type of hosted tool the model should use. Learn more about
/// [built-in tools](https://platform.openai.com/docs/guides/tools).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceTypes {
    FileSearch,
    WebSearchPreview,
    ComputerUsePreview,
    CodeInterpreter,
    ImageGeneration,
}
1328
/// Forces the model to call a specific function.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceFunction {
    /// The name of the function to call.
    pub name: String,
}
1334
/// Forces the model to call a specific tool on a remote MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceMCP {
    /// The name of the tool to call on the server.
    pub name: String,
    /// The label of the MCP server to use.
    pub server_label: String,
}
1342
/// Forces the model to call a specific custom tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceCustom {
    /// The name of the custom tool to call.
    pub name: String,
}
1348
/// How the model should select which tool (or tools) to use when generating
/// a response.
///
/// Tagged by `type` for the object forms; the `Hosted` and `Mode` variants
/// are `untagged` so plain strings like `"auto"` or hosted-tool objects
/// still deserialize.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceParam {
    /// Constrains the tools available to the model to a pre-defined set.
    AllowedTools(ToolChoiceAllowed),

    /// Use this option to force the model to call a specific function.
    Function(ToolChoiceFunction),

    /// Use this option to force the model to call a specific tool on a remote MCP server.
    Mcp(ToolChoiceMCP),

    /// Use this option to force the model to call a custom tool.
    Custom(ToolChoiceCustom),

    /// Forces the model to call the apply_patch tool when executing a tool call.
    ApplyPatch,

    /// Forces the model to call the function shell tool when a tool call is required.
    Shell,

    /// Indicates that the model should use a built-in tool to generate a response.
    /// [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
    #[serde(untagged)]
    Hosted(ToolChoiceTypes),

    /// Controls which (if any) tool is called by the model.
    ///
    /// `none` means the model will not call any tool and instead generates a message.
    ///
    /// `auto` means the model can pick between generating a message or calling one or
    /// more tools.
    ///
    /// `required` means the model must call one or more tools.
    #[serde(untagged)]
    Mode(ToolChoiceOptions),
}
1386
/// Plain-string tool choice: `none`, `auto`, or `required`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceOptions {
    None,
    Auto,
    Required,
}
1394
/// Error returned by the API when a request fails.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ErrorObject {
    /// The error code for the response.
    pub code: String,
    /// A human-readable description of the error.
    pub message: String,
}
1403
/// Details about an incomplete response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct IncompleteDetails {
    /// The reason why the response is incomplete.
    // NOTE(review): the set of possible reason strings is not shown here —
    // confirm against the API reference.
    pub reason: String,
}
1410
/// One of the most likely alternative tokens at a position, with its log
/// probability.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TopLogProb {
    // Byte representation of the token — presumably its UTF-8 bytes;
    // confirm against the API reference.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The token text.
    pub token: String,
}
1417
/// Log-probability information for a sampled token, including the most
/// likely alternatives at its position.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LogProb {
    // Byte representation of the token — presumably its UTF-8 bytes;
    // confirm against the API reference.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The token text.
    pub token: String,
    /// The most likely alternative tokens at this position.
    pub top_logprobs: Vec<TopLogProb>,
}
1425
/// A candidate token and its log probability.
// NOTE(review): the type name misspells "Prob" as "LobProb"; renaming would
// break the public API (it is also referenced by `ResponseLogProb`), so it
// is left as-is.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTopLobProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
}
1433
/// Log-probability information for one output token, with the top
/// alternatives at its position.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseLogProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
    /// The log probability of the top 20 most likely tokens.
    pub top_logprobs: Vec<ResponseTopLobProb>,
}
1443
/// A simple text output from the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputTextContent {
    /// The annotations of the text output.
    pub annotations: Vec<Annotation>,
    /// Log-probability information for the output tokens, when requested.
    pub logprobs: Option<Vec<LogProb>>,
    /// The text output from the model.
    pub text: String,
}
1453
/// An annotation attached to a text output, discriminated by the `type`
/// field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Annotation {
    /// A citation to a file.
    FileCitation(FileCitationBody),
    /// A citation for a web resource used to generate a model response.
    UrlCitation(UrlCitationBody),
    /// A citation for a container file used to generate a model response.
    ContainerFileCitation(ContainerFileCitationBody),
    /// A path to a file.
    FilePath(FilePath),
}
1466
1467#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1468pub struct FileCitationBody {
1469    /// The ID of the file.
1470    file_id: String,
1471    /// The filename of the file cited.
1472    filename: String,
1473    /// The index of the file in the list of files.
1474    index: u32,
1475}
1476
1477#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1478pub struct UrlCitationBody {
1479    /// The index of the last character of the URL citation in the message.
1480    end_index: u32,
1481    /// The index of the first character of the URL citation in the message.
1482    start_index: u32,
1483    /// The title of the web resource.
1484    title: String,
1485    /// The URL of the web resource.
1486    url: String,
1487}
1488
1489#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1490pub struct ContainerFileCitationBody {
1491    /// The ID of the container file.
1492    container_id: String,
1493    /// The index of the last character of the container file citation in the message.
1494    end_index: u32,
1495    /// The ID of the file.
1496    file_id: String,
1497    /// The filename of the container file cited.
1498    filename: String,
1499    /// The index of the first character of the container file citation in the message.
1500    start_index: u32,
1501}
1502
1503#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1504pub struct FilePath {
1505    /// The ID of the file.
1506    file_id: String,
1507    /// The index of the file in the list of files.
1508    index: u32,
1509}
1510
/// A refusal explanation from the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RefusalContent {
    /// The refusal explanation from the model.
    pub refusal: String,
}

/// A message generated by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputMessage {
    /// The content of the output message.
    pub content: Vec<OutputMessageContent>,
    /// The unique ID of the output message.
    pub id: String,
    /// The role of the output message. Always `assistant`.
    pub role: AssistantRole,
    /// The status of the message input. One of `in_progress`, `completed`, or
    /// `incomplete`. Populated when input items are returned via API.
    pub status: OutputStatus,
    // The wire-level `type` field (always "message") is handled by the `Item`
    // enum's `#[serde(tag = "type")]` discriminator, so it is not stored here.
    ///// The type of the output message. Always `message`.
    //pub r#type: MessageType,
}

/// The `type` discriminator value for message items. Always `message`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum MessageType {
    #[default]
    Message,
}

/// The role for an output message - always `assistant`.
/// This type ensures type safety by only allowing the assistant role.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum AssistantRole {
    #[default]
    Assistant,
}
1549
/// Content parts that may appear in an assistant message, discriminated by `type`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputMessageContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
}

/// Output content parts, including reasoning text, discriminated by `type`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
    /// Reasoning text from the model.
    ReasoningText(ReasoningTextContent),
}

/// Reasoning text emitted by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningTextContent {
    /// The reasoning text from the model.
    pub text: String,
}
1575
/// A reasoning item representing the model's chain of thought, including summary paragraphs.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningItem {
    /// Unique identifier of the reasoning content.
    pub id: String,
    /// Reasoning summary content.
    pub summary: Vec<SummaryPart>,
    /// Reasoning text content.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<Vec<ReasoningTextContent>>,
    /// The encrypted content of the reasoning item - populated when a response is generated with
    /// `reasoning.encrypted_content` in the `include` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encrypted_content: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}

/// A single summary text fragment from reasoning.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Summary {
    /// A summary of the reasoning output from the model so far.
    pub text: String,
}

/// A part of the reasoning summary, discriminated by `type`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SummaryPart {
    /// A summary text fragment (type: "summary_text").
    SummaryText(Summary),
}
1608
/// File search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCall {
    /// The unique ID of the file search tool call.
    pub id: String,
    /// The queries used to search for files.
    pub queries: Vec<String>,
    /// The status of the file search tool call. One of `in_progress`, `searching`,
    /// `incomplete`, `failed`, or `completed`.
    pub status: FileSearchToolCallStatus,
    /// The results of the file search tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub results: Option<Vec<FileSearchToolCallResult>>,
}

/// Status of a file search tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FileSearchToolCallStatus {
    InProgress,
    Searching,
    Incomplete,
    Failed,
    Completed,
}

/// A single result from a file search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCallResult {
    /// Set of up to 16 key-value pairs that can be attached to an object. Useful
    /// for storing additional information about the object in a structured
    /// format, and for querying objects via the API or the dashboard. Keys are
    /// strings with a maximum length of 64 characters. Values are strings with
    /// a maximum length of 512 characters, booleans, or numbers.
    pub attributes: HashMap<String, serde_json::Value>,
    /// The unique ID of the file.
    pub file_id: String,
    /// The name of the file.
    pub filename: String,
    /// The relevance score of the file - a value between 0 and 1.
    pub score: f32,
    /// The text that was retrieved from the file.
    pub text: String,
}
1651
/// A pending safety check for a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallSafetyCheckParam {
    /// The ID of the pending safety check.
    pub id: String,
    /// The type of the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// Details about the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
1663
1664#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1665#[serde(rename_all = "snake_case")]
1666pub enum WebSearchToolCallStatus {
1667    InProgress,
1668    Searching,
1669    Completed,
1670    Failed,
1671}
1672
1673#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1674pub struct WebSearchActionSearchSource {
1675    /// The type of source. Always `url`.
1676    pub r#type: String,
1677    /// The URL of the source.
1678    pub url: String,
1679}
1680
1681#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1682pub struct WebSearchActionSearch {
1683    /// The search query.
1684    pub query: String,
1685    /// The sources used in the search.
1686    pub sources: Option<Vec<WebSearchActionSearchSource>>,
1687}
1688
1689#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1690pub struct WebSearchActionOpenPage {
1691    /// The URL opened by the model.
1692    pub url: String,
1693}
1694
1695#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1696pub struct WebSearchActionFind {
1697    /// The URL of the page searched for the pattern.
1698    pub url: String,
1699    /// The pattern or text to search for within the page.
1700    pub pattern: String,
1701}
1702
1703#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1704#[serde(tag = "type", rename_all = "snake_case")]
1705pub enum WebSearchToolCallAction {
1706    /// Action type "search" - Performs a web search query.
1707    Search(WebSearchActionSearch),
1708    /// Action type "open_page" - Opens a specific URL from search results.
1709    OpenPage(WebSearchActionOpenPage),
1710    /// Action type "find": Searches for a pattern within a loaded page.
1711    Find(WebSearchActionFind),
1712}
1713
1714/// Web search tool call output.
1715#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1716pub struct WebSearchToolCall {
1717    /// An object describing the specific action taken in this web search call. Includes
1718    /// details on how the model used the web (search, open_page, find).
1719    pub action: WebSearchToolCallAction,
1720    /// The unique ID of the web search tool call.
1721    pub id: String,
1722    /// The status of the web search tool call.
1723    pub status: WebSearchToolCallStatus,
1724}
1725
/// Output from a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerToolCall {
    /// The action the model requests (click, drag, keypress, ...).
    pub action: ComputerAction,
    /// An identifier used when responding to the tool call with output.
    pub call_id: String,
    /// The unique ID of the computer call.
    pub id: String,
    /// The pending safety checks for the computer call.
    pub pending_safety_checks: Vec<ComputerCallSafetyCheckParam>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    pub status: OutputStatus,
}
1740
/// A point in 2D space.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DragPoint {
    /// The x-coordinate.
    pub x: i32,
    /// The y-coordinate.
    pub y: i32,
}

/// Represents all user-triggered actions.
///
/// Serialized with a `type` tag (e.g. `{"type": "click", ...}`); the unit
/// variants `Screenshot` and `Wait` serialize as just `{"type": "screenshot"}`
/// and `{"type": "wait"}`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ComputerAction {
    /// A click action.
    Click(ClickParam),

    /// A double click action.
    DoubleClick(DoubleClickAction),

    /// A drag action.
    Drag(Drag),

    /// A collection of keypresses the model would like to perform.
    Keypress(KeyPressAction),

    /// A mouse move action.
    Move(Move),

    /// A screenshot action.
    Screenshot,

    /// A scroll action.
    Scroll(Scroll),

    /// An action to type in text.
    Type(Type),

    /// A wait action.
    Wait,
}

/// The mouse button used for a click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ClickButtonType {
    Left,
    Right,
    Wheel,
    Back,
    Forward,
}

/// A click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ClickParam {
    /// Indicates which mouse button was pressed during the click. One of `left`,
    /// `right`, `wheel`, `back`, or `forward`.
    pub button: ClickButtonType,
    /// The x-coordinate where the click occurred.
    pub x: i32,
    /// The y-coordinate where the click occurred.
    pub y: i32,
}

/// A double click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DoubleClickAction {
    /// The x-coordinate where the double click occurred.
    pub x: i32,
    /// The y-coordinate where the double click occurred.
    pub y: i32,
}

/// A drag action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Drag {
    /// The path of points the cursor drags through.
    pub path: Vec<DragPoint>,
}

/// A keypress action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KeyPressAction {
    /// The combination of keys the model is requesting to be pressed.
    /// This is an array of strings, each representing a key.
    pub keys: Vec<String>,
}

/// A mouse move action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Move {
    /// The x-coordinate to move to.
    pub x: i32,
    /// The y-coordinate to move to.
    pub y: i32,
}

/// A scroll action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Scroll {
    /// The horizontal scroll distance.
    pub scroll_x: i32,
    /// The vertical scroll distance.
    pub scroll_y: i32,
    /// The x-coordinate where the scroll occurred.
    pub x: i32,
    /// The y-coordinate where the scroll occurred.
    pub y: i32,
}

/// A typing (text entry) action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Type {
    /// The text to type.
    pub text: String,
}
1856
/// A tool call to run a function defined by the caller.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionToolCall {
    /// A JSON string of the arguments to pass to the function.
    pub arguments: String,
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// The name of the function to run.
    pub name: String,
    /// The unique ID of the function tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
1873
1874#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1875#[serde(rename_all = "snake_case")]
1876pub enum ImageGenToolCallStatus {
1877    InProgress,
1878    Completed,
1879    Generating,
1880    Failed,
1881}
1882
1883#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1884pub struct ImageGenToolCall {
1885    /// The unique ID of the image generation call.
1886    pub id: String,
1887    /// The generated image encoded in base64.
1888    pub result: Option<String>,
1889    /// The status of the image generation call.
1890    pub status: ImageGenToolCallStatus,
1891}
1892
1893#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1894#[serde(rename_all = "snake_case")]
1895pub enum CodeInterpreterToolCallStatus {
1896    InProgress,
1897    Completed,
1898    Incomplete,
1899    Interpreting,
1900    Failed,
1901}
1902
1903/// Output of a code interpreter request.
1904#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1905pub struct CodeInterpreterToolCall {
1906    /// The code to run, or null if not available.
1907    #[serde(skip_serializing_if = "Option::is_none")]
1908    pub code: Option<String>,
1909    /// ID of the container used to run the code.
1910    pub container_id: String,
1911    /// The unique ID of the code interpreter tool call.
1912    pub id: String,
1913    /// The outputs generated by the code interpreter, such as logs or images.
1914    /// Can be null if no outputs are available.
1915    #[serde(skip_serializing_if = "Option::is_none")]
1916    pub outputs: Option<Vec<CodeInterpreterToolCallOutput>>,
1917    /// The status of the code interpreter tool call.
1918    /// Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`.
1919    pub status: CodeInterpreterToolCallStatus,
1920}
1921
1922/// Individual result from a code interpreter: either logs or files.
1923#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1924#[serde(tag = "type", rename_all = "snake_case")]
1925pub enum CodeInterpreterToolCallOutput {
1926    /// Code interpreter output logs
1927    Logs(CodeInterpreterOutputLogs),
1928    /// Code interpreter output image
1929    Image(CodeInterpreterOutputImage),
1930}
1931
1932#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1933pub struct CodeInterpreterOutputLogs {
1934    /// The logs output from the code interpreter.
1935    pub logs: String,
1936}
1937
1938#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1939pub struct CodeInterpreterOutputImage {
1940    /// The URL of the image output from the code interpreter.
1941    pub url: String,
1942}
1943
1944#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1945pub struct CodeInterpreterFile {
1946    /// The ID of the file.
1947    file_id: String,
1948    /// The MIME type of the file.
1949    mime_type: String,
1950}
1951
1952#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1953pub struct LocalShellToolCall {
1954    /// Execute a shell command on the server.
1955    pub action: LocalShellExecAction,
1956    /// The unique ID of the local shell tool call generated by the model.
1957    pub call_id: String,
1958    /// The unique ID of the local shell call.
1959    pub id: String,
1960    /// The status of the local shell call.
1961    pub status: OutputStatus,
1962}
1963
1964/// Define the shape of a local shell action (exec).
1965#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1966pub struct LocalShellExecAction {
1967    /// The command to run.
1968    pub command: Vec<String>,
1969    /// Environment variables to set for the command.
1970    pub env: HashMap<String, String>,
1971    /// Optional timeout in milliseconds for the command.
1972    pub timeout_ms: Option<u64>,
1973    /// Optional user to run the command as.
1974    pub user: Option<String>,
1975    /// Optional working directory to run the command in.
1976    pub working_directory: Option<String>,
1977}
1978
/// Commands and limits describing how to run the shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellActionParam {
    /// Ordered shell commands for the execution environment to run.
    pub commands: Vec<String>,
    /// Maximum wall-clock time in milliseconds to allow the shell commands to run.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timeout_ms: Option<u64>,
    /// Maximum number of UTF-8 characters to capture from combined stdout and stderr output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}

/// Status values reported for shell tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FunctionShellCallItemStatus {
    InProgress,
    Completed,
    Incomplete,
}

/// A tool representing a request to execute one or more shell commands.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallItemParam {
    /// The unique ID of the shell tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellActionParam,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<FunctionShellCallItemStatus>,
}

/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcomeParam {
    /// The exit code returned by the shell process.
    pub exit_code: i32,
}

/// The exit or timeout outcome associated with this chunk.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcomeParam {
    /// The shell commands timed out (type: "timeout").
    Timeout,
    /// The shell commands exited with an exit code (type: "exit").
    Exit(FunctionShellCallOutputExitOutcomeParam),
}

/// Captured stdout and stderr for a portion of a shell tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContentParam {
    /// Captured stdout output for this chunk of the shell call.
    pub stdout: String,
    /// Captured stderr output for this chunk of the shell call.
    pub stderr: String,
    /// The exit or timeout outcome associated with this chunk.
    pub outcome: FunctionShellCallOutputOutcomeParam,
}

/// The streamed output items emitted by a shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputItemParam {
    /// The unique ID of the shell tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// Captured chunks of stdout and stderr output, along with their associated outcomes.
    pub output: Vec<FunctionShellCallOutputContentParam>,
    /// The maximum number of UTF-8 characters captured for this shell call's combined output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
2056
/// Status values reported for apply_patch tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatusParam {
    InProgress,
    Completed,
}

/// Instruction for creating a new file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperationParam {
    /// Path of the file to create relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply when creating the file.
    pub diff: String,
}

/// Instruction for deleting an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperationParam {
    /// Path of the file to delete relative to the workspace root.
    pub path: String,
}

/// Instruction for updating an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperationParam {
    /// Path of the file to update relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply to the existing file.
    pub diff: String,
}

/// One of the create_file, delete_file, or update_file operations supplied to the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperationParam {
    /// Create a new file (type: "create_file").
    CreateFile(ApplyPatchCreateFileOperationParam),
    /// Delete an existing file (type: "delete_file").
    DeleteFile(ApplyPatchDeleteFileOperationParam),
    /// Update an existing file (type: "update_file").
    UpdateFile(ApplyPatchUpdateFileOperationParam),
}

/// A tool call representing a request to create, delete, or update files using diff patches.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallItemParam {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatusParam,
    /// The specific create, delete, or update instruction for the apply_patch tool call.
    pub operation: ApplyPatchOperationParam,
}

/// Outcome values reported for apply_patch tool call outputs.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallOutputStatusParam {
    Completed,
    Failed,
}

/// The streamed output emitted by an apply patch tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallOutputItemParam {
    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call output. One of `completed` or `failed`.
    pub status: ApplyPatchCallOutputStatusParam,
    /// Optional human-readable log text from the apply patch tool (e.g., patch results or errors).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output: Option<String>,
}
2135
2136/// Shell exec action
2137/// Execute a shell command.
2138#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2139pub struct FunctionShellAction {
2140    /// A list of commands to run.
2141    pub commands: Vec<String>,
2142    /// Optional timeout in milliseconds for the commands.
2143    pub timeout_ms: Option<u64>,
2144    /// Optional maximum number of characters to return from each command.
2145    pub max_output_length: Option<u64>,
2146}
2147
2148/// Status values reported for function shell tool calls.
2149#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2150#[serde(rename_all = "snake_case")]
2151pub enum LocalShellCallStatus {
2152    InProgress,
2153    Completed,
2154    Incomplete,
2155}
2156
/// A tool call that executes one or more shell commands in a managed environment.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCall {
    /// The unique ID of the function shell tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the function shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellAction,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    pub status: LocalShellCallStatus,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}

/// The content of a shell call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContent {
    /// Captured stdout output for this chunk of the shell call.
    pub stdout: String,
    /// Captured stderr output for this chunk of the shell call.
    pub stderr: String,
    /// Represents either an exit outcome (with an exit code) or a timeout outcome for a shell call output chunk.
    // `flatten` inlines the outcome's `type` tag (and `exit_code`, if present)
    // into this object instead of nesting it under an `outcome` key.
    #[serde(flatten)]
    pub outcome: FunctionShellCallOutputOutcome,
    /// The ID of the entity that created this output chunk.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}

/// Function shell call outcome
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcome {
    /// The shell commands timed out (type: "timeout").
    Timeout,
    /// The shell commands exited with an exit code (type: "exit").
    Exit(FunctionShellCallOutputExitOutcome),
}

/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcome {
    /// Exit code from the shell process.
    pub exit_code: i32,
}
2199
2200/// The output of a shell tool call.
2201#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2202pub struct FunctionShellCallOutput {
2203    /// The unique ID of the shell call output. Populated when this item is returned via API.
2204    pub id: String,
2205    /// The unique ID of the shell tool call generated by the model.
2206    pub call_id: String,
2207    /// An array of shell call output contents
2208    pub output: Vec<FunctionShellCallOutputContent>,
2209    /// The maximum length of the shell command output. This is generated by the model and should be
2210    /// passed back with the raw output.
2211    pub max_output_length: Option<u64>,
2212    #[serde(skip_serializing_if = "Option::is_none")]
2213    pub created_by: Option<String>,
2214}
2215
/// Status values reported for apply_patch tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatus {
    InProgress,
    Completed,
}

/// Instruction describing how to create a file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperation {
    /// Path of the file to create.
    pub path: String,
    /// Diff to apply.
    pub diff: String,
}

/// Instruction describing how to delete a file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperation {
    /// Path of the file to delete.
    pub path: String,
}

/// Instruction describing how to update a file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperation {
    /// Path of the file to update.
    pub path: String,
    /// Diff to apply.
    pub diff: String,
}

/// One of the create_file, delete_file, or update_file operations applied via apply_patch.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperation {
    /// Create a new file (type: "create_file").
    CreateFile(ApplyPatchCreateFileOperation),
    /// Delete an existing file (type: "delete_file").
    DeleteFile(ApplyPatchDeleteFileOperation),
    /// Update an existing file (type: "update_file").
    UpdateFile(ApplyPatchUpdateFileOperation),
}

/// A tool call that applies file diffs by creating, deleting, or updating files.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCall {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatus,
    /// One of the create_file, delete_file, or update_file operations applied via apply_patch.
    pub operation: ApplyPatchOperation,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2273
2274/// Outcome values reported for apply_patch tool call outputs.
2275#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2276#[serde(rename_all = "snake_case")]
2277pub enum ApplyPatchCallOutputStatus {
2278    Completed,
2279    Failed,
2280}
2281
2282/// The output emitted by an apply patch tool call.
2283#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2284pub struct ApplyPatchToolCallOutput {
2285    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
2286    pub id: String,
2287    /// The unique ID of the apply patch tool call generated by the model.
2288    pub call_id: String,
2289    /// The status of the apply patch tool call output. One of `completed` or `failed`.
2290    pub status: ApplyPatchCallOutputStatus,
2291    /// Optional textual output returned by the apply patch tool.
2292    pub output: Option<String>,
2293    /// The ID of the entity that created this tool call output.
2294    #[serde(skip_serializing_if = "Option::is_none")]
2295    pub created_by: Option<String>,
2296}
2297
2298/// Output of an MCP server tool invocation.
2299#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2300pub struct MCPToolCall {
2301    /// A JSON string of the arguments passed to the tool.
2302    pub arguments: String,
2303    /// The unique ID of the tool call.
2304    pub id: String,
2305    /// The name of the tool that was run.
2306    pub name: String,
2307    /// The label of the MCP server running the tool.
2308    pub server_label: String,
2309    /// Unique identifier for the MCP tool call approval request. Include this value
2310    /// in a subsequent `mcp_approval_response` input to approve or reject the corresponding
2311    /// tool call.
2312    pub approval_request_id: Option<String>,
2313    /// Error message from the call, if any.
2314    pub error: Option<String>,
2315    /// The output from the tool call.
2316    pub output: Option<String>,
2317    /// The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
2318    /// `calling`, or `failed`.
2319    pub status: Option<MCPToolCallStatus>,
2320}
2321
/// The status of an MCP tool call.
///
/// Serialized in snake_case (`in_progress`, `completed`, `incomplete`, `calling`, `failed`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum MCPToolCallStatus {
    InProgress,
    Completed,
    Incomplete,
    Calling,
    Failed,
}
2331
/// A list of tools available on an MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPListTools {
    /// The unique ID of the list.
    pub id: String,
    /// The label of the MCP server.
    pub server_label: String,
    /// The tools available on the server.
    pub tools: Vec<MCPListToolsTool>,
    /// Error message if listing failed.
    // Omitted from the serialized form when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
2344
/// A request for human approval of a tool invocation on an MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalRequest {
    /// JSON string of arguments for the tool.
    pub arguments: String,
    /// The unique ID of the approval request.
    pub id: String,
    /// The name of the tool to run.
    pub name: String,
    /// The label of the MCP server making the request.
    pub server_label: String,
}
2356
/// System (or developer) instructions for the model: either a plain string or
/// a list of input items.
///
/// Untagged: the JSON representation carries no discriminator, so a JSON string
/// deserializes to `Text` and a JSON array to `Array`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum Instructions {
    /// A text input to the model, equivalent to a text input with the `developer` role.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    Array(Vec<InputItem>),
}
2365
/// The complete response returned by the Responses API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Response {
    /// Whether to run the model response in the background.
    /// [Learn more](https://platform.openai.com/docs/guides/background).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,

    /// Billing information for the response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub billing: Option<Billing>,

    /// The conversation that this response belongs to. Input items and output
    /// items from this response are automatically added to this conversation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<Conversation>,

    /// Unix timestamp (in seconds) when this Response was created.
    pub created_at: u64,

    /// An error object returned when the model fails to generate a Response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorObject>,

    /// Unique identifier for this response.
    pub id: String,

    /// Details about why the response is incomplete, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub incomplete_details: Option<IncompleteDetails>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When using along with `previous_response_id`, the instructions from a previous response
    /// will not be carried over to the next response. This makes it simple to swap out
    /// system (or developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<Instructions>,

    /// An upper bound for the number of tokens that can be generated for a response,
    /// including visible output tokens and
    /// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u32>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be
    /// useful for storing additional information about the object in a structured
    /// format, and querying for objects via API or the dashboard.
    ///
    /// Keys are strings with a maximum length of 64 characters. Values are strings
    /// with a maximum length of 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: String,

    /// The object type of this resource - always set to `response`.
    pub object: String,

    /// An array of content items generated by the model.
    ///
    /// - The length and order of items in the output array is dependent on the model's response.
    /// - Rather than accessing the first item in the output array and assuming it's an assistant
    ///   message with the content generated by the model, you might consider using
    ///   the `output_text` property where supported in SDKs.
    pub output: Vec<OutputItem>,

    // SDK-only convenience property that contains the aggregated text output from all
    // `output_text` items in the `output` array, if any are present.
    // Supported in the Python and JavaScript SDKs; intentionally not implemented here.
    // (Plain `//` comments, not `///`: doc comments here would attach to the next
    // field, `parallel_tool_calls`, since the field below is commented out.)
    // #[serde(skip_serializing_if = "Option::is_none")]
    // pub output_text: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// Reference to a prompt template and its variables.
    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<Prompt>,

    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,

    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_retention: Option<PromptCacheRetention>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
    /// usage policies.
    ///
    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
    /// or email address, in order to avoid sending us any identifying information. [Learn
    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub safety_identifier: Option<String>,

    /// Specifies the processing type used for serving the request.
    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
    /// - When not set, the default behavior is 'auto'.
    ///
    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<ServiceTier>,

    /// The status of the response generation.
    /// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or `incomplete`.
    pub status: Status,

    /// What sampling temperature was used, between 0 and 2. Higher values like 0.8 make
    /// outputs more random, lower values like 0.2 make output more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You
    /// can specify which tool to use by setting the `tool_choice` parameter.
    ///
    /// We support the following categories of tools:
    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
    /// - **Function calls (custom tools)**: Functions that are defined by you,
    ///   enabling the model to call your own code with strongly typed arguments
    ///   and outputs. Learn more about
    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
    ///   custom tools to call your own code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
    /// token position, each with an associated log probability.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u8>,

    /// An alternative to sampling with temperature, called nucleus sampling,
    /// where the model considers the results of the tokens with top_p probability
    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
    /// are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,

    /// Represents token usage details including input tokens, output tokens,
    /// a breakdown of output tokens, and the total tokens used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub usage: Option<ResponseUsage>,
}
2563
/// The status of a response generation.
///
/// Serialized in snake_case (`completed`, `failed`, `in_progress`, `cancelled`,
/// `queued`, `incomplete`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum Status {
    Completed,
    Failed,
    InProgress,
    Cancelled,
    Queued,
    Incomplete,
}
2574
/// Output item generated by the model as part of a response's `output` array.
///
/// Internally tagged on the `type` field, with variant names serialized in
/// snake_case (e.g. `message`, `file_search_call`, `mcp_call`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum OutputItem {
    /// An output message from the model.
    Message(OutputMessage),
    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
    /// for more information.
    FileSearchCall(FileSearchToolCall),
    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling)
    /// for more information.
    FunctionCall(FunctionToolCall),
    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search)
    /// for more information.
    WebSearchCall(WebSearchToolCall),
    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
    /// for more information.
    ComputerCall(ComputerToolCall),
    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API for
    /// subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),
    /// A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
    Compaction(CompactionBody),
    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),
    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),
    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),
    /// A tool call that executes one or more shell commands in a managed environment.
    ShellCall(FunctionShellCall),
    /// The output of a shell tool call.
    ShellCallOutput(FunctionShellCallOutput),
    /// A tool call that applies file diffs by creating, deleting, or updating files.
    ApplyPatchCall(ApplyPatchToolCall),
    /// The output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutput),
    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),
    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),
    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),
    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
2628
/// A call to a custom tool created by the model.
// `#[non_exhaustive]` keeps downstream struct literals and exhaustive destructuring
// from breaking if the API adds fields later.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[non_exhaustive]
pub struct CustomToolCall {
    /// An identifier used to map this custom tool call to a tool call output.
    pub call_id: String,
    /// The input for the custom tool call generated by the model.
    pub input: String,
    /// The name of the custom tool being called.
    pub name: String,
    /// The unique ID of the custom tool call in the OpenAI platform.
    pub id: String,
}
2641
/// Confirmation returned when a response is deleted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct DeleteResponse {
    /// The object type of the deleted resource.
    pub object: String,
    /// Whether the response was deleted.
    pub deleted: bool,
    /// The ID of the deleted response.
    pub id: String,
}
2648
/// A bare reference to an item by ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct AnyItemReference {
    /// The item type discriminator.
    // NOTE(review): presumably `"item_reference"` when present — confirm against the API spec.
    pub r#type: Option<String>,
    /// The ID of the referenced item.
    pub id: String,
}
2654
/// A fully-typed item resource, discriminated by its `type` field.
///
/// Internally tagged; variant names serialize in snake_case
/// (e.g. `message`, `function_call_output`, `mcp_approval_response`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ItemResourceItem {
    Message(MessageItem),
    FileSearchCall(FileSearchToolCall),
    ComputerCall(ComputerToolCall),
    ComputerCallOutput(ComputerCallOutputItemParam),
    WebSearchCall(WebSearchToolCall),
    FunctionCall(FunctionToolCall),
    FunctionCallOutput(FunctionCallOutputItemParam),
    ImageGenerationCall(ImageGenToolCall),
    CodeInterpreterCall(CodeInterpreterToolCall),
    LocalShellCall(LocalShellToolCall),
    LocalShellCallOutput(LocalShellToolCallOutput),
    ShellCall(FunctionShellCallItemParam),
    ShellCallOutput(FunctionShellCallOutputItemParam),
    ApplyPatchCall(ApplyPatchToolCallItemParam),
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
    McpListTools(MCPListTools),
    McpApprovalRequest(MCPApprovalRequest),
    McpApprovalResponse(MCPApprovalResponse),
    McpCall(MCPToolCall),
}
2678
2679#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2680#[serde(untagged)]
2681pub enum ItemResource {
2682    ItemReference(AnyItemReference),
2683    Item(ItemResourceItem),
2684}
2685
/// A list of Response items.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseItemList {
    /// The type of object returned, must be `list`.
    pub object: String,
    /// The ID of the first item in the list.
    // NOTE(review): presumably `None` when `data` is empty — confirm against the API.
    pub first_id: Option<String>,
    /// The ID of the last item in the list.
    pub last_id: Option<String>,
    /// Whether there are more items in the list.
    pub has_more: bool,
    /// The list of items.
    pub data: Vec<ItemResource>,
}
2700
/// Request body used to compute token counts for a prospective response request.
///
/// All fields are optional; construct via `TokenCountsBodyArgs` (derive_builder)
/// or struct-update syntax with `Default`.
#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
#[builder(
    name = "TokenCountsBodyArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct TokenCountsBody {
    /// The conversation that this response belongs to. Items from this
    /// conversation are prepended to `input_items` for this response request.
    /// Input items and output items from this response are automatically added to this
    /// conversation after this response completes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<ConversationParam>,

    /// Text, image, or file inputs to the model, used to generate a response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
    /// to browse and compare available models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You can specify which tool
    /// to use by setting the `tool_choice` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,
}
2778
/// Result of a token-counting request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TokenCountsResource {
    /// The object type of this resource.
    pub object: String,
    /// The number of input tokens counted for the request.
    pub input_tokens: u32,
}
2784
/// A compaction item generated by the `/v1/responses/compact` API.
///
/// Parameter (input) form of a compaction item; the `id` may be omitted when
/// sending this item back as input.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactionSummaryItemParam {
    /// The ID of the compaction item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The encrypted content.
    pub encrypted_content: String,
}
2794
/// A compaction item generated by the `/v1/responses/compact` API.
///
/// Resource (output) form; unlike `CompactionSummaryItemParam`, the `id` is
/// always present.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactionBody {
    /// The unique ID of the compaction item.
    pub id: String,
    /// The encrypted content.
    pub encrypted_content: String,
    /// Created by model/user identifier.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2806
/// Request to compact a conversation.
///
/// `model` is required; construct via `CompactResponseRequestArgs` (derive_builder).
#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
#[builder(name = "CompactResponseRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CompactResponseRequest {
    /// Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wide range of models
    /// with different capabilities, performance characteristics, and price points. Refer to the
    /// [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: String,

    /// Text, image, or file inputs to the model, used to generate a response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,
}
2838
/// The compacted response object.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactResource {
    /// The unique identifier for the compacted response.
    pub id: String,
    /// The object type. Always `response.compaction`.
    pub object: String,
    /// The compacted list of output items. This is a list of all user messages,
    /// followed by a single compaction item.
    pub output: Vec<OutputItem>,
    /// Unix timestamp (in seconds) when the compacted conversation was created.
    pub created_at: u64,
    /// Token accounting for the compaction pass, including cached, reasoning, and total tokens.
    pub usage: ResponseUsage,
}