// async_openai/types/responses/response.rs
1use crate::error::OpenAIError;
2use crate::types::mcp::{MCPListToolsTool, MCPTool};
3use crate::types::responses::{
4 CustomGrammarFormatParam, Filter, ImageDetail, ReasoningEffort, ResponseFormatJsonSchema,
5 ResponseUsage,
6};
7use derive_builder::Builder;
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10
/// Role of a message in the Responses API.
///
/// Serialized in lowercase (`"user"`, `"assistant"`, `"system"`, `"developer"`).
/// Defaults to `User`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    #[default]
    User,
    Assistant,
    System,
    Developer,
}
21
/// Status of input/output items.
///
/// Serialized in snake_case: `in_progress`, `completed`, or `incomplete`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OutputStatus {
    InProgress,
    Completed,
    Incomplete,
}
30
/// The `input` parameter of a response request: either a bare string or a
/// list of structured input items.
///
/// Untagged: a JSON string deserializes as `Text`, a JSON array as `Items`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputParam {
    /// A text input to the model, equivalent to a text input with the
    /// `user` role.
    Text(String),
    /// A list of one or many input items to the model, containing
    /// different content types.
    Items(Vec<InputItem>),
}
41
/// Content item used to generate a response.
///
/// This is a properly discriminated union based on the `type` field, using Rust's
/// type-safe enum with serde's tag attribute for efficient deserialization.
/// Variant names are serialized in snake_case as the `type` discriminator
/// (e.g. `FileSearchCall` -> `"file_search_call"`).
///
/// # OpenAPI Specification
/// Corresponds to the `Item` schema in the OpenAPI spec with a `type` discriminator.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Item {
    /// A message (type: "message").
    /// Can represent InputMessage (user/system/developer) or OutputMessage (assistant).
    ///
    /// InputMessage:
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the developer or system role take precedence over instructions given with the user role.
    /// OutputMessage:
    /// A message output from the model.
    Message(MessageItem),

    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
    FileSearchCall(FileSearchToolCall),

    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
    ComputerCall(ComputerToolCall),

    /// The output of a computer tool call.
    ComputerCallOutput(ComputerCallOutputItemParam),

    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
    WebSearchCall(WebSearchToolCall),

    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
    FunctionCall(FunctionToolCall),

    /// The output of a function tool call.
    FunctionCallOutput(FunctionCallOutputItemParam),

    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API
    /// for subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),

    /// A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
    Compaction(CompactionSummaryItemParam),

    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),

    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),

    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),

    /// The output of a local shell tool call.
    LocalShellCallOutput(LocalShellToolCallOutput),

    /// A tool representing a request to execute one or more shell commands.
    ShellCall(FunctionShellCallItemParam),

    /// The streamed output items emitted by a shell tool call.
    ShellCallOutput(FunctionShellCallOutputItemParam),

    /// A tool call representing a request to create, delete, or update files using diff patches.
    ApplyPatchCall(ApplyPatchToolCallItemParam),

    /// The streamed output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),

    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),

    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),

    /// A response to an MCP approval request.
    McpApprovalResponse(MCPApprovalResponse),

    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),

    /// The output of a custom tool call from your code, being sent back to the model.
    CustomToolCallOutput(CustomToolCallOutput),

    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
136
/// Input item that can be used in the context for generating a response.
///
/// This represents the OpenAPI `InputItem` schema which is an `anyOf`:
/// 1. `EasyInputMessage` - Simple, user-friendly message input (can use string content)
/// 2. `Item` - Structured items with proper type discrimination (including InputMessage, OutputMessage, tool calls)
/// 3. `ItemReferenceParam` - Reference to an existing item by ID (type can be null)
///
/// Uses untagged deserialization because these types overlap in structure.
/// Order matters: serde tries the variants top to bottom, so the most
/// specific structures are declared first.
///
/// # OpenAPI Specification
/// Corresponds to the `InputItem` schema: `anyOf[EasyInputMessage, Item, ItemReferenceParam]`
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputItem {
    /// A reference to an existing item by ID.
    /// Has a required `id` field and optional `type` (can be "item_reference" or null).
    /// Must be tried first as it's the most minimal structure.
    ItemReference(ItemReference),

    /// All structured items with proper type discrimination.
    /// Includes InputMessage, OutputMessage, and all tool calls/outputs.
    /// Uses the discriminated `Item` enum for efficient, type-safe deserialization.
    Item(Item),

    /// A simple, user-friendly message input (EasyInputMessage).
    /// Supports string content and can include assistant role for previous responses.
    /// Must be tried last as it's the most flexible structure.
    ///
    /// A message input to the model with a role indicating instruction following
    /// hierarchy. Instructions given with the `developer` or `system` role take
    /// precedence over instructions given with the `user` role. Messages with the
    /// `assistant` role are presumed to have been generated by the model in previous
    /// interactions.
    EasyMessage(EasyInputMessage),
}
173
/// A message item used within the `Item` enum.
///
/// Both InputMessage and OutputMessage have `type: "message"`, so we use an untagged
/// enum to distinguish them based on their structure:
/// - OutputMessage: role=assistant, required id & status fields
/// - InputMessage: role=user/system/developer, content is `Vec<ContentType>`, optional id/status
///
/// Note: EasyInputMessage is NOT included here - it's a separate variant in `InputItem`,
/// not part of the structured `Item` enum.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum MessageItem {
    /// An output message from the model (role: assistant, has required id & status).
    /// This must come first as it has the most specific structure (required id and status fields).
    Output(OutputMessage),

    /// A structured input message (role: user/system/developer, content is `Vec<ContentType>`).
    /// Has structured content list and optional id/status fields.
    ///
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the `developer` or `system` role take precedence over instructions
    /// given with the `user` role.
    Input(InputMessage),
}
198
/// A reference to an existing item by ID (the OpenAPI `ItemReferenceParam`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ItemReference {
    /// The type of item to reference. Can be "item_reference" or null;
    /// `None` is simply omitted from the serialized form.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub r#type: Option<ItemReferenceType>,
    /// The ID of the item to reference.
    pub id: String,
}
208
/// The single allowed `type` value for an [`ItemReference`]:
/// serialized as `"item_reference"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ItemReferenceType {
    ItemReference,
}
214
/// Output from a function call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionCallOutputItemParam {
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// Text, image, or file output of the function tool call.
    pub output: FunctionCallOutput,
    /// The unique ID of the function tool call output.
    /// Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
231
/// The payload of a function tool call output: either a plain string or a
/// list of structured content parts. Untagged, so the JSON shape decides.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum FunctionCallOutput {
    /// A JSON string of the output of the function tool call.
    Text(String),
    Content(Vec<InputContent>), // TODO use shape which allows null from OpenAPI spec?
}
239
/// Output from a computer use tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallOutputItemParam {
    /// The ID of the computer tool call that produced the output.
    pub call_id: String,
    /// A computer screenshot image used with the computer use tool.
    pub output: ComputerScreenshotImage,
    /// The safety checks reported by the API that have been acknowledged by the developer.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acknowledged_safety_checks: Option<Vec<ComputerCallSafetyCheckParam>>,
    /// The unique ID of the computer tool call output. Optional when creating.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the message input. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when input items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
257
/// The single allowed `type` value for a [`ComputerScreenshotImage`]:
/// serialized as `"computer_screenshot"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ComputerScreenshotImageType {
    ComputerScreenshot,
}
263
/// A computer screenshot image used with the computer use tool.
///
/// At least one of `file_id` or `image_url` identifies the screenshot;
/// both are optional in the schema and omitted from JSON when `None`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerScreenshotImage {
    /// Specifies the event type. For a computer screenshot, this property is always
    /// set to `computer_screenshot`.
    pub r#type: ComputerScreenshotImageType,
    /// The identifier of an uploaded file that contains the screenshot.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the screenshot image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
277
/// Output from a local shell tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCallOutput {
    /// The unique ID of the local shell tool call generated by the model.
    pub id: String,

    /// A JSON string of the output of the local shell tool call.
    pub output: String,

    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
291
/// Output from a local shell command execution.
///
/// All fields are optional and omitted from JSON when `None`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellOutput {
    /// The stdout output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stdout: Option<String>,

    /// The stderr output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stderr: Option<String>,

    /// The exit code of the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exit_code: Option<i32>,
}
307
/// An MCP approval response that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalResponse {
    /// The ID of the approval request being answered.
    pub approval_request_id: String,

    /// Whether the request was approved.
    pub approve: bool,

    /// The unique ID of the approval response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,

    /// Optional reason for the decision.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
325
/// The payload of a custom tool call output: either a plain string or a list
/// of structured content parts. Untagged, so the JSON shape decides.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum CustomToolCallOutputOutput {
    /// A string of the output of the custom tool call.
    Text(String),
    /// Text, image, or file output of the custom tool call.
    List(Vec<InputContent>),
}
334
/// The output of a custom tool call from your code, being sent back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CustomToolCallOutput {
    /// The call ID, used to map this custom tool call output to a custom tool call.
    pub call_id: String,

    /// The output from the custom tool call generated by your code.
    /// Can be a string or a list of output content.
    pub output: CustomToolCallOutputOutput,

    /// The unique ID of the custom tool call output in the OpenAI platform.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
348
/// A simplified message input to the model (EasyInputMessage in the OpenAPI spec).
///
/// This is the most user-friendly way to provide messages, supporting both simple
/// string content and structured content. Role can include `assistant` for providing
/// previous assistant responses.
///
/// Construct with [`EasyInputMessageArgs`] or directly via struct literal.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "EasyInputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct EasyInputMessage {
    /// The type of the message input. Always set to `message`.
    pub r#type: MessageType,
    /// The role of the message input. One of `user`, `assistant`, `system`, or `developer`.
    pub role: Role,
    /// Text, image, or audio input to the model, used to generate a response.
    /// Can also contain previous assistant responses.
    pub content: EasyInputContent,
}
371
/// A structured message input to the model (InputMessage in the OpenAPI spec).
///
/// This variant requires structured content (not a simple string) and does not support
/// the `assistant` role (use OutputMessage for that). `status` is populated when items
/// are returned via API.
///
/// Construct with [`InputMessageArgs`] or directly via struct literal.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputMessage {
    /// A list of one or many input items to the model, containing different content types.
    pub content: Vec<InputContent>,
    /// The role of the message input. One of `user`, `system`, or `developer`.
    /// Note: `assistant` is NOT allowed here; use OutputMessage instead.
    pub role: InputRole,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
    // NOTE(review): the spec's `type: "message"` field is intentionally not modeled
    // here — presumably the tag is supplied by the enclosing `Item` enum; confirm
    // before adding it back.
    //pub r#type: MessageType,
}
397
/// The role for an input message - can only be `user`, `system`, or `developer`.
/// This type ensures type safety by excluding the `assistant` role (use OutputMessage for that).
///
/// Serialized in lowercase; defaults to `User`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum InputRole {
    #[default]
    User,
    System,
    Developer,
}
408
/// Content for EasyInputMessage - can be a simple string or structured list.
/// Untagged: a JSON string becomes `Text`, a JSON array becomes `ContentList`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum EasyInputContent {
    /// A text input to the model.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    ContentList(Vec<InputContent>),
}
418
/// Parts of a message: text, image, or file.
///
/// Internally tagged on `type` with snake_case values
/// (`input_text`, `input_image`, `input_file`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    /// A text input to the model.
    InputText(InputTextContent),
    /// An image input to the model. Learn about
    /// [image inputs](https://platform.openai.com/docs/guides/vision).
    InputImage(InputImageContent),
    /// A file input to the model.
    InputFile(InputFileContent),
}
431
432#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
433pub struct InputTextContent {
434 /// The text input to the model.
435 pub text: String,
436}
437
/// An image input to the model.
///
/// Construct with [`InputImageArgs`] or directly via struct literal.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputImageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputImageContent {
    /// The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`.
    /// Defaults to `auto`.
    pub detail: ImageDetail,
    /// The ID of the file to be sent to the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image
    /// in a data URL.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
458
459#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
460#[builder(
461 name = "InputFileArgs",
462 pattern = "mutable",
463 setter(into, strip_option),
464 default
465)]
466#[builder(build_fn(error = "OpenAIError"))]
467pub struct InputFileContent {
468 /// The content of the file to be sent to the model.
469 #[serde(skip_serializing_if = "Option::is_none")]
470 file_data: Option<String>,
471 /// The ID of the file to be sent to the model.
472 #[serde(skip_serializing_if = "Option::is_none")]
473 file_id: Option<String>,
474 /// The URL of the file to be sent to the model.
475 #[serde(skip_serializing_if = "Option::is_none")]
476 file_url: Option<String>,
477 /// The name of the file to be sent to the model.
478 #[serde(skip_serializing_if = "Option::is_none")]
479 filename: Option<String>,
480}
481
/// The conversation that this response belonged to. Input items and output items from this
/// response were automatically added to this conversation.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Conversation {
    /// The unique ID of the conversation that this response was associated with.
    pub id: String,
}
489
/// The `conversation` request parameter: either a bare conversation ID string
/// or a conversation object. Untagged, so the JSON shape decides.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ConversationParam {
    /// The unique ID of the conversation.
    ConversationID(String),
    /// The conversation that this response belongs to.
    Object(Conversation),
}
498
/// Additional output data that can be requested via the `include` request
/// parameter. Each variant serializes to its dotted wire name (per-variant
/// `rename` attributes below).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum IncludeEnum {
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    #[serde(rename = "web_search_call.results")]
    WebSearchCallResults,
    #[serde(rename = "web_search_call.action.sources")]
    WebSearchCallActionSources,
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    #[serde(rename = "computer_call_output.output.image_url")]
    ComputerCallOutputOutputImageUrl,
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    #[serde(rename = "message.output_text.logprobs")]
    MessageOutputTextLogprobs,
}
518
/// Options for streaming responses. Only set this when `stream: true`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseStreamOptions {
    /// When true, stream obfuscation will be enabled. Stream obfuscation adds
    /// random characters to an `obfuscation` field on streaming delta events to
    /// normalize payload sizes as a mitigation to certain side-channel attacks.
    /// These obfuscation fields are included by default, but add a small amount
    /// of overhead to the data stream. You can set `include_obfuscation` to
    /// false to optimize for bandwidth if you trust the network links between
    /// your application and the OpenAI API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_obfuscation: Option<bool>,
}
531
532/// Builder for a Responses API request.
533#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
534#[builder(
535 name = "CreateResponseArgs",
536 pattern = "mutable",
537 setter(into, strip_option),
538 default
539)]
540#[builder(build_fn(error = "OpenAIError"))]
541pub struct CreateResponse {
542 /// Whether to run the model response in the background.
543 /// [Learn more](https://platform.openai.com/docs/guides/background).
544 #[serde(skip_serializing_if = "Option::is_none")]
545 pub background: Option<bool>,
546
547 /// The conversation that this response belongs to. Items from this conversation are prepended to
548 /// `input_items` for this response request.
549 ///
550 /// Input items and output items from this response are automatically added to this conversation after
551 /// this response completes.
552 #[serde(skip_serializing_if = "Option::is_none")]
553 pub conversation: Option<ConversationParam>,
554
555 /// Specify additional output data to include in the model response. Currently supported
556 /// values are:
557 ///
558 /// - `web_search_call.action.sources`: Include the sources of the web search tool call.
559 ///
560 /// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
561 /// interpreter tool call items.
562 ///
563 /// - `computer_call_output.output.image_url`: Include image urls from the computer call
564 /// output.
565 ///
566 /// - `file_search_call.results`: Include the search results of the file search tool call.
567 ///
568 /// - `message.input_image.image_url`: Include image urls from the input message.
569 ///
570 /// - `message.output_text.logprobs`: Include logprobs with assistant messages.
571 ///
572 /// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
573 /// reasoning item outputs. This enables reasoning items to be used in multi-turn
574 /// conversations when using the Responses API statelessly (like when the `store` parameter is
575 /// set to `false`, or when an organization is enrolled in the zero data retention program).
576 #[serde(skip_serializing_if = "Option::is_none")]
577 pub include: Option<Vec<IncludeEnum>>,
578
579 /// Text, image, or file inputs to the model, used to generate a response.
580 ///
581 /// Learn more:
582 /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
583 /// - [Image inputs](https://platform.openai.com/docs/guides/images)
584 /// - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
585 /// - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
586 /// - [Function calling](https://platform.openai.com/docs/guides/function-calling)
587 pub input: InputParam,
588
589 /// A system (or developer) message inserted into the model's context.
590 ///
591 /// When using along with `previous_response_id`, the instructions from a previous
592 /// response will not be carried over to the next response. This makes it simple
593 /// to swap out system (or developer) messages in new responses.
594 #[serde(skip_serializing_if = "Option::is_none")]
595 pub instructions: Option<String>,
596
597 /// An upper bound for the number of tokens that can be generated for a response, including
598 /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
599 #[serde(skip_serializing_if = "Option::is_none")]
600 pub max_output_tokens: Option<u32>,
601
602 /// The maximum number of total calls to built-in tools that can be processed in a response. This
603 /// maximum number applies across all built-in tool calls, not per individual tool. Any further
604 /// attempts to call a tool by the model will be ignored.
605 #[serde(skip_serializing_if = "Option::is_none")]
606 pub max_tool_calls: Option<u32>,
607
608 /// Set of 16 key-value pairs that can be attached to an object. This can be
609 /// useful for storing additional information about the object in a structured
610 /// format, and querying for objects via API or the dashboard.
611 ///
612 /// Keys are strings with a maximum length of 64 characters. Values are
613 /// strings with a maximum length of 512 characters.
614 #[serde(skip_serializing_if = "Option::is_none")]
615 pub metadata: Option<HashMap<String, String>>,
616
617 /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
618 /// offers a wide range of models with different capabilities, performance
619 /// characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
620 /// to browse and compare available models.
621 #[serde(skip_serializing_if = "Option::is_none")]
622 pub model: Option<String>,
623
624 /// Whether to allow the model to run tool calls in parallel.
625 #[serde(skip_serializing_if = "Option::is_none")]
626 pub parallel_tool_calls: Option<bool>,
627
628 /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
629 /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
630 /// Cannot be used in conjunction with `conversation`.
631 #[serde(skip_serializing_if = "Option::is_none")]
632 pub previous_response_id: Option<String>,
633
634 /// Reference to a prompt template and its variables.
635 /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
636 #[serde(skip_serializing_if = "Option::is_none")]
637 pub prompt: Option<Prompt>,
638
639 /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
640 /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
641 #[serde(skip_serializing_if = "Option::is_none")]
642 pub prompt_cache_key: Option<String>,
643
644 /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
645 /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
646 /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
647 #[serde(skip_serializing_if = "Option::is_none")]
648 pub prompt_cache_retention: Option<PromptCacheRetention>,
649
650 /// **gpt-5 and o-series models only**
651 /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
652 #[serde(skip_serializing_if = "Option::is_none")]
653 pub reasoning: Option<Reasoning>,
654
655 /// A stable identifier used to help detect users of your application that may be violating OpenAI's
656 /// usage policies.
657 ///
658 /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
659 /// or email address, in order to avoid sending us any identifying information. [Learn
660 /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
661 #[serde(skip_serializing_if = "Option::is_none")]
662 pub safety_identifier: Option<String>,
663
664 /// Specifies the processing type used for serving the request.
665 /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
666 /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
667 /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
668 /// - When not set, the default behavior is 'auto'.
669 ///
670 /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
671 #[serde(skip_serializing_if = "Option::is_none")]
672 pub service_tier: Option<ServiceTier>,
673
674 /// Whether to store the generated model response for later retrieval via API.
675 #[serde(skip_serializing_if = "Option::is_none")]
676 pub store: Option<bool>,
677
678 /// If set to true, the model response data will be streamed to the client
679 /// as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
680 /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
681 /// for more information.
682 #[serde(skip_serializing_if = "Option::is_none")]
683 pub stream: Option<bool>,
684
685 /// Options for streaming responses. Only set this when you set `stream: true`.
686 #[serde(skip_serializing_if = "Option::is_none")]
687 pub stream_options: Option<ResponseStreamOptions>,
688
689 /// What sampling temperature to use, between 0 and 2. Higher values like 0.8
690 /// will make the output more random, while lower values like 0.2 will make it
691 /// more focused and deterministic. We generally recommend altering this or
692 /// `top_p` but not both.
693 #[serde(skip_serializing_if = "Option::is_none")]
694 pub temperature: Option<f32>,
695
696 /// Configuration options for a text response from the model. Can be plain
697 /// text or structured JSON data. Learn more:
698 /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
699 /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
700 #[serde(skip_serializing_if = "Option::is_none")]
701 pub text: Option<ResponseTextParam>,
702
703 /// How the model should select which tool (or tools) to use when generating
704 /// a response. See the `tools` parameter to see how to specify which tools
705 /// the model can call.
706 #[serde(skip_serializing_if = "Option::is_none")]
707 pub tool_choice: Option<ToolChoiceParam>,
708
709 /// An array of tools the model may call while generating a response. You
710 /// can specify which tool to use by setting the `tool_choice` parameter.
711 ///
712 /// We support the following categories of tools:
713 /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
714 /// model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
715 /// or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
716 /// [built-in tools](https://platform.openai.com/docs/guides/tools).
717 /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
718 /// or predefined connectors such as Google Drive and SharePoint. Learn more about
719 /// [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
720 /// - **Function calls (custom tools)**: Functions that are defined by you,
721 /// enabling the model to call your own code with strongly typed arguments
722 /// and outputs. Learn more about
723 /// [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
724 /// custom tools to call your own code.
725 #[serde(skip_serializing_if = "Option::is_none")]
726 pub tools: Option<Vec<Tool>>,
727
728 /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
729 /// token position, each with an associated log probability.
730 #[serde(skip_serializing_if = "Option::is_none")]
731 pub top_logprobs: Option<u8>,
732
733 /// An alternative to sampling with temperature, called nucleus sampling,
734 /// where the model considers the results of the tokens with top_p probability
735 /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
736 /// are considered.
737 ///
738 /// We generally recommend altering this or `temperature` but not both.
739 #[serde(skip_serializing_if = "Option::is_none")]
740 pub top_p: Option<f32>,
741
742 ///The truncation strategy to use for the model response.
743 /// - `auto`: If the input to this Response exceeds
744 /// the model's context window size, the model will truncate the
745 /// response to fit the context window by dropping items from the beginning of the conversation.
746 /// - `disabled` (default): If the input size will exceed the context window
747 /// size for a model, the request will fail with a 400 error.
748 #[serde(skip_serializing_if = "Option::is_none")]
749 pub truncation: Option<Truncation>,
750}
751
/// A substitution value for a prompt template variable.
///
/// Untagged: deserialization tries the variants in declaration order, so a
/// plain JSON string becomes `String` and only unmatched shapes fall through
/// to the catch-all `Custom` value.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ResponsePromptVariables {
    /// A plain string substitution value.
    String(String),
    /// A Response input content item, such as an image or file input.
    Content(InputContent),
    /// Any other JSON value (catch-all).
    Custom(serde_json::Value),
}
759
/// A reference to a stored prompt template, optionally pinned to a version
/// and supplied with variable substitutions.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Prompt {
    /// The unique identifier of the prompt template to use.
    pub id: String,

    /// Optional version of the prompt template.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,

    /// Optional map of values to substitute in for variables in your
    /// prompt. The substitution values can either be strings, or other
    /// Response input types like images or files.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variables: Option<ResponsePromptVariables>,
}
775
/// Processing tier for serving the request.
/// Serialized in lowercase (`auto`, `default`, `flex`, `scale`, `priority`).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ServiceTier {
    #[default]
    Auto,
    Default,
    Flex,
    Scale,
    Priority,
}
786
/// Truncation strategies.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Truncation {
    /// Drop items from the beginning of the conversation so the response
    /// fits the model's context window.
    Auto,
    /// Fail the request with a 400 error if the input exceeds the model's
    /// context window (the API default).
    Disabled,
}
794
/// Billing information attached to a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Billing {
    /// The payer for this response.
    pub payer: String,
}
799
/// o-series reasoning settings.
///
/// Construct with the generated [`ReasoningArgs`] builder.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ReasoningArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct Reasoning {
    /// Constrains effort on reasoning for
    /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
    /// reasoning effort can result in faster responses and fewer tokens used
    /// on reasoning in a response.
    ///
    /// Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option<ReasoningEffort>,
    /// A summary of the reasoning performed by the model. This can be
    /// useful for debugging and understanding the model's reasoning process.
    /// One of `auto`, `concise`, or `detailed`.
    ///
    /// `concise` is supported for `computer-use-preview` models and all reasoning models after
    /// `gpt-5`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummary>,
}
828
/// Verbosity of the model's response: `low`, `medium`, or `high`.
/// (See [`ResponseTextParam::verbosity`].)
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Verbosity {
    Low,
    Medium,
    High,
}
837
/// Level of detail for a reasoning summary: `auto`, `concise`, or `detailed`.
/// (See [`Reasoning::summary`].)
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ReasoningSummary {
    Auto,
    Concise,
    Detailed,
}
845
/// The retention policy for the prompt cache.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum PromptCacheRetention {
    /// Serialized as `in-memory`.
    #[serde(rename = "in-memory")]
    InMemory,
    /// Serialized as `24h`.
    #[serde(rename = "24h")]
    Hours24,
}
854
/// Configuration for text response format.
/// See [`TextResponseFormatConfiguration`] for the available formats.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTextParam {
    /// An object specifying the format that the model must output.
    ///
    /// Configuring `{ "type": "json_schema" }` enables Structured Outputs,
    /// which ensures the model will match your supplied JSON schema. Learn more in the
    /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
    ///
    /// The default format is `{ "type": "text" }` with no additional options.
    ///
    /// **Not recommended for gpt-4o and newer models:**
    ///
    /// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
    /// ensures the message the model generates is valid JSON. Using `json_schema`
    /// is preferred for models that support it.
    pub format: TextResponseFormatConfiguration,

    /// Constrains the verbosity of the model's response. Lower values will result in
    /// more concise responses, while higher values will result in more verbose responses.
    ///
    /// Currently supported values are `low`, `medium`, and `high`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<Verbosity>,
}
880
/// An object specifying the format the model must output, discriminated by
/// the `type` field: `text`, `json_object`, or `json_schema`.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum TextResponseFormatConfiguration {
    /// Default response format. Used to generate text responses.
    Text,
    /// JSON object response format. An older method of generating JSON responses.
    /// Using `json_schema` is recommended for models that support it.
    /// Note that the model will not generate JSON without a system or user message
    /// instructing it to do so.
    JsonObject,
    /// JSON Schema response format. Used to generate structured JSON responses.
    /// Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
    JsonSchema(ResponseFormatJsonSchema),
}
895
/// Definitions for model-callable tools.
///
/// Discriminated by the wire `type` field; each variant name in snake_case is
/// the tag unless explicitly renamed (the dated web-search variants below).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Tool {
    /// Defines a function in your own code the model can choose to call. Learn more about [function
    /// calling](https://platform.openai.com/docs/guides/tools).
    Function(FunctionTool),
    /// A tool that searches for relevant content from uploaded files. Learn more about the [file search
    /// tool](https://platform.openai.com/docs/guides/tools-file-search).
    FileSearch(FileSearchTool),
    /// A tool that controls a virtual computer. Learn more about the [computer
    /// use tool](https://platform.openai.com/docs/guides/tools-computer-use).
    ComputerUsePreview(ComputerUsePreviewTool),
    /// Search the Internet for sources related to the prompt. Learn more about the
    /// [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearch(WebSearchTool),
    /// type: web_search_2025_08_26
    #[serde(rename = "web_search_2025_08_26")]
    WebSearch20250826(WebSearchTool),
    /// Give the model access to additional tools via remote Model Context Protocol
    /// (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
    Mcp(MCPTool),
    /// A tool that runs Python code to help generate a response to a prompt.
    CodeInterpreter(CodeInterpreterTool),
    /// A tool that generates images using a model like `gpt-image-1`.
    ImageGeneration(ImageGenTool),
    /// A tool that allows the model to execute shell commands in a local environment.
    LocalShell,
    /// A tool that allows the model to execute shell commands.
    Shell,
    /// A custom tool that processes input using a specified format. Learn more about [custom
    /// tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
    Custom(CustomToolParam),
    /// This tool searches the web for relevant results to use in a response. Learn more about the [web search
    /// tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearchPreview(WebSearchTool),
    /// type: web_search_preview_2025_03_11
    #[serde(rename = "web_search_preview_2025_03_11")]
    WebSearchPreview20250311(WebSearchTool),
    /// Allows the assistant to create, delete, or update files using unified diffs.
    ApplyPatch,
}
938
939#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
940pub struct CustomToolParam {
941 /// The name of the custom tool, used to identify it in tool calls.
942 pub name: String,
943 /// Optional description of the custom tool, used to provide more context.
944 pub description: Option<String>,
945 /// The input format for the custom tool. Default is unconstrained text.
946 pub format: CustomToolParamFormat,
947}
948
/// Input format for a custom tool, discriminated by the wire `type` field
/// (`text` or `grammar`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum CustomToolParamFormat {
    /// Unconstrained free-form text.
    #[default]
    Text,
    /// A grammar defined by the user.
    Grammar(CustomGrammarFormatParam),
}
958
/// File search tool configuration. Construct with [`FileSearchToolArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "FileSearchToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct FileSearchTool {
    /// The IDs of the vector stores to search.
    pub vector_store_ids: Vec<String>,
    /// The maximum number of results to return. This number should be between 1 and 50 inclusive.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_num_results: Option<u32>,
    /// A filter to apply.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filters: Option<Filter>,
    /// Ranking options for search.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ranking_options: Option<RankingOptions>,
}
980
981#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
982#[builder(
983 name = "FunctionToolArgs",
984 pattern = "mutable",
985 setter(into, strip_option),
986 default
987)]
988pub struct FunctionTool {
989 /// The name of the function to call.
990 pub name: String,
991 /// A JSON schema object describing the parameters of the function.
992 #[serde(skip_serializing_if = "Option::is_none")]
993 pub parameters: Option<serde_json::Value>,
994 /// Whether to enforce strict parameter validation. Default `true`.
995 #[serde(skip_serializing_if = "Option::is_none")]
996 pub strict: Option<bool>,
997 /// A description of the function. Used by the model to determine whether or not to call the
998 /// function.
999 #[serde(skip_serializing_if = "Option::is_none")]
1000 pub description: Option<String>,
1001}
1002
/// Domain filters for the web search tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolFilters {
    /// Allowed domains for the search. If not provided, all domains are allowed.
    /// Subdomains of the provided domains are allowed as well.
    ///
    /// Example: `["pubmed.ncbi.nlm.nih.gov"]`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allowed_domains: Option<Vec<String>>,
}
1012
1013#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1014#[builder(
1015 name = "WebSearchToolArgs",
1016 pattern = "mutable",
1017 setter(into, strip_option),
1018 default
1019)]
1020pub struct WebSearchTool {
1021 /// Filters for the search.
1022 #[serde(skip_serializing_if = "Option::is_none")]
1023 pub filters: Option<WebSearchToolFilters>,
1024 /// The approximate location of the user.
1025 #[serde(skip_serializing_if = "Option::is_none")]
1026 pub user_location: Option<WebSearchApproximateLocation>,
1027 /// High level guidance for the amount of context window space to use for the search. One of `low`,
1028 /// `medium`, or `high`. `medium` is the default.
1029 #[serde(skip_serializing_if = "Option::is_none")]
1030 pub search_context_size: Option<WebSearchToolSearchContextSize>,
1031}
1032
/// Amount of context window space to use for a web search.
/// `medium` is the default (see [`WebSearchTool::search_context_size`]).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchToolSearchContextSize {
    Low,
    #[default]
    Medium,
    High,
}
1041
/// The type of computer environment the computer-use tool controls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ComputerEnvironment {
    Windows,
    Mac,
    Linux,
    Ubuntu,
    #[default]
    Browser,
}
1052
1053#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1054#[builder(
1055 name = "ComputerUsePreviewToolArgs",
1056 pattern = "mutable",
1057 setter(into, strip_option),
1058 default
1059)]
1060pub struct ComputerUsePreviewTool {
1061 /// The type of computer environment to control.
1062 environment: ComputerEnvironment,
1063 /// The width of the computer display.
1064 display_width: u32,
1065 /// The height of the computer display.
1066 display_height: u32,
1067}
1068
/// The ranker used for file search (see [`RankingOptions::ranker`]).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum RankVersionType {
    /// Serialized as `auto`.
    #[serde(rename = "auto")]
    Auto,
    /// Serialized as `default-2024-11-15`.
    #[serde(rename = "default-2024-11-15")]
    Default20241115,
}
1076
/// Weights balancing semantic (embedding) matches against sparse keyword
/// (text) matches in reciprocal rank fusion when hybrid search is enabled.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HybridSearch {
    /// The weight of the embedding in the reciprocal ranking fusion.
    pub embedding_weight: f32,
    /// The weight of the text in the reciprocal ranking fusion.
    pub text_weight: f32,
}
1084
/// Options for search result ranking in file search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RankingOptions {
    /// Weights that control how reciprocal rank fusion balances semantic embedding matches versus
    /// sparse keyword matches when hybrid search is enabled.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hybrid_search: Option<HybridSearch>,
    /// The ranker to use for the file search.
    pub ranker: RankVersionType,
    /// The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will
    /// attempt to return only the most relevant results, but may return fewer results.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub score_threshold: Option<f32>,
}
1099
/// Marker type for the location object: always serializes as `approximate`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchApproximateLocationType {
    #[default]
    Approximate,
}
1106
/// Approximate user location for web search.
/// Construct with [`WebSearchApproximateLocationArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "WebSearchApproximateLocationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct WebSearchApproximateLocation {
    /// The type of location approximation. Always `approximate`.
    pub r#type: WebSearchApproximateLocationType,
    /// Free text input for the city of the user, e.g. `San Francisco`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub city: Option<String>,
    /// The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,
    /// e.g. `US`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub country: Option<String>,
    /// Free text input for the region of the user, e.g. `California`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub region: Option<String>,
    /// The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g.
    /// `America/Los_Angeles`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timezone: Option<String>,
}
1134
/// Container configuration for a code interpreter.
///
/// Tagged by `type` for the `auto` object form; the `ContainerID` variant is
/// untagged, so a bare JSON string deserializes directly as a container ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolContainer {
    /// Configuration for a code interpreter container. Optionally specify the IDs of the
    /// files to run the code on.
    Auto(CodeInterpreterContainerAuto),

    /// The container ID.
    #[serde(untagged)]
    ContainerID(String),
}
1147
/// Auto configuration for code interpreter container.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct CodeInterpreterContainerAuto {
    /// An optional list of uploaded files to make available to your code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_ids: Option<Vec<String>>,

    /// Optional memory limit for the container.
    // NOTE(review): units are not specified here — presumably bytes; confirm
    // against the API reference.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_limit: Option<u64>,
}
1158
/// Code interpreter tool configuration. Construct with [`CodeInterpreterToolArgs`].
// NOTE(review): `derive(Default)` here requires `CodeInterpreterToolContainer`
// to implement `Default`, which is not derived in this file — presumably a
// manual impl exists elsewhere; confirm.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "CodeInterpreterToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CodeInterpreterTool {
    /// The code interpreter container. Can be a container ID or an object that
    /// specifies uploaded file IDs to make available to your code, along with an
    /// optional `memory_limit` setting.
    pub container: CodeInterpreterToolContainer,
}
1173
/// Optional mask for inpainting (see [`ImageGenTool::input_image_mask`]).
/// Provide either `image_url` or `file_id`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolInputImageMask {
    /// Base64-encoded mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
    /// File ID for the mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
}
1183
1184#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1185#[serde(rename_all = "lowercase")]
1186pub enum InputFidelity {
1187 #[default]
1188 High,
1189 Low,
1190}
1191
/// Moderation level for generated images. Default: `auto`
/// (see [`ImageGenTool::moderation`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolModeration {
    #[default]
    Auto,
    Low,
}
1199
/// Image generation tool definition. Construct with [`ImageGenerationArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ImageGenerationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct ImageGenTool {
    /// Background type for the generated image. One of `transparent`,
    /// `opaque`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<ImageGenToolBackground>,
    /// Control how much effort the model will exert to match the style and features, especially facial features,
    /// of input images. This parameter is only supported for `gpt-image-1`. Unsupported
    /// for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_fidelity: Option<InputFidelity>,
    /// Optional mask for inpainting. Contains `image_url`
    /// (string, optional) and `file_id` (string, optional).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_image_mask: Option<ImageGenToolInputImageMask>,
    /// The image generation model to use. Default: `gpt-image-1`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Moderation level for the generated image. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub moderation: Option<ImageGenToolModeration>,
    /// Compression level for the output image. Default: 100.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_compression: Option<u8>,
    /// The output format of the generated image. One of `png`, `webp`, or
    /// `jpeg`. Default: `png`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_format: Option<ImageGenToolOutputFormat>,
    /// Number of partial images to generate in streaming mode, from 0 (default value) to 3.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub partial_images: Option<u8>,
    /// The quality of the generated image. One of `low`, `medium`, `high`,
    /// or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quality: Option<ImageGenToolQuality>,
    /// The size of the generated image. One of `1024x1024`, `1024x1536`,
    /// `1536x1024`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<ImageGenToolSize>,
}
1248
/// Background type for a generated image. Default: `auto`
/// (see [`ImageGenTool::background`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolBackground {
    Transparent,
    Opaque,
    #[default]
    Auto,
}
1257
/// Output format for a generated image. Default: `png`
/// (see [`ImageGenTool::output_format`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolOutputFormat {
    #[default]
    Png,
    Webp,
    Jpeg,
}
1266
/// Quality of a generated image. Default: `auto`
/// (see [`ImageGenTool::quality`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolQuality {
    Low,
    Medium,
    High,
    #[default]
    Auto,
}
1276
/// Size of a generated image. Default: `auto`
/// (see [`ImageGenTool::size`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolSize {
    #[default]
    Auto,
    #[serde(rename = "1024x1024")]
    Size1024x1024,
    #[serde(rename = "1024x1536")]
    Size1024x1536,
    #[serde(rename = "1536x1024")]
    Size1536x1024,
}
1289
/// How the model may use a constrained tool set
/// (see [`ToolChoiceAllowed::mode`] for the semantics of each value).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceAllowedMode {
    Auto,
    Required,
}
1296
/// Constrains the model to a pre-defined set of allowed tools.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceAllowed {
    /// Constrains the tools available to the model to a pre-defined set.
    ///
    /// `auto` allows the model to pick from among the allowed tools and generate a
    /// message.
    ///
    /// `required` requires the model to call one or more of the allowed tools.
    pub mode: ToolChoiceAllowedMode,
    /// A list of tool definitions that the model should be allowed to call.
    ///
    /// For the Responses API, the list of tool definitions might look like:
    /// ```json
    /// [
    ///     { "type": "function", "name": "get_weather" },
    ///     { "type": "mcp", "server_label": "deepwiki" },
    ///     { "type": "image_generation" }
    /// ]
    /// ```
    pub tools: Vec<serde_json::Value>,
}
1318
/// The type of hosted tool the model should use. Learn more about
/// [built-in tools](https://platform.openai.com/docs/guides/tools).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceTypes {
    FileSearch,
    WebSearchPreview,
    ComputerUsePreview,
    CodeInterpreter,
    ImageGeneration,
}
1330
/// Forces the model to call a specific function
/// (used by [`ToolChoiceParam::Function`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceFunction {
    /// The name of the function to call.
    pub name: String,
}
1336
/// Forces the model to call a specific tool on a remote MCP server
/// (used by [`ToolChoiceParam::Mcp`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceMCP {
    /// The name of the tool to call on the server.
    pub name: String,
    /// The label of the MCP server to use.
    pub server_label: String,
}
1344
/// Forces the model to call a specific custom tool
/// (used by [`ToolChoiceParam::Custom`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceCustom {
    /// The name of the custom tool to call.
    pub name: String,
}
1350
/// How the model selects which tool(s) to use: a specific tool, an allowed
/// set, a hosted tool type, or a general mode (`none` / `auto` / `required`).
///
/// Tagged variants are matched by the `type` field first; the untagged
/// `Hosted` and `Mode` fallbacks are tried afterwards.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceParam {
    /// Constrains the tools available to the model to a pre-defined set.
    AllowedTools(ToolChoiceAllowed),

    /// Use this option to force the model to call a specific function.
    Function(ToolChoiceFunction),

    /// Use this option to force the model to call a specific tool on a remote MCP server.
    Mcp(ToolChoiceMCP),

    /// Use this option to force the model to call a custom tool.
    Custom(ToolChoiceCustom),

    /// Forces the model to call the apply_patch tool when executing a tool call.
    ApplyPatch,

    /// Forces the model to call the function shell tool when a tool call is required.
    Shell,

    /// Indicates that the model should use a built-in tool to generate a response.
    /// [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
    #[serde(untagged)]
    Hosted(ToolChoiceTypes),

    /// Controls which (if any) tool is called by the model.
    ///
    /// `none` means the model will not call any tool and instead generates a message.
    ///
    /// `auto` means the model can pick between generating a message or calling one or
    /// more tools.
    ///
    /// `required` means the model must call one or more tools.
    #[serde(untagged)]
    Mode(ToolChoiceOptions),
}
1388
/// General tool-calling mode (see [`ToolChoiceParam::Mode`]).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceOptions {
    /// The model will not call any tool and instead generates a message.
    None,
    /// The model can pick between generating a message or calling tools.
    Auto,
    /// The model must call one or more tools.
    Required,
}
1396
/// An error that occurred while generating the response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ErrorObject {
    /// A machine-readable error code that was returned.
    pub code: String,
    /// A human-readable description of the error that was returned.
    pub message: String,
}
1405
/// Details about an incomplete response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct IncompleteDetails {
    /// The reason why the response is incomplete.
    pub reason: String,
}
1412
/// A candidate token with its log probability.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TopLogProb {
    /// Raw bytes of the token — presumably the UTF-8 bytes of `token`; confirm.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The text token.
    pub token: String,
}
1419
/// Log probability information for one output token, including its
/// most likely alternatives.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LogProb {
    /// Raw bytes of the token — presumably the UTF-8 bytes of `token`; confirm.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The text token.
    pub token: String,
    /// The most likely alternative tokens at this position.
    pub top_logprobs: Vec<TopLogProb>,
}
1427
1428#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1429pub struct ResponseTopLobProb {
1430 /// The log probability of this token.
1431 pub logprob: f64,
1432 /// A possible text token.
1433 pub token: String,
1434}
1435
/// Log probability information for one output token.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseLogProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
    /// The log probability of the top 20 most likely tokens.
    pub top_logprobs: Vec<ResponseTopLobProb>,
}
1445
1446/// A simple text output from the model.
1447#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1448pub struct OutputTextContent {
1449 /// The annotations of the text output.
1450 pub annotations: Vec<Annotation>,
1451 pub logprobs: Option<Vec<LogProb>>,
1452 /// The text output from the model.
1453 pub text: String,
1454}
1455
/// An annotation that applies to a span of output text.
/// Discriminated by the wire `type` field (snake_case of the variant name).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Annotation {
    /// A citation to a file.
    FileCitation(FileCitationBody),
    /// A citation for a web resource used to generate a model response.
    UrlCitation(UrlCitationBody),
    /// A citation for a container file used to generate a model response.
    ContainerFileCitation(ContainerFileCitationBody),
    /// A path to a file.
    FilePath(FilePath),
}
1469
1470#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1471pub struct FileCitationBody {
1472 /// The ID of the file.
1473 file_id: String,
1474 /// The filename of the file cited.
1475 filename: String,
1476 /// The index of the file in the list of files.
1477 index: u32,
1478}
1479
1480#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1481pub struct UrlCitationBody {
1482 /// The index of the last character of the URL citation in the message.
1483 end_index: u32,
1484 /// The index of the first character of the URL citation in the message.
1485 start_index: u32,
1486 /// The title of the web resource.
1487 title: String,
1488 /// The URL of the web resource.
1489 url: String,
1490}
1491
1492#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1493pub struct ContainerFileCitationBody {
1494 /// The ID of the container file.
1495 container_id: String,
1496 /// The index of the last character of the container file citation in the message.
1497 end_index: u32,
1498 /// The ID of the file.
1499 file_id: String,
1500 /// The filename of the container file cited.
1501 filename: String,
1502 /// The index of the first character of the container file citation in the message.
1503 start_index: u32,
1504}
1505
1506#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1507pub struct FilePath {
1508 /// The ID of the file.
1509 file_id: String,
1510 /// The index of the file in the list of files.
1511 index: u32,
1512}
1513
/// A refusal explanation from the model
/// (wire content type `refusal`, see [`OutputMessageContent::Refusal`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RefusalContent {
    /// The refusal explanation from the model.
    pub refusal: String,
}
1520
/// A message generated by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputMessage {
    /// The content of the output message.
    pub content: Vec<OutputMessageContent>,
    /// The unique ID of the output message.
    pub id: String,
    /// The role of the output message. Always `assistant`.
    pub role: AssistantRole,
    /// The status of the message input. One of `in_progress`, `completed`, or
    /// `incomplete`. Populated when input items are returned via API.
    pub status: OutputStatus,
    // NOTE(review): the wire `type` field (always `"message"`) is not modeled
    // here — presumably consumed by an enclosing `type`-tagged enum such as
    // `Item`; confirm. See `MessageType` for the corresponding marker type.
}
1536
/// Marker type for a message item — always serializes as `message`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum MessageType {
    #[default]
    Message,
}
1543
/// The role for an output message - always `assistant`.
/// This type ensures type safety by only allowing the assistant role.
/// Serializes as the lowercase string `assistant`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum AssistantRole {
    #[default]
    Assistant,
}
1552
/// A content part of an output message, discriminated by the `type` field
/// (`output_text` or `refusal`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputMessageContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
}
1561
/// A content part of model output, discriminated by the `type` field.
/// Superset of [`OutputMessageContent`] that also allows reasoning text.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
    /// Reasoning text from the model.
    ReasoningText(ReasoningTextContent),
}
1572
/// Reasoning text emitted by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningTextContent {
    /// The reasoning text from the model.
    pub text: String,
}
1578
/// A reasoning item representing the model's chain of thought, including summary paragraphs.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningItem {
    /// Unique identifier of the reasoning content.
    pub id: String,
    /// Reasoning summary content.
    pub summary: Vec<SummaryPart>,
    /// Reasoning text content. Omitted from serialization when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<Vec<ReasoningTextContent>>,
    /// The encrypted content of the reasoning item - populated when a response is generated with
    /// `reasoning.encrypted_content` in the `include` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encrypted_content: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
1598
/// A single summary text fragment from reasoning output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Summary {
    /// A summary of the reasoning output from the model so far.
    pub text: String,
}
1605
/// A part of a reasoning summary, discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SummaryPart {
    /// A summary text fragment (type: `summary_text`).
    SummaryText(Summary),
}
1611
/// The results of a file search tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCall {
    /// The unique ID of the file search tool call.
    pub id: String,
    /// The queries used to search for files.
    pub queries: Vec<String>,
    /// The status of the file search tool call. One of `in_progress`, `searching`,
    /// `incomplete`, `failed`, or `completed`.
    pub status: FileSearchToolCallStatus,
    /// The results of the file search tool call. Omitted from serialization when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub results: Option<Vec<FileSearchToolCallResult>>,
}
1626
/// Status values reported for a file search tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FileSearchToolCallStatus {
    InProgress,
    Searching,
    Incomplete,
    Failed,
    Completed,
}
1636
/// A single result from a file search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCallResult {
    /// Set of up to 16 key-value pairs that can be attached to an object. Useful for
    /// storing additional information about the object in a structured format, and for
    /// querying objects via the API or the dashboard. Keys are strings with a maximum
    /// length of 64 characters. Values are strings with a maximum length of 512
    /// characters, booleans, or numbers.
    pub attributes: HashMap<String, serde_json::Value>,
    /// The unique ID of the file.
    pub file_id: String,
    /// The name of the file.
    pub filename: String,
    /// The relevance score of the file - a value between 0 and 1.
    pub score: f32,
    /// The text that was retrieved from the file.
    pub text: String,
}
1654
/// A pending safety check for a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallSafetyCheckParam {
    /// The ID of the pending safety check.
    pub id: String,
    /// The type of the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// Details about the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
1666
/// Status values reported for a web search tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum WebSearchToolCallStatus {
    InProgress,
    Searching,
    Completed,
    Failed,
}
1675
/// A source consulted during a web search action.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionSearchSource {
    /// The type of source. Always `url`.
    pub r#type: String,
    /// The URL of the source.
    pub url: String,
}
1683
/// A web search query performed by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionSearch {
    /// The search query.
    pub query: String,
    /// The sources used in the search.
    /// NOTE(review): unlike most Option fields in this file, `None` serializes as
    /// an explicit `null` here (no skip_serializing_if) — confirm this is intended.
    pub sources: Option<Vec<WebSearchActionSearchSource>>,
}
1691
/// An `open_page` web search action: the model opened a URL from search results.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionOpenPage {
    /// The URL opened by the model. `None` serializes as an explicit `null`.
    pub url: Option<String>,
}
1697
/// A `find` web search action: the model searched for a pattern within a loaded page.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionFind {
    /// The URL of the page searched for the pattern.
    pub url: String,
    /// The pattern or text to search for within the page.
    pub pattern: String,
}
1705
/// The specific action taken in a web search tool call, discriminated by `type`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum WebSearchToolCallAction {
    /// Action type "search" - Performs a web search query.
    Search(WebSearchActionSearch),
    /// Action type "open_page" - Opens a specific URL from search results.
    OpenPage(WebSearchActionOpenPage),
    /// Action type "find": Searches for a pattern within a loaded page.
    Find(WebSearchActionFind),
    /// Action type "find_in_page": https://platform.openai.com/docs/guides/tools-web-search#output-and-citations
    /// Shares the same payload shape as `find`.
    FindInPage(WebSearchActionFind),
}
1718
/// Web search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolCall {
    /// An object describing the specific action taken in this web search call. Includes
    /// details on how the model used the web (search, open_page, find, find_in_page).
    pub action: WebSearchToolCallAction,
    /// The unique ID of the web search tool call.
    pub id: String,
    /// The status of the web search tool call.
    pub status: WebSearchToolCallStatus,
}
1730
/// Output from a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerToolCall {
    /// The action the model wants performed (click, drag, type, etc.).
    pub action: ComputerAction,
    /// An identifier used when responding to the tool call with output.
    pub call_id: String,
    /// The unique ID of the computer call.
    pub id: String,
    /// The pending safety checks for the computer call.
    pub pending_safety_checks: Vec<ComputerCallSafetyCheckParam>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    pub status: OutputStatus,
}
1745
/// A point in 2D space, e.g. one step along a drag path.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DragPoint {
    /// The x-coordinate.
    pub x: i32,
    /// The y-coordinate.
    pub y: i32,
}
1754
/// Represents all user-triggered actions a computer tool call can request,
/// discriminated by the `type` field. `Screenshot` and `Wait` carry no payload.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ComputerAction {
    /// A click action.
    Click(ClickParam),

    /// A double click action.
    DoubleClick(DoubleClickAction),

    /// A drag action.
    Drag(Drag),

    /// A collection of keypresses the model would like to perform.
    Keypress(KeyPressAction),

    /// A mouse move action.
    Move(Move),

    /// A screenshot action.
    Screenshot,

    /// A scroll action.
    Scroll(Scroll),

    /// An action to type in text.
    Type(Type),

    /// A wait action.
    Wait,
}
1786
/// The mouse button used for a click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ClickButtonType {
    Left,
    Right,
    Wheel,
    Back,
    Forward,
}
1796
/// A click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ClickParam {
    /// Indicates which mouse button was pressed during the click. One of `left`,
    /// `right`, `wheel`, `back`, or `forward`.
    pub button: ClickButtonType,
    /// The x-coordinate where the click occurred.
    pub x: i32,
    /// The y-coordinate where the click occurred.
    pub y: i32,
}
1808
/// A double click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DoubleClickAction {
    /// The x-coordinate where the double click occurred.
    pub x: i32,
    /// The y-coordinate where the double click occurred.
    pub y: i32,
}
1817
/// A drag action along a path of points.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Drag {
    /// The path of points the cursor drags through.
    pub path: Vec<DragPoint>,
}
1824
/// A keypress action (a key combination pressed simultaneously).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KeyPressAction {
    /// The combination of keys the model is requesting to be pressed.
    /// This is an array of strings, each representing a key.
    pub keys: Vec<String>,
}
1832
/// A mouse move action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Move {
    /// The x-coordinate to move to.
    pub x: i32,
    /// The y-coordinate to move to.
    pub y: i32,
}
1841
/// A scroll action at a given cursor position.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Scroll {
    /// The horizontal scroll distance.
    pub scroll_x: i32,
    /// The vertical scroll distance.
    pub scroll_y: i32,
    /// The x-coordinate where the scroll occurred.
    pub x: i32,
    /// The y-coordinate where the scroll occurred.
    pub y: i32,
}
1854
/// A typing (text entry) action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Type {
    /// The text to type.
    pub text: String,
}
1861
/// A tool call to run a function defined by the caller.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionToolCall {
    /// A JSON string of the arguments to pass to the function.
    pub arguments: String,
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// The name of the function to run.
    pub name: String,
    /// The unique ID of the function tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
1878
/// Status values reported for an image generation tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ImageGenToolCallStatus {
    InProgress,
    Completed,
    Generating,
    Failed,
}
1887
/// An image generation request made by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolCall {
    /// The unique ID of the image generation call.
    pub id: String,
    /// The generated image encoded in base64. `None` (serialized as `null`)
    /// when no image is available, e.g. while generating or on failure.
    pub result: Option<String>,
    /// The status of the image generation call.
    pub status: ImageGenToolCallStatus,
}
1897
/// Status values reported for a code interpreter tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum CodeInterpreterToolCallStatus {
    InProgress,
    Completed,
    Incomplete,
    Interpreting,
    Failed,
}
1907
/// Output of a code interpreter request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterToolCall {
    /// The code to run, or null if not available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// ID of the container used to run the code.
    pub container_id: String,
    /// The unique ID of the code interpreter tool call.
    pub id: String,
    /// The outputs generated by the code interpreter, such as logs or images.
    /// Can be null if no outputs are available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub outputs: Option<Vec<CodeInterpreterToolCallOutput>>,
    /// The status of the code interpreter tool call.
    /// Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`.
    pub status: CodeInterpreterToolCallStatus,
}
1926
/// Individual result from a code interpreter: either logs or files.
/// Discriminated by the `type` field (`logs` or `image`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolCallOutput {
    /// Code interpreter output logs
    Logs(CodeInterpreterOutputLogs),
    /// Code interpreter output image
    Image(CodeInterpreterOutputImage),
}
1936
/// Log output produced by a code interpreter run.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputLogs {
    /// The logs output from the code interpreter.
    pub logs: String,
}
1942
/// Image output produced by a code interpreter run.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputImage {
    /// The URL of the image output from the code interpreter.
    pub url: String,
}
1948
1949#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1950pub struct CodeInterpreterFile {
1951 /// The ID of the file.
1952 file_id: String,
1953 /// The MIME type of the file.
1954 mime_type: String,
1955}
1956
/// A tool call to run a command on the local shell.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCall {
    /// Execute a shell command on the server.
    pub action: LocalShellExecAction,
    /// The unique ID of the local shell tool call generated by the model.
    pub call_id: String,
    /// The unique ID of the local shell call.
    pub id: String,
    /// The status of the local shell call.
    pub status: OutputStatus,
}
1968
/// Define the shape of a local shell action (exec).
/// NOTE(review): the Option fields here serialize `None` as explicit `null`
/// (no skip_serializing_if), unlike most of this file — confirm intended.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellExecAction {
    /// The command to run.
    pub command: Vec<String>,
    /// Environment variables to set for the command.
    pub env: HashMap<String, String>,
    /// Optional timeout in milliseconds for the command.
    pub timeout_ms: Option<u64>,
    /// Optional user to run the command as.
    pub user: Option<String>,
    /// Optional working directory to run the command in.
    pub working_directory: Option<String>,
}
1983
/// Commands and limits describing how to run the shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellActionParam {
    /// Ordered shell commands for the execution environment to run.
    pub commands: Vec<String>,
    /// Maximum wall-clock time in milliseconds to allow the shell commands to run.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timeout_ms: Option<u64>,
    /// Maximum number of UTF-8 characters to capture from combined stdout and stderr output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
1996
/// Status values reported for shell tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FunctionShellCallItemStatus {
    InProgress,
    Completed,
    Incomplete,
}
2005
/// A tool call representing a request to execute one or more shell commands.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallItemParam {
    /// The unique ID of the shell tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellActionParam,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<FunctionShellCallItemStatus>,
}
2020
/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcomeParam {
    /// The exit code returned by the shell process.
    pub exit_code: i32,
}
2027
/// The exit or timeout outcome associated with this chunk,
/// discriminated by the `type` field (`timeout` or `exit`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcomeParam {
    /// The commands were cut off by the timeout; no exit code is available.
    Timeout,
    /// The commands ran to completion with the given exit code.
    Exit(FunctionShellCallOutputExitOutcomeParam),
}
2035
/// Captured stdout and stderr for a portion of a shell tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContentParam {
    /// Captured stdout output for this chunk of the shell call.
    pub stdout: String,
    /// Captured stderr output for this chunk of the shell call.
    pub stderr: String,
    /// The exit or timeout outcome associated with this chunk.
    pub outcome: FunctionShellCallOutputOutcomeParam,
}
2046
/// The streamed output items emitted by a shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputItemParam {
    /// The unique ID of the shell tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// Captured chunks of stdout and stderr output, along with their associated outcomes.
    pub output: Vec<FunctionShellCallOutputContentParam>,
    /// The maximum number of UTF-8 characters captured for this shell call's combined output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
2061
/// Status values reported for apply_patch tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatusParam {
    InProgress,
    Completed,
}
2069
/// Instruction for creating a new file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperationParam {
    /// Path of the file to create relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply when creating the file.
    pub diff: String,
}
2078
/// Instruction for deleting an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperationParam {
    /// Path of the file to delete relative to the workspace root.
    pub path: String,
}
2085
/// Instruction for updating an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperationParam {
    /// Path of the file to update relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply to the existing file.
    pub diff: String,
}
2094
/// One of the create_file, delete_file, or update_file operations supplied to the apply_patch tool.
/// Discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperationParam {
    CreateFile(ApplyPatchCreateFileOperationParam),
    DeleteFile(ApplyPatchDeleteFileOperationParam),
    UpdateFile(ApplyPatchUpdateFileOperationParam),
}
2103
/// A tool call representing a request to create, delete, or update files using diff patches.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallItemParam {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatusParam,
    /// The specific create, delete, or update instruction for the apply_patch tool call.
    pub operation: ApplyPatchOperationParam,
}
2117
/// Outcome values reported for apply_patch tool call outputs.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallOutputStatusParam {
    Completed,
    Failed,
}
2125
/// The streamed output emitted by an apply patch tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallOutputItemParam {
    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call output. One of `completed` or `failed`.
    pub status: ApplyPatchCallOutputStatusParam,
    /// Optional human-readable log text from the apply patch tool (e.g., patch results or errors).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output: Option<String>,
}
2140
/// Shell exec action: execute a shell command.
/// NOTE(review): the Option fields here serialize `None` as explicit `null`
/// (no skip_serializing_if), unlike the `*Param` counterpart — confirm intended.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellAction {
    /// A list of commands to run.
    pub commands: Vec<String>,
    /// Optional timeout in milliseconds for the commands.
    pub timeout_ms: Option<u64>,
    /// Optional maximum number of characters to return from each command.
    pub max_output_length: Option<u64>,
}
2152
/// Status values reported for shell tool calls.
/// NOTE(review): despite the `LocalShell` name, this is used by `FunctionShellCall`
/// below — consider renaming for clarity.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum LocalShellCallStatus {
    InProgress,
    Completed,
    Incomplete,
}
2161
/// A tool call that executes one or more shell commands in a managed environment.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCall {
    /// The unique ID of the function shell tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the function shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellAction,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    pub status: LocalShellCallStatus,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2177
/// The content of a shell tool call output that was emitted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContent {
    /// The standard output that was captured.
    pub stdout: String,
    /// The standard error output that was captured.
    pub stderr: String,
    /// Represents either an exit outcome (with an exit code) or a timeout outcome for a shell call output chunk.
    /// Flattened: the outcome's `type` (and `exit_code`) appear inline in this object's JSON.
    #[serde(flatten)]
    pub outcome: FunctionShellCallOutputOutcome,
    /// The identifier of the actor that created the item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2192
/// Function shell call outcome, discriminated by the `type` field
/// (`timeout` or `exit`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcome {
    /// The commands were cut off by the timeout; no exit code is available.
    Timeout,
    /// The commands ran to completion with the given exit code.
    Exit(FunctionShellCallOutputExitOutcome),
}
2200
/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcome {
    /// Exit code from the shell process.
    pub exit_code: i32,
}
2207
/// The output of a shell tool call that was emitted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutput {
    /// The unique ID of the shell call output. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// An array of shell call output contents
    pub output: Vec<FunctionShellCallOutputContent>,
    /// The maximum length of the shell command output. This is generated by the model and should be
    /// passed back with the raw output. `None` serializes as an explicit `null`.
    pub max_output_length: Option<u64>,
    /// The identifier of the actor that created the item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2224
/// Status values reported for apply_patch tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatus {
    InProgress,
    Completed,
}
2232
/// Instruction describing how to create a file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperation {
    /// Path of the file to create.
    pub path: String,
    /// Unified diff to apply when creating the file.
    pub diff: String,
}
2241
/// Instruction describing how to delete a file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperation {
    /// Path of the file to delete.
    pub path: String,
}
2248
/// Instruction describing how to update a file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperation {
    /// Path of the file to update.
    pub path: String,
    /// Unified diff to apply to the existing file.
    pub diff: String,
}
2257
/// One of the create_file, delete_file, or update_file operations applied via apply_patch.
/// Discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperation {
    CreateFile(ApplyPatchCreateFileOperation),
    DeleteFile(ApplyPatchDeleteFileOperation),
    UpdateFile(ApplyPatchUpdateFileOperation),
}
2266
/// A tool call that applies file diffs by creating, deleting, or updating files.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCall {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatus,
    /// One of the create_file, delete_file, or update_file operations applied via apply_patch.
    pub operation: ApplyPatchOperation,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2282
/// Outcome values reported for apply_patch tool call outputs.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallOutputStatus {
    Completed,
    Failed,
}
2290
/// The output emitted by an apply patch tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallOutput {
    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call output. One of `completed` or `failed`.
    pub status: ApplyPatchCallOutputStatus,
    /// Optional textual output returned by the apply patch tool.
    /// `None` serializes as an explicit `null`.
    pub output: Option<String>,
    /// The ID of the entity that created this tool call output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2306
2307/// Output of an MCP server tool invocation.
2308#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2309pub struct MCPToolCall {
2310 /// A JSON string of the arguments passed to the tool.
2311 pub arguments: String,
2312 /// The unique ID of the tool call.
2313 pub id: String,
2314 /// The name of the tool that was run.
2315 pub name: String,
2316 /// The label of the MCP server running the tool.
2317 pub server_label: String,
2318 /// Unique identifier for the MCP tool call approval request. Include this value
2319 /// in a subsequent `mcp_approval_response` input to approve or reject the corresponding
2320 /// tool call.
2321 pub approval_request_id: Option<String>,
2322 /// Error message from the call, if any.
2323 pub error: Option<String>,
2324 /// The output from the tool call.
2325 pub output: Option<String>,
2326 /// The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
2327 /// `calling`, or `failed`.
2328 pub status: Option<MCPToolCallStatus>,
2329}
2330
/// Status values reported for an MCP tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum MCPToolCallStatus {
    InProgress,
    Completed,
    Incomplete,
    Calling,
    Failed,
}
2340
/// A list of tools available on an MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPListTools {
    /// The unique ID of the list.
    pub id: String,
    /// The label of the MCP server.
    pub server_label: String,
    /// The tools available on the server.
    pub tools: Vec<MCPListToolsTool>,
    /// Error message if listing failed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
2353
/// A request from an MCP server for human approval of a tool invocation.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalRequest {
    /// JSON string of arguments for the tool.
    pub arguments: String,
    /// The unique ID of the approval request.
    pub id: String,
    /// The name of the tool to run.
    pub name: String,
    /// The label of the MCP server making the request.
    pub server_label: String,
}
2365
/// System/developer instructions for the model: either a plain string or a
/// list of input items. Untagged: the JSON shape alone decides the variant.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum Instructions {
    /// A text input to the model, equivalent to a text input with the `developer` role.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    Array(Vec<InputItem>),
}
2374
2375/// The complete response returned by the Responses API.
2376#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2377pub struct Response {
2378 /// Whether to run the model response in the background.
2379 /// [Learn more](https://platform.openai.com/docs/guides/background).
2380 #[serde(skip_serializing_if = "Option::is_none")]
2381 pub background: Option<bool>,
2382
2383 /// Billing information for the response.
2384 #[serde(skip_serializing_if = "Option::is_none")]
2385 pub billing: Option<Billing>,
2386
2387 /// The conversation that this response belongs to. Input items and output
2388 /// items from this response are automatically added to this conversation.
2389 #[serde(skip_serializing_if = "Option::is_none")]
2390 pub conversation: Option<Conversation>,
2391
2392 /// Unix timestamp (in seconds) when this Response was created.
2393 pub created_at: u64,
2394
2395 /// Unix timestamp (in seconds) of when this Response was completed.
2396 /// Only present when the status is `completed`.
2397 #[serde(skip_serializing_if = "Option::is_none")]
2398 pub completed_at: Option<u64>,
2399
2400 /// An error object returned when the model fails to generate a Response.
2401 #[serde(skip_serializing_if = "Option::is_none")]
2402 pub error: Option<ErrorObject>,
2403
2404 /// Unique identifier for this response.
2405 pub id: String,
2406
2407 /// Details about why the response is incomplete, if any.
2408 #[serde(skip_serializing_if = "Option::is_none")]
2409 pub incomplete_details: Option<IncompleteDetails>,
2410
2411 /// A system (or developer) message inserted into the model's context.
2412 ///
2413 /// When using along with `previous_response_id`, the instructions from a previous response
2414 /// will not be carried over to the next response. This makes it simple to swap out
2415 /// system (or developer) messages in new responses.
2416 #[serde(skip_serializing_if = "Option::is_none")]
2417 pub instructions: Option<Instructions>,
2418
2419 /// An upper bound for the number of tokens that can be generated for a response,
2420 /// including visible output tokens and
2421 /// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
2422 #[serde(skip_serializing_if = "Option::is_none")]
2423 pub max_output_tokens: Option<u32>,
2424
2425 /// Set of 16 key-value pairs that can be attached to an object. This can be
2426 /// useful for storing additional information about the object in a structured
2427 /// format, and querying for objects via API or the dashboard.
2428 ///
2429 /// Keys are strings with a maximum length of 64 characters. Values are strings
2430 /// with a maximum length of 512 characters.
2431 #[serde(skip_serializing_if = "Option::is_none")]
2432 pub metadata: Option<HashMap<String, String>>,
2433
2434 /// Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a
2435 /// wide range of models with different capabilities, performance characteristics,
2436 /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
2437 pub model: String,
2438
2439 /// The object type of this resource - always set to `response`.
2440 pub object: String,
2441
2442 /// An array of content items generated by the model.
2443 ///
2444 /// - The length and order of items in the output array is dependent on the model's response.
2445 /// - Rather than accessing the first item in the output array and assuming it's an assistant
2446 /// message with the content generated by the model, you might consider using
2447 /// the `output_text` property where supported in SDKs.
2448 pub output: Vec<OutputItem>,
2449
2450 /// SDK-only convenience property that contains the aggregated text output from all
2451 /// `output_text` items in the `output` array, if any are present.
2452 /// Supported in the Python and JavaScript SDKs.
2453 // #[serde(skip_serializing_if = "Option::is_none")]
2454 // pub output_text: Option<String>,
2455
2456 /// Whether to allow the model to run tool calls in parallel.
2457 #[serde(skip_serializing_if = "Option::is_none")]
2458 pub parallel_tool_calls: Option<bool>,
2459
2460 /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
2461 /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
2462 /// Cannot be used in conjunction with `conversation`.
2463 #[serde(skip_serializing_if = "Option::is_none")]
2464 pub previous_response_id: Option<String>,
2465
2466 /// Reference to a prompt template and its variables.
2467 /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
2468 #[serde(skip_serializing_if = "Option::is_none")]
2469 pub prompt: Option<Prompt>,
2470
2471 /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
2472 /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
2473 #[serde(skip_serializing_if = "Option::is_none")]
2474 pub prompt_cache_key: Option<String>,
2475
2476 /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
2477 /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
2478 /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
2479 #[serde(skip_serializing_if = "Option::is_none")]
2480 pub prompt_cache_retention: Option<PromptCacheRetention>,
2481
2482 /// **gpt-5 and o-series models only**
2483 /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
2484 #[serde(skip_serializing_if = "Option::is_none")]
2485 pub reasoning: Option<Reasoning>,
2486
2487 /// A stable identifier used to help detect users of your application that may be violating OpenAI's
2488 /// usage policies.
2489 ///
2490 /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
2491 /// or email address, in order to avoid sending us any identifying information. [Learn
2492 /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
2493 #[serde(skip_serializing_if = "Option::is_none")]
2494 pub safety_identifier: Option<String>,
2495
2496 /// Specifies the processing type used for serving the request.
2497 /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
2498 /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
2499 /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
2500 /// - When not set, the default behavior is 'auto'.
2501 ///
2502 /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
2503 #[serde(skip_serializing_if = "Option::is_none")]
2504 pub service_tier: Option<ServiceTier>,
2505
2506 /// The status of the response generation.
2507 /// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or `incomplete`.
2508 pub status: Status,
2509
2510 /// What sampling temperature was used, between 0 and 2. Higher values like 0.8 make
2511 /// outputs more random, lower values like 0.2 make output more focused and deterministic.
2512 ///
2513 /// We generally recommend altering this or `top_p` but not both.
2514 #[serde(skip_serializing_if = "Option::is_none")]
2515 pub temperature: Option<f32>,
2516
2517 /// Configuration options for a text response from the model. Can be plain
2518 /// text or structured JSON data. Learn more:
2519 /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
2520 /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
2521 #[serde(skip_serializing_if = "Option::is_none")]
2522 pub text: Option<ResponseTextParam>,
2523
2524 /// How the model should select which tool (or tools) to use when generating
2525 /// a response. See the `tools` parameter to see how to specify which tools
2526 /// the model can call.
2527 #[serde(skip_serializing_if = "Option::is_none")]
2528 pub tool_choice: Option<ToolChoiceParam>,
2529
2530 /// An array of tools the model may call while generating a response. You
2531 /// can specify which tool to use by setting the `tool_choice` parameter.
2532 ///
2533 /// We support the following categories of tools:
2534 /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
2535 /// model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
2536 /// or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
2537 /// [built-in tools](https://platform.openai.com/docs/guides/tools).
2538 /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
2539 /// or predefined connectors such as Google Drive and SharePoint. Learn more about
2540 /// [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
2541 /// - **Function calls (custom tools)**: Functions that are defined by you,
2542 /// enabling the model to call your own code with strongly typed arguments
2543 /// and outputs. Learn more about
2544 /// [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
2545 /// custom tools to call your own code.
2546 #[serde(skip_serializing_if = "Option::is_none")]
2547 pub tools: Option<Vec<Tool>>,
2548
2549 /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
2550 /// token position, each with an associated log probability.
2551 #[serde(skip_serializing_if = "Option::is_none")]
2552 pub top_logprobs: Option<u8>,
2553
2554 /// An alternative to sampling with temperature, called nucleus sampling,
2555 /// where the model considers the results of the tokens with top_p probability
2556 /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
2557 /// are considered.
2558 ///
2559 /// We generally recommend altering this or `temperature` but not both.
2560 #[serde(skip_serializing_if = "Option::is_none")]
2561 pub top_p: Option<f32>,
2562
2563 ///The truncation strategy to use for the model response.
2564 /// - `auto`: If the input to this Response exceeds
2565 /// the model's context window size, the model will truncate the
2566 /// response to fit the context window by dropping items from the beginning of the conversation.
2567 /// - `disabled` (default): If the input size will exceed the context window
2568 /// size for a model, the request will fail with a 400 error.
2569 #[serde(skip_serializing_if = "Option::is_none")]
2570 pub truncation: Option<Truncation>,
2571
2572 /// Represents token usage details including input tokens, output tokens,
2573 /// a breakdown of output tokens, and the total tokens used.
2574 #[serde(skip_serializing_if = "Option::is_none")]
2575 pub usage: Option<ResponseUsage>,
2576}
2577
/// The status of a response generation.
///
/// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or
/// `incomplete` (serialized in `snake_case`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum Status {
    Completed,
    Failed,
    InProgress,
    Cancelled,
    Queued,
    Incomplete,
}
2588
/// An output item produced by the model in `Response::output`.
///
/// Internally tagged on the `type` field; the JSON discriminator is the
/// `snake_case` form of the variant name (e.g. `"file_search_call"`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum OutputItem {
    /// An output message from the model.
    Message(OutputMessage),
    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
    /// for more information.
    FileSearchCall(FileSearchToolCall),
    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling)
    /// for more information.
    FunctionCall(FunctionToolCall),
    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search)
    /// for more information.
    WebSearchCall(WebSearchToolCall),
    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
    /// for more information.
    ComputerCall(ComputerToolCall),
    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API for
    /// subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),
    /// A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
    Compaction(CompactionBody),
    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),
    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),
    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),
    /// A tool call that executes one or more shell commands in a managed environment.
    ShellCall(FunctionShellCall),
    /// The output of a shell tool call.
    ShellCallOutput(FunctionShellCallOutput),
    /// A tool call that applies file diffs by creating, deleting, or updating files.
    ApplyPatchCall(ApplyPatchToolCall),
    /// The output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutput),
    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),
    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),
    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),
    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
2642
/// A call to a custom tool created by the model (a `custom_tool_call` item).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[non_exhaustive]
pub struct CustomToolCall {
    /// An identifier used to map this custom tool call to a tool call output.
    pub call_id: String,
    /// The input for the custom tool call generated by the model.
    pub input: String,
    /// The name of the custom tool being called.
    pub name: String,
    /// The unique ID of the custom tool call in the OpenAI platform.
    pub id: String,
}
2655
/// Confirmation object returned when a response is deleted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct DeleteResponse {
    /// The object type of the deleted resource.
    pub object: String,
    /// Whether the response was deleted.
    pub deleted: bool,
    /// The ID of the deleted response.
    pub id: String,
}
2662
/// A reference to a response item by its ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct AnyItemReference {
    /// The item type.
    // NOTE(review): presumably `"item_reference"` when present — confirm against the
    // API spec; the field accepts any string and may be omitted.
    pub r#type: Option<String>,
    /// The ID of the referenced item.
    pub id: String,
}
2668
/// A fully-typed item stored on a response, discriminated by its `type` field
/// (the `snake_case` form of the variant name, e.g. `"function_call_output"`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ItemResourceItem {
    Message(MessageItem),
    FileSearchCall(FileSearchToolCall),
    ComputerCall(ComputerToolCall),
    ComputerCallOutput(ComputerCallOutputItemParam),
    WebSearchCall(WebSearchToolCall),
    FunctionCall(FunctionToolCall),
    FunctionCallOutput(FunctionCallOutputItemParam),
    ImageGenerationCall(ImageGenToolCall),
    CodeInterpreterCall(CodeInterpreterToolCall),
    LocalShellCall(LocalShellToolCall),
    LocalShellCallOutput(LocalShellToolCallOutput),
    ShellCall(FunctionShellCallItemParam),
    ShellCallOutput(FunctionShellCallOutputItemParam),
    ApplyPatchCall(ApplyPatchToolCallItemParam),
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
    McpListTools(MCPListTools),
    McpApprovalRequest(MCPApprovalRequest),
    McpApprovalResponse(MCPApprovalResponse),
    McpCall(MCPToolCall),
}
2692
2693#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2694#[serde(untagged)]
2695pub enum ItemResource {
2696 ItemReference(AnyItemReference),
2697 Item(ItemResourceItem),
2698}
2699
/// A paginated list of Response items.
///
/// `first_id`/`last_id` serve as pagination cursors for the surrounding list endpoint.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseItemList {
    /// The type of object returned, must be `list`.
    pub object: String,
    /// The ID of the first item in the list.
    pub first_id: Option<String>,
    /// The ID of the last item in the list.
    pub last_id: Option<String>,
    /// Whether there are more items in the list.
    pub has_more: bool,
    /// The list of items.
    pub data: Vec<ItemResource>,
}
2714
/// Request body describing a hypothetical response request, used to count input tokens.
///
/// All fields are optional and omitted from serialization when `None`; construct via
/// `TokenCountsBodyArgs` (derive_builder, mutable pattern, `into` + `strip_option` setters).
// NOTE(review): the field set mirrors the create-response request; presumably sent to a
// token-counting endpoint — confirm against the client method that uses this type.
#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
#[builder(
    name = "TokenCountsBodyArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct TokenCountsBody {
    /// The conversation that this response belongs to. Items from this
    /// conversation are prepended to `input_items` for this response request.
    /// Input items and output items from this response are automatically added to this
    /// conversation after this response completes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<ConversationParam>,

    /// Text, image, or file inputs to the model, used to generate a response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
    /// to browse and compare available models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You can specify which tool
    /// to use by setting the `tool_choice` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    /// the model's context window size, the model will truncate the
    /// response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    /// size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,
}
2792
/// The result of a token-count request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TokenCountsResource {
    /// The object type of this resource.
    pub object: String,
    /// The number of input tokens counted for the request.
    pub input_tokens: u32,
}
2798
/// A compaction item generated by the `/v1/responses/compact` API.
///
/// Parameter (input) form of a compaction item; compare `CompactionBody`, the
/// resource form returned in `Response::output`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactionSummaryItemParam {
    /// The ID of the compaction item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The encrypted content of the compaction summary.
    pub encrypted_content: String,
}
2808
/// A compaction item generated by the `/v1/responses/compact` API.
///
/// This is the resource form carried in `OutputItem::Compaction`; compare
/// `CompactionSummaryItemParam`, the parameter (input) form.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactionBody {
    /// The unique ID of the compaction item.
    pub id: String,
    /// The encrypted content that was produced by compaction.
    pub encrypted_content: String,
    /// The identifier of the actor that created the item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2820
/// Request to compact a conversation.
///
/// Construct via `CompactResponseRequestArgs` (derive_builder, mutable pattern,
/// `into` + `strip_option` setters). `model` is the only required field; the
/// optional fields are omitted from serialization when `None`.
#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
#[builder(name = "CompactResponseRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CompactResponseRequest {
    /// Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wide range of models
    /// with different capabilities, performance characteristics, and price points. Refer to the
    /// [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: String,

    /// Text, image, or file inputs to the model, used to generate a response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,
}
2852
/// The compacted response object returned by the `/v1/responses/compact` API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactResource {
    /// The unique identifier for the compacted response.
    pub id: String,
    /// The object type. Always `response.compaction`.
    pub object: String,
    /// The compacted list of output items. This is a list of all user messages,
    /// followed by a single compaction item.
    pub output: Vec<OutputItem>,
    /// Unix timestamp (in seconds) when the compacted conversation was created.
    pub created_at: u64,
    /// Token accounting for the compaction pass, including cached, reasoning, and total tokens.
    pub usage: ResponseUsage,
}