// async_openai/types/responses/response.rs
1use crate::error::OpenAIError;
2use crate::types::mcp::{MCPListToolsTool, MCPTool};
3use crate::types::responses::{
4 CustomGrammarFormatParam, Filter, ImageDetail, ReasoningEffort, ResponseFormatJsonSchema,
5 ResponseUsage,
6};
7use derive_builder::Builder;
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10
/// Role of messages in the API.
///
/// Serialized in lowercase (e.g. `"user"`, `"assistant"`).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    /// End-user input (the default role).
    #[default]
    User,
    /// A response generated by the model.
    Assistant,
    /// System instructions; take precedence over instructions given with the `user` role.
    System,
    /// Developer instructions; take precedence over instructions given with the `user` role.
    Developer,
}

/// Status of input/output items.
///
/// Serialized in snake_case (e.g. `"in_progress"`).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OutputStatus {
    /// The item is still being generated.
    InProgress,
    /// The item finished successfully.
    Completed,
    /// The item ended without completing.
    Incomplete,
}
30
/// Input to the model used to generate a response.
///
/// Untagged for serde: a bare JSON string deserializes to `Text`,
/// a JSON array to `Items`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputParam {
    /// A text input to the model, equivalent to a text input with the
    /// `user` role.
    Text(String),
    /// A list of one or many input items to the model, containing
    /// different content types.
    Items(Vec<InputItem>),
}
41
/// Content item used to generate a response.
///
/// This is a properly discriminated union based on the `type` field, using Rust's
/// type-safe enum with serde's tag attribute for efficient deserialization.
///
/// # OpenAPI Specification
/// Corresponds to the `Item` schema in the OpenAPI spec with a `type` discriminator.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Item {
    /// A message (type: "message").
    /// Can represent InputMessage (user/system/developer) or OutputMessage (assistant).
    ///
    /// InputMessage:
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the developer or system role take precedence over instructions given with the user role.
    /// OutputMessage:
    /// A message output from the model.
    Message(MessageItem),

    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
    FileSearchCall(FileSearchToolCall),

    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
    ComputerCall(ComputerToolCall),

    /// The output of a computer tool call.
    ComputerCallOutput(ComputerCallOutputItemParam),

    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
    WebSearchCall(WebSearchToolCall),

    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
    FunctionCall(FunctionToolCall),

    /// The output of a function tool call.
    FunctionCallOutput(FunctionCallOutputItemParam),

    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API
    /// for subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),

    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),

    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),

    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),

    /// The output of a local shell tool call.
    LocalShellCallOutput(LocalShellToolCallOutput),

    /// A tool representing a request to execute one or more shell commands.
    FunctionShellCall(FunctionShellCallItemParam),

    /// The streamed output items emitted by a function shell tool call.
    FunctionShellCallOutput(FunctionShellCallOutputItemParam),

    /// A tool call representing a request to create, delete, or update files using diff patches.
    ApplyPatchCall(ApplyPatchToolCallItemParam),

    /// The streamed output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),

    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),

    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),

    /// A response to an MCP approval request.
    McpApprovalResponse(MCPApprovalResponse),

    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),

    /// The output of a custom tool call from your code, being sent back to the model.
    CustomToolCallOutput(CustomToolCallOutput),

    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
133
/// Input item that can be used in the context for generating a response.
///
/// This represents the OpenAPI `InputItem` schema which is an `anyOf`:
/// 1. `EasyInputMessage` - Simple, user-friendly message input (can use string content)
/// 2. `Item` - Structured items with proper type discrimination (including InputMessage, OutputMessage, tool calls)
/// 3. `ItemReferenceParam` - Reference to an existing item by ID (type can be null)
///
/// Uses untagged deserialization because these types overlap in structure.
/// Order matters: more specific structures are tried first.
///
/// # OpenAPI Specification
/// Corresponds to the `InputItem` schema: `anyOf[EasyInputMessage, Item, ItemReferenceParam]`
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputItem {
    /// A reference to an existing item by ID.
    /// Has a required `id` field and optional `type` (can be "item_reference" or null).
    /// Must be tried first as it's the most minimal structure.
    ItemReference(ItemReference),

    /// All structured items with proper type discrimination.
    /// Includes InputMessage, OutputMessage, and all tool calls/outputs.
    /// Uses the discriminated `Item` enum for efficient, type-safe deserialization.
    Item(Item),

    /// A simple, user-friendly message input (EasyInputMessage).
    /// Supports string content and can include assistant role for previous responses.
    /// Must be tried last as it's the most flexible structure.
    ///
    /// A message input to the model with a role indicating instruction following
    /// hierarchy. Instructions given with the `developer` or `system` role take
    /// precedence over instructions given with the `user` role. Messages with the
    /// `assistant` role are presumed to have been generated by the model in previous
    /// interactions.
    EasyMessage(EasyInputMessage),
}
170
/// A message item used within the `Item` enum.
///
/// Both InputMessage and OutputMessage have `type: "message"`, so we use an untagged
/// enum to distinguish them based on their structure:
/// - OutputMessage: role=assistant, required id & status fields
/// - InputMessage: role=user/system/developer, content is `Vec<ContentType>`, optional id/status
///
/// Note: EasyInputMessage is NOT included here - it's a separate variant in `InputItem`,
/// not part of the structured `Item` enum.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum MessageItem {
    /// An output message from the model (role: assistant, has required id & status).
    /// This must come first as it has the most specific structure (required id and status fields).
    Output(OutputMessage),

    /// A structured input message (role: user/system/developer, content is `Vec<ContentType>`).
    /// Has structured content list and optional id/status fields.
    ///
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the `developer` or `system` role take precedence over instructions
    /// given with the `user` role.
    Input(InputMessage),
}
195
/// A reference to an existing item by ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ItemReference {
    /// The type of item to reference. Can be "item_reference" or null
    /// (omitted from serialization when `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub r#type: Option<ItemReferenceType>,
    /// The ID of the item to reference.
    pub id: String,
}

/// Discriminator value for [`ItemReference`]; serializes as `"item_reference"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ItemReferenceType {
    ItemReference,
}
211
/// Output from a function call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionCallOutputItemParam {
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// Text, image, or file output of the function tool call.
    pub output: FunctionCallOutput,
    /// The unique ID of the function tool call output.
    /// Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}

/// Payload of a function tool call output: either a raw JSON string or a
/// list of structured content parts. Untagged for serde.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum FunctionCallOutput {
    /// A JSON string of the output of the function tool call.
    Text(String),
    /// Structured content output of the function tool call.
    Content(Vec<InputContent>), // TODO use shape which allows null from OpenAPI spec?
}
236
/// The output of a computer tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallOutputItemParam {
    /// The ID of the computer tool call that produced the output.
    pub call_id: String,
    /// A computer screenshot image used with the computer use tool.
    pub output: ComputerScreenshotImage,
    /// The safety checks reported by the API that have been acknowledged by the developer.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acknowledged_safety_checks: Option<Vec<ComputerCallSafetyCheckParam>>,
    /// The unique ID of the computer tool call output. Optional when creating.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the message input. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when input items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
254
/// Discriminator value for [`ComputerScreenshotImage`]; serializes as
/// `"computer_screenshot"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ComputerScreenshotImageType {
    ComputerScreenshot,
}

/// A computer screenshot image used with the computer use tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerScreenshotImage {
    /// Specifies the event type. For a computer screenshot, this property is always
    /// set to `computer_screenshot`.
    pub r#type: ComputerScreenshotImageType,
    /// The identifier of an uploaded file that contains the screenshot.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the screenshot image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
274
/// Output from a local shell tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCallOutput {
    /// The unique ID of the local shell tool call generated by the model.
    pub id: String,

    /// A JSON string of the output of the local shell tool call.
    pub output: String,

    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}

/// Output from a local shell command execution.
///
/// All fields are optional and omitted from serialization when `None`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellOutput {
    /// The stdout output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stdout: Option<String>,

    /// The stderr output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stderr: Option<String>,

    /// The exit code of the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exit_code: Option<i32>,
}
304
/// An MCP approval response that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalResponse {
    /// The ID of the approval request being answered.
    pub approval_request_id: String,

    /// Whether the request was approved.
    pub approve: bool,

    /// The unique ID of the approval response.
    /// Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,

    /// Optional reason for the decision.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
322
/// Payload of a custom tool call output: either a plain string or a list of
/// structured content parts. Untagged for serde.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum CustomToolCallOutputOutput {
    /// A string of the output of the custom tool call.
    Text(String),
    /// Text, image, or file output of the custom tool call.
    List(Vec<InputContent>),
}

/// The output of a custom tool call from your code, being sent back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CustomToolCallOutput {
    /// The call ID, used to map this custom tool call output to a custom tool call.
    pub call_id: String,

    /// The output from the custom tool call generated by your code.
    /// Can be a string or a list of output content.
    pub output: CustomToolCallOutputOutput,

    /// The unique ID of the custom tool call output in the OpenAI platform.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
345
/// A simplified message input to the model (EasyInputMessage in the OpenAPI spec).
///
/// This is the most user-friendly way to provide messages, supporting both simple
/// string content and structured content. Role can include `assistant` for providing
/// previous assistant responses.
// NOTE(review): `#[derive(Default)]` here requires `MessageType` and
// `EasyInputContent` to implement `Default` — neither impl is visible in this
// file; presumably provided elsewhere in the crate. TODO confirm.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "EasyInputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct EasyInputMessage {
    /// The type of the message input. Always set to `message`.
    pub r#type: MessageType,
    /// The role of the message input. One of `user`, `assistant`, `system`, or `developer`.
    pub role: Role,
    /// Text, image, or audio input to the model, used to generate a response.
    /// Can also contain previous assistant responses.
    pub content: EasyInputContent,
}
368
/// A structured message input to the model (InputMessage in the OpenAPI spec).
///
/// This variant requires structured content (not a simple string) and does not support
/// the `assistant` role (use OutputMessage for that). status is populated when items are returned via API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputMessage {
    /// A list of one or many input items to the model, containing different content types.
    pub content: Vec<InputContent>,
    /// The role of the message input. One of `user`, `system`, or `developer`.
    /// Note: `assistant` is NOT allowed here; use OutputMessage instead.
    pub role: InputRole,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
    // NOTE(review): the OpenAPI spec also defines `type: "message"` on this
    // object (`pub r#type: MessageType`); the field is deliberately omitted
    // here — confirm against the spec before adding it.
}
394
/// The role for an input message - can only be `user`, `system`, or `developer`.
/// This type ensures type safety by excluding the `assistant` role (use OutputMessage for that).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum InputRole {
    /// End-user input (the default role).
    #[default]
    User,
    /// System instructions; take precedence over `user` instructions.
    System,
    /// Developer instructions; take precedence over `user` instructions.
    Developer,
}

/// Content for EasyInputMessage - can be a simple string or structured list.
///
/// Untagged for serde: a bare JSON string deserializes to `Text`, a JSON
/// array to `ContentList`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum EasyInputContent {
    /// A text input to the model.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    ContentList(Vec<InputContent>),
}
415
/// Parts of a message: text, image, file, or audio.
///
/// Discriminated by the `type` field (e.g. `"input_text"`, `"input_image"`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    /// A text input to the model.
    InputText(InputTextContent),
    /// An image input to the model. Learn about
    /// [image inputs](https://platform.openai.com/docs/guides/vision).
    InputImage(InputImageContent),
    /// A file input to the model.
    InputFile(InputFileContent),
}

/// A text input to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct InputTextContent {
    /// The text input to the model.
    pub text: String,
}
434
/// An image input to the model, referenced either by uploaded file ID or by URL.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputImageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputImageContent {
    /// The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`.
    /// Defaults to `auto`.
    pub detail: ImageDetail,
    /// The ID of the file to be sent to the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image
    /// in a data URL.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
455
456#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
457#[builder(
458 name = "InputFileArgs",
459 pattern = "mutable",
460 setter(into, strip_option),
461 default
462)]
463#[builder(build_fn(error = "OpenAIError"))]
464pub struct InputFileContent {
465 /// The content of the file to be sent to the model.
466 #[serde(skip_serializing_if = "Option::is_none")]
467 file_data: Option<String>,
468 /// The ID of the file to be sent to the model.
469 #[serde(skip_serializing_if = "Option::is_none")]
470 file_id: Option<String>,
471 /// The URL of the file to be sent to the model.
472 #[serde(skip_serializing_if = "Option::is_none")]
473 file_url: Option<String>,
474 /// The name of the file to be sent to the model.
475 #[serde(skip_serializing_if = "Option::is_none")]
476 filename: Option<String>,
477}
478
/// The conversation that a response belongs to.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Conversation {
    /// The unique ID of the conversation.
    pub id: String,
}

/// Conversation reference in a request: either a bare ID string or a
/// `Conversation` object. Untagged for serde.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ConversationParam {
    /// The unique ID of the conversation.
    ConversationID(String),
    /// The conversation that this response belongs to.
    Object(Conversation),
}
493
/// Additional output data that can be requested in a model response via the
/// `include` request parameter. Each variant serializes to its dotted
/// wire-format string (e.g. `"file_search_call.results"`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum IncludeEnum {
    /// Include the search results of the file search tool call.
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    /// Include the results of the web search tool call.
    #[serde(rename = "web_search_call.results")]
    WebSearchCallResults,
    /// Include the sources of the web search tool call.
    #[serde(rename = "web_search_call.action.sources")]
    WebSearchCallActionSources,
    /// Include image urls from the input message.
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    /// Include image urls from the computer call output.
    #[serde(rename = "computer_call_output.output.image_url")]
    ComputerCallOutputOutputImageUrl,
    /// Include the outputs of python code execution in code interpreter tool call items.
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
    /// Include an encrypted version of reasoning tokens in reasoning item outputs.
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    /// Include logprobs with assistant messages.
    #[serde(rename = "message.output_text.logprobs")]
    MessageOutputTextLogprobs,
}
513
/// Options for streaming responses. Only set this when `stream: true`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseStreamOptions {
    /// When true, stream obfuscation will be enabled. Stream obfuscation adds
    /// random characters to an `obfuscation` field on streaming delta events to
    /// normalize payload sizes as a mitigation to certain side-channel attacks.
    /// These obfuscation fields are included by default, but add a small amount
    /// of overhead to the data stream. You can set `include_obfuscation` to
    /// false to optimize for bandwidth if you trust the network links between
    /// your application and the OpenAI API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_obfuscation: Option<bool>,
}
526
/// Request body for creating a model response via the Responses API.
/// Construct with the generated `CreateResponseArgs` builder.
// NOTE(review): `#[derive(Default)]` here requires every field type to
// implement `Default`, including the required `input: InputParam`; no
// `Default` impl for `InputParam` is visible in this file — presumably
// provided elsewhere in the crate. TODO confirm.
#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
#[builder(
    name = "CreateResponseArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateResponse {
    /// Whether to run the model response in the background.
    /// [Learn more](https://platform.openai.com/docs/guides/background).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,

    /// The conversation that this response belongs to. Items from this conversation are prepended to
    /// `input_items` for this response request.
    ///
    /// Input items and output items from this response are automatically added to this conversation after
    /// this response completes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<ConversationParam>,

    /// Specify additional output data to include in the model response. Currently supported
    /// values are:
    ///
    /// - `web_search_call.action.sources`: Include the sources of the web search tool call.
    ///
    /// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
    ///   interpreter tool call items.
    ///
    /// - `computer_call_output.output.image_url`: Include image urls from the computer call
    ///   output.
    ///
    /// - `file_search_call.results`: Include the search results of the file search tool call.
    ///
    /// - `message.input_image.image_url`: Include image urls from the input message.
    ///
    /// - `message.output_text.logprobs`: Include logprobs with assistant messages.
    ///
    /// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
    ///   reasoning item outputs. This enables reasoning items to be used in multi-turn
    ///   conversations when using the Responses API statelessly (like when the `store` parameter is
    ///   set to `false`, or when an organization is enrolled in the zero data retention program).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include: Option<Vec<IncludeEnum>>,

    /// Text, image, or file inputs to the model, used to generate a response.
    ///
    /// Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Image inputs](https://platform.openai.com/docs/guides/images)
    /// - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
    /// - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
    /// - [Function calling](https://platform.openai.com/docs/guides/function-calling)
    pub input: InputParam,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When using along with `previous_response_id`, the instructions from a previous
    /// response will not be carried over to the next response. This makes it simple
    /// to swap out system (or developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// An upper bound for the number of tokens that can be generated for a response, including
    /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u32>,

    /// The maximum number of total calls to built-in tools that can be processed in a response. This
    /// maximum number applies across all built-in tool calls, not per individual tool. Any further
    /// attempts to call a tool by the model will be ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tool_calls: Option<u32>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be
    /// useful for storing additional information about the object in a structured
    /// format, and querying for objects via API or the dashboard.
    ///
    /// Keys are strings with a maximum length of 64 characters. Values are
    /// strings with a maximum length of 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
    /// offers a wide range of models with different capabilities, performance
    /// characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
    /// to browse and compare available models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// Reference to a prompt template and its variables.
    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<Prompt>,

    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,

    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_retention: Option<PromptCacheRetention>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
    /// usage policies.
    ///
    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
    /// or email address, in order to avoid sending us any identifying information. [Learn
    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub safety_identifier: Option<String>,

    /// Specifies the processing type used for serving the request.
    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
    /// - When not set, the default behavior is 'auto'.
    ///
    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<ServiceTier>,

    /// Whether to store the generated model response for later retrieval via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub store: Option<bool>,

    /// If set to true, the model response data will be streamed to the client
    /// as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
    /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
    /// for more information.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,

    /// Options for streaming responses. Only set this when you set `stream: true`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream_options: Option<ResponseStreamOptions>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8
    /// will make the output more random, while lower values like 0.2 will make it
    /// more focused and deterministic. We generally recommend altering this or
    /// `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You
    /// can specify which tool to use by setting the `tool_choice` parameter.
    ///
    /// We support the following categories of tools:
    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
    /// - **Function calls (custom tools)**: Functions that are defined by you,
    ///   enabling the model to call your own code with strongly typed arguments
    ///   and outputs. Learn more about
    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
    ///   custom tools to call your own code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
    /// token position, each with an associated log probability.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u8>,

    /// An alternative to sampling with temperature, called nucleus sampling,
    /// where the model considers the results of the tokens with top_p probability
    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
    /// are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,
}
746
/// A value that can be substituted for a variable in a prompt template.
///
/// Deserialized untagged, trying each variant in order: a plain string
/// first, then a structured input content item, then any other JSON value.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ResponsePromptVariables {
    /// A plain string substitution value.
    String(String),
    /// A structured input content item (e.g. an image or file input).
    Content(InputContent),
    /// Any other JSON value not covered by the variants above.
    Custom(serde_json::Value),
}
754
/// A reference to a reusable prompt template, with an optional version and
/// variable substitutions.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Prompt {
    /// The unique identifier of the prompt template to use.
    pub id: String,

    /// Optional version of the prompt template.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,

    /// Optional map of values to substitute in for variables in your
    /// prompt. The substitution values can either be strings, or other
    /// Response input types like images or files.
    ///
    /// NOTE(review): the description says "map", but the field holds a
    /// single `ResponsePromptVariables` (its `Custom` variant can carry a
    /// JSON map) — confirm against the OpenAPI spec.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variables: Option<ResponsePromptVariables>,
}
770
/// The processing tier used to serve the request. Defaults to `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ServiceTier {
    /// Let the API choose the tier.
    #[default]
    Auto,
    Default,
    Flex,
    Scale,
    Priority,
}
781
782/// Truncation strategies.
783#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
784#[serde(rename_all = "lowercase")]
785pub enum Truncation {
786 Auto,
787 Disabled,
788}
789
/// Billing information attached to a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Billing {
    /// Identifier of the party billed for the request.
    /// NOTE(review): exact value semantics are not visible in this file —
    /// confirm against the API reference.
    pub payer: String,
}
794
/// o-series reasoning settings.
///
/// Construct with [`ReasoningArgs`]; build failures surface as `OpenAIError`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ReasoningArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct Reasoning {
    /// Constrains effort on reasoning for
    /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
    /// reasoning effort can result in faster responses and fewer tokens used
    /// on reasoning in a response.
    ///
    /// Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option<ReasoningEffort>,
    /// A summary of the reasoning performed by the model. This can be
    /// useful for debugging and understanding the model's reasoning process.
    /// One of `auto`, `concise`, or `detailed`.
    ///
    /// `concise` is only supported for `computer-use-preview` models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummary>,
}
822
/// Constrains the verbosity of the model's response.
/// One of `low`, `medium`, or `high`.
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Verbosity {
    Low,
    Medium,
    High,
}
831
/// Level of detail for reasoning summaries.
///
/// `concise` is only supported for `computer-use-preview` models (see
/// `Reasoning::summary`).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ReasoningSummary {
    Auto,
    Concise,
    Detailed,
}
839
/// The retention policy for the prompt cache.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum PromptCacheRetention {
    /// Serialized as `"in-memory"`.
    #[serde(rename = "in-memory")]
    InMemory,
    /// Serialized as `"24h"`.
    #[serde(rename = "24h")]
    Hours24,
}
848
/// Configuration for text response format.
///
/// Used as the `text` parameter of a response request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTextParam {
    /// An object specifying the format that the model must output.
    ///
    /// Configuring `{ "type": "json_schema" }` enables Structured Outputs,
    /// which ensures the model will match your supplied JSON schema. Learn more in the
    /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
    ///
    /// The default format is `{ "type": "text" }` with no additional options.
    ///
    /// **Not recommended for gpt-4o and newer models:**
    ///
    /// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
    /// ensures the message the model generates is valid JSON. Using `json_schema`
    /// is preferred for models that support it.
    pub format: TextResponseFormatConfiguration,

    /// Constrains the verbosity of the model's response. Lower values will result in
    /// more concise responses, while higher values will result in more verbose responses.
    ///
    /// Currently supported values are `low`, `medium`, and `high`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<Verbosity>,
}
874
/// The format the model's text output must follow.
///
/// Serialized with a snake_case `type` tag: `text`, `json_object`, or
/// `json_schema`.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum TextResponseFormatConfiguration {
    /// Default response format. Used to generate text responses.
    Text,
    /// JSON object response format. An older method of generating JSON responses.
    /// Using `json_schema` is recommended for models that support it.
    /// Note that the model will not generate JSON without a system or user message
    /// instructing it to do so.
    JsonObject,
    /// JSON Schema response format. Used to generate structured JSON responses.
    /// Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
    JsonSchema(ResponseFormatJsonSchema),
}
889
/// Definitions for model-callable tools.
///
/// Serialized with a snake_case `type` discriminator, e.g.
/// `{"type": "function", ...}`. Unit variants (`LocalShell`, `Shell`,
/// `ApplyPatch`) carry no payload beyond the tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Tool {
    /// Defines a function in your own code the model can choose to call. Learn more about [function
    /// calling](https://platform.openai.com/docs/guides/tools).
    Function(FunctionTool),
    /// A tool that searches for relevant content from uploaded files. Learn more about the [file search
    /// tool](https://platform.openai.com/docs/guides/tools-file-search).
    FileSearch(FileSearchTool),
    /// A tool that controls a virtual computer. Learn more about the [computer
    /// use tool](https://platform.openai.com/docs/guides/tools-computer-use).
    ComputerUsePreview(ComputerUsePreviewTool),
    /// Search the Internet for sources related to the prompt. Learn more about the
    /// [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearch(WebSearchTool),
    /// type: web_search_2025_08_26
    #[serde(rename = "web_search_2025_08_26")]
    WebSearch20250826(WebSearchTool),
    /// Give the model access to additional tools via remote Model Context Protocol
    /// (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
    Mcp(MCPTool),
    /// A tool that runs Python code to help generate a response to a prompt.
    CodeInterpreter(CodeInterpreterTool),
    /// A tool that generates images using a model like `gpt-image-1`.
    ImageGeneration(ImageGenTool),
    /// A tool that allows the model to execute shell commands in a local environment.
    LocalShell,
    /// A tool that allows the model to execute shell commands.
    Shell,
    /// A custom tool that processes input using a specified format. Learn more about [custom
    /// tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
    Custom(CustomToolParam),
    /// This tool searches the web for relevant results to use in a response. Learn more about the [web search
    ///tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearchPreview(WebSearchTool),
    /// type: web_search_preview_2025_03_11
    #[serde(rename = "web_search_preview_2025_03_11")]
    WebSearchPreview20250311(WebSearchTool),
    /// Allows the assistant to create, delete, or update files using unified diffs.
    ApplyPatch,
}
932
933#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
934pub struct CustomToolParam {
935 /// The name of the custom tool, used to identify it in tool calls.
936 pub name: String,
937 /// Optional description of the custom tool, used to provide more context.
938 pub description: Option<String>,
939 /// The input format for the custom tool. Default is unconstrained text.
940 pub format: CustomToolParamFormat,
941}
942
/// The input format for a custom tool, serialized with a lowercase `type`
/// tag (`text` or `grammar`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum CustomToolParamFormat {
    /// Unconstrained free-form text.
    #[default]
    Text,
    /// A grammar defined by the user.
    Grammar(CustomGrammarFormatParam),
}
952
/// A tool that searches for relevant content from uploaded files.
///
/// Construct with [`FileSearchToolArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "FileSearchToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct FileSearchTool {
    /// The IDs of the vector stores to search.
    pub vector_store_ids: Vec<String>,
    /// The maximum number of results to return. This number should be between 1 and 50 inclusive.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_num_results: Option<u32>,
    /// A filter to apply.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filters: Option<Filter>,
    /// Ranking options for search.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ranking_options: Option<RankingOptions>,
}
974
975#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
976#[builder(
977 name = "FunctionToolArgs",
978 pattern = "mutable",
979 setter(into, strip_option),
980 default
981)]
982pub struct FunctionTool {
983 /// The name of the function to call.
984 pub name: String,
985 /// A JSON schema object describing the parameters of the function.
986 #[serde(skip_serializing_if = "Option::is_none")]
987 pub parameters: Option<serde_json::Value>,
988 /// Whether to enforce strict parameter validation. Default `true`.
989 #[serde(skip_serializing_if = "Option::is_none")]
990 pub strict: Option<bool>,
991 /// A description of the function. Used by the model to determine whether or not to call the
992 /// function.
993 #[serde(skip_serializing_if = "Option::is_none")]
994 pub description: Option<String>,
995}
996
/// Domain filters for the web search tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolFilters {
    /// Allowed domains for the search. If not provided, all domains are allowed.
    /// Subdomains of the provided domains are allowed as well.
    ///
    /// Example: `["pubmed.ncbi.nlm.nih.gov"]`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allowed_domains: Option<Vec<String>>,
}
1006
1007#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1008#[builder(
1009 name = "WebSearchToolArgs",
1010 pattern = "mutable",
1011 setter(into, strip_option),
1012 default
1013)]
1014pub struct WebSearchTool {
1015 /// Filters for the search.
1016 #[serde(skip_serializing_if = "Option::is_none")]
1017 pub filters: Option<WebSearchToolFilters>,
1018 /// The approximate location of the user.
1019 #[serde(skip_serializing_if = "Option::is_none")]
1020 pub user_location: Option<WebSearchApproximateLocation>,
1021 /// High level guidance for the amount of context window space to use for the search. One of `low`,
1022 /// `medium`, or `high`. `medium` is the default.
1023 #[serde(skip_serializing_if = "Option::is_none")]
1024 pub search_context_size: Option<WebSearchToolSearchContextSize>,
1025}
1026
/// Amount of context window space to use for the search.
/// Defaults to `medium`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchToolSearchContextSize {
    Low,
    #[default]
    Medium,
    High,
}
1035
/// The type of computer environment to control. Defaults to `browser`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ComputerEnvironment {
    Windows,
    Mac,
    Linux,
    Ubuntu,
    #[default]
    Browser,
}
1046
1047#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1048#[builder(
1049 name = "ComputerUsePreviewToolArgs",
1050 pattern = "mutable",
1051 setter(into, strip_option),
1052 default
1053)]
1054pub struct ComputerUsePreviewTool {
1055 /// The type of computer environment to control.
1056 environment: ComputerEnvironment,
1057 /// The width of the computer display.
1058 display_width: u32,
1059 /// The height of the computer display.
1060 display_height: u32,
1061}
1062
/// The ranker version to use for file search.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum RankVersionType {
    /// Let the API pick the ranker.
    #[serde(rename = "auto")]
    Auto,
    /// The `default-2024-11-15` ranker.
    #[serde(rename = "default-2024-11-15")]
    Default20241115,
}
1070
/// Weights used by reciprocal rank fusion when hybrid search is enabled
/// (see `RankingOptions::hybrid_search`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HybridSearch {
    /// The weight of the embedding in the reciprocal ranking fusion.
    pub embedding_weight: f32,
    /// The weight of the text in the reciprocal ranking fusion.
    pub text_weight: f32,
}
1078
/// Options for search result ranking.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RankingOptions {
    /// Weights that control how reciprocal rank fusion balances semantic embedding matches versus
    /// sparse keyword matches when hybrid search is enabled.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hybrid_search: Option<HybridSearch>,
    /// The ranker to use for the file search.
    // Required (not `Option`): always serialized when ranking options are set.
    pub ranker: RankVersionType,
    /// The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will
    /// attempt to return only the most relevant results, but may return fewer results.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub score_threshold: Option<f32>,
}
1093
/// Marker for the location approximation type. Always serialized as
/// `"approximate"`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchApproximateLocationType {
    #[default]
    Approximate,
}
1100
/// Approximate user location for web search.
///
/// Construct with [`WebSearchApproximateLocationArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "WebSearchApproximateLocationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct WebSearchApproximateLocation {
    /// The type of location approximation. Always `approximate`.
    pub r#type: WebSearchApproximateLocationType,
    /// Free text input for the city of the user, e.g. `San Francisco`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub city: Option<String>,
    /// The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,
    /// e.g. `US`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub country: Option<String>,
    /// Free text input for the region of the user, e.g. `California`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub region: Option<String>,
    /// The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g.
    /// `America/Los_Angeles`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timezone: Option<String>,
}
1128
/// Container configuration for a code interpreter.
///
/// `Auto` serializes with a `type: "auto"` tag; `ContainerID` is untagged
/// and serializes as a bare string.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolContainer {
    /// Configuration for a code interpreter container. Optionally specify the IDs of the
    /// files to run the code on.
    Auto(CodeInterpreterContainerAuto),

    /// The container ID.
    #[serde(untagged)]
    ContainerID(String),
}
1141
/// Auto configuration for code interpreter container.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct CodeInterpreterContainerAuto {
    /// An optional list of uploaded files to make available to your code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_ids: Option<Vec<String>>,

    /// Optional memory limit for the container.
    /// NOTE(review): units are not visible in this file (presumably bytes)
    /// — confirm against the API reference.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_limit: Option<u64>,
}
1152
/// A tool that runs Python code to help generate a response to a prompt.
///
/// Construct with [`CodeInterpreterToolArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "CodeInterpreterToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CodeInterpreterTool {
    /// The code interpreter container. Can be a container ID or an object that
    /// specifies uploaded file IDs to make available to your code.
    pub container: CodeInterpreterToolContainer,
}
1166
/// Optional mask for inpainting with the image generation tool.
///
/// Exactly one of `image_url` or `file_id` is expected.
/// NOTE(review): which-one-required is not enforced here — confirm against
/// the API reference.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolInputImageMask {
    /// Base64-encoded mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
    /// File ID for the mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
}
1176
1177#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1178#[serde(rename_all = "lowercase")]
1179pub enum InputFidelity {
1180 #[default]
1181 High,
1182 Low,
1183}
1184
/// Moderation level for generated images. Defaults to `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolModeration {
    #[default]
    Auto,
    Low,
}
1192
/// Image generation tool definition.
///
/// Construct with [`ImageGenerationArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ImageGenerationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct ImageGenTool {
    /// Background type for the generated image. One of `transparent`,
    /// `opaque`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<ImageGenToolBackground>,
    /// Control how much effort the model will exert to match the style and features, especially facial features,
    /// of input images. This parameter is only supported for `gpt-image-1`. Unsupported
    /// for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_fidelity: Option<InputFidelity>,
    /// Optional mask for inpainting. Contains `image_url`
    /// (string, optional) and `file_id` (string, optional).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_image_mask: Option<ImageGenToolInputImageMask>,
    /// The image generation model to use. Default: `gpt-image-1`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Moderation level for the generated image. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub moderation: Option<ImageGenToolModeration>,
    /// Compression level for the output image. Default: 100.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_compression: Option<u8>,
    /// The output format of the generated image. One of `png`, `webp`, or
    /// `jpeg`. Default: `png`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_format: Option<ImageGenToolOutputFormat>,
    /// Number of partial images to generate in streaming mode, from 0 (default value) to 3.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub partial_images: Option<u8>,
    /// The quality of the generated image. One of `low`, `medium`, `high`,
    /// or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quality: Option<ImageGenToolQuality>,
    /// The size of the generated image. One of `1024x1024`, `1024x1536`,
    /// `1536x1024`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<ImageGenToolSize>,
}
1241
/// Background type for the generated image. Defaults to `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolBackground {
    Transparent,
    Opaque,
    #[default]
    Auto,
}
1250
/// Output format of the generated image. Defaults to `png`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolOutputFormat {
    #[default]
    Png,
    Webp,
    Jpeg,
}
1259
/// Quality of the generated image. Defaults to `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolQuality {
    Low,
    Medium,
    High,
    #[default]
    Auto,
}
1269
/// Size of the generated image. Defaults to `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolSize {
    #[default]
    Auto,
    /// Serialized as `"1024x1024"`.
    #[serde(rename = "1024x1024")]
    Size1024x1024,
    /// Serialized as `"1024x1536"`.
    #[serde(rename = "1024x1536")]
    Size1024x1536,
    /// Serialized as `"1536x1024"`.
    #[serde(rename = "1536x1024")]
    Size1536x1024,
}
1282
/// Mode constraining how the allowed tools may be used
/// (see `ToolChoiceAllowed::mode`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceAllowedMode {
    /// The model may pick from the allowed tools or generate a message.
    Auto,
    /// The model must call one or more of the allowed tools.
    Required,
}
1289
/// Constrains the tools available to the model to a pre-defined set.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceAllowed {
    /// Constrains the tools available to the model to a pre-defined set.
    ///
    /// `auto` allows the model to pick from among the allowed tools and generate a
    /// message.
    ///
    /// `required` requires the model to call one or more of the allowed tools.
    pub mode: ToolChoiceAllowedMode,
    /// A list of tool definitions that the model should be allowed to call.
    ///
    /// For the Responses API, the list of tool definitions might look like:
    /// ```json
    /// [
    ///     { "type": "function", "name": "get_weather" },
    ///     { "type": "mcp", "server_label": "deepwiki" },
    ///     { "type": "image_generation" }
    /// ]
    /// ```
    // Kept as raw JSON values because only a partial tool definition
    // (type + identifying fields) is required here.
    pub tools: Vec<serde_json::Value>,
}
1311
/// The type of hosted tool the model should use. Learn more about
/// [built-in tools](https://platform.openai.com/docs/guides/tools).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceTypes {
    FileSearch,
    WebSearchPreview,
    ComputerUsePreview,
    CodeInterpreter,
    ImageGeneration,
}
1323
/// Forces the model to call a specific function
/// (see `ToolChoiceParam::Function`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceFunction {
    /// The name of the function to call.
    pub name: String,
}
1329
/// Forces the model to call a specific tool on a remote MCP server
/// (see `ToolChoiceParam::Mcp`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceMCP {
    /// The name of the tool to call on the server.
    pub name: String,
    /// The label of the MCP server to use.
    pub server_label: String,
}
1337
/// Forces the model to call a specific custom tool
/// (see `ToolChoiceParam::Custom`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceCustom {
    /// The name of the custom tool to call.
    pub name: String,
}
1343
/// Controls how the model selects which tool (or tools) to use.
///
/// Tagged variants serialize with a snake_case `type` field; the `Hosted`
/// and `Mode` variants are untagged fallbacks tried during deserialization
/// when no matching tagged variant applies.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceParam {
    /// Constrains the tools available to the model to a pre-defined set.
    AllowedTools(ToolChoiceAllowed),

    /// Use this option to force the model to call a specific function.
    Function(ToolChoiceFunction),

    /// Use this option to force the model to call a specific tool on a remote MCP server.
    Mcp(ToolChoiceMCP),

    /// Use this option to force the model to call a custom tool.
    Custom(ToolChoiceCustom),

    /// Forces the model to call the apply_patch tool when executing a tool call.
    ApplyPatch,

    /// Forces the model to call the function shell tool when a tool call is required.
    Shell,

    /// Indicates that the model should use a built-in tool to generate a response.
    /// [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
    #[serde(untagged)]
    Hosted(ToolChoiceTypes),

    /// Controls which (if any) tool is called by the model.
    ///
    /// `none` means the model will not call any tool and instead generates a message.
    ///
    /// `auto` means the model can pick between generating a message or calling one or
    /// more tools.
    ///
    /// `required` means the model must call one or more tools.
    #[serde(untagged)]
    Mode(ToolChoiceOptions),
}
1381
/// Tool-calling mode: `none`, `auto`, or `required`
/// (see `ToolChoiceParam::Mode` for the semantics of each).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceOptions {
    None,
    Auto,
    Required,
}
1389
/// Error returned by the API when a request fails.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ErrorObject {
    /// The error code for the response.
    // Free-form string: the set of possible codes is not enumerated here.
    pub code: String,
    /// A human-readable description of the error.
    pub message: String,
}
1398
/// Details about an incomplete response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct IncompleteDetails {
    /// The reason why the response is incomplete.
    /// NOTE(review): the set of possible values is not visible in this
    /// file — confirm against the API reference.
    pub reason: String,
}
1405
/// One of the most likely alternative tokens at a position, with its log
/// probability (see `LogProb::top_logprobs`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TopLogProb {
    /// The raw bytes of the token.
    /// NOTE(review): presumably the UTF-8 bytes of `token` — confirm
    /// against the API reference.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
}
1412
/// Log probability information for a single output token
/// (see `OutputTextContent::logprobs`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LogProb {
    /// The raw bytes of the token.
    /// NOTE(review): presumably the UTF-8 bytes of `token` — confirm
    /// against the API reference.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The text token.
    pub token: String,
    /// The most likely alternative tokens at this position.
    pub top_logprobs: Vec<TopLogProb>,
}
1420
1421#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1422pub struct ResponseTopLobProb {
1423 /// The log probability of this token.
1424 pub logprob: f64,
1425 /// A possible text token.
1426 pub token: String,
1427}
1428
/// Log probability information for a single output token, including its
/// most likely alternatives.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseLogProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
    /// The log probability of the top 20 most likely tokens.
    pub top_logprobs: Vec<ResponseTopLobProb>,
}
1438
1439/// A simple text output from the model.
1440#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1441pub struct OutputTextContent {
1442 /// The annotations of the text output.
1443 pub annotations: Vec<Annotation>,
1444 pub logprobs: Option<Vec<LogProb>>,
1445 /// The text output from the model.
1446 pub text: String,
1447}
1448
/// An annotation attached to output text, serialized with a snake_case
/// `type` tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Annotation {
    /// A citation to a file.
    FileCitation(FileCitationBody),
    /// A citation for a web resource used to generate a model response.
    UrlCitation(UrlCitationBody),
    /// A citation for a container file used to generate a model response.
    ContainerFileCitation(ContainerFileCitationBody),
    /// A path to a file.
    FilePath(FilePath),
}
1461
1462#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1463pub struct FileCitationBody {
1464 /// The ID of the file.
1465 file_id: String,
1466 /// The filename of the file cited.
1467 filename: String,
1468 /// The index of the file in the list of files.
1469 index: u32,
1470}
1471
1472#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1473pub struct UrlCitationBody {
1474 /// The index of the last character of the URL citation in the message.
1475 end_index: u32,
1476 /// The index of the first character of the URL citation in the message.
1477 start_index: u32,
1478 /// The title of the web resource.
1479 title: String,
1480 /// The URL of the web resource.
1481 url: String,
1482}
1483
1484#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1485pub struct ContainerFileCitationBody {
1486 /// The ID of the container file.
1487 container_id: String,
1488 /// The index of the last character of the container file citation in the message.
1489 end_index: u32,
1490 /// The ID of the file.
1491 file_id: String,
1492 /// The filename of the container file cited.
1493 filename: String,
1494 /// The index of the first character of the container file citation in the message.
1495 start_index: u32,
1496}
1497
1498#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1499pub struct FilePath {
1500 /// The ID of the file.
1501 file_id: String,
1502 /// The index of the file in the list of files.
1503 index: u32,
1504}
1505
/// A refusal explanation from the model.
///
/// Emitted as the `refusal` variant of message content
/// (see `OutputMessageContent::Refusal`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RefusalContent {
    /// The refusal explanation from the model.
    pub refusal: String,
}
1512
/// A message generated by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputMessage {
    /// The content of the output message.
    pub content: Vec<OutputMessageContent>,
    /// The unique ID of the output message.
    pub id: String,
    /// The role of the output message. Always `assistant`.
    pub role: AssistantRole,
    /// The status of the message input. One of `in_progress`, `completed`, or
    /// `incomplete`. Populated when input items are returned via API.
    pub status: OutputStatus,
    // The `type` field (always `"message"`) is carried by the enclosing
    // `Item` enum's `#[serde(tag = "type")]` discriminator, so it is not
    // stored here.
}
1528
/// The type of a message item. Always serialized as `"message"`.
///
/// Currently only referenced from commented-out code in `OutputMessage`;
/// kept as part of the public API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum MessageType {
    #[default]
    Message,
}
1535
/// The role for an output message - always `assistant`.
/// This type ensures type safety by only allowing the assistant role.
/// Serialized as `"assistant"`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum AssistantRole {
    #[default]
    Assistant,
}
1544
/// Content of an output message, serialized with a snake_case `type` tag
/// (`output_text` or `refusal`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputMessageContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
}
1553
/// Output content, serialized with a snake_case `type` tag.
///
/// Superset of `OutputMessageContent` that additionally allows reasoning
/// text.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
    /// Reasoning text from the model.
    ReasoningText(ReasoningTextContent),
}
1564
/// A reasoning text content part produced by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningTextContent {
    /// The reasoning text from the model.
    pub text: String,
}
1570
/// A reasoning item representing the model's chain of thought, including summary paragraphs.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningItem {
    /// Unique identifier of the reasoning content.
    pub id: String,
    /// Reasoning summary content.
    pub summary: Vec<SummaryPart>,
    /// Reasoning text content. Omitted from serialization when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<Vec<ReasoningTextContent>>,
    /// The encrypted content of the reasoning item - populated when a response is generated with
    /// `reasoning.encrypted_content` in the `include` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encrypted_content: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
1590
/// A single summary text fragment from reasoning.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Summary {
    /// A summary of the reasoning output from the model so far.
    pub text: String,
}
1597
/// A part of a reasoning summary, discriminated by the `type` field
/// (currently only `summary_text`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SummaryPart {
    /// A summary text fragment.
    SummaryText(Summary),
}
1603
/// File search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCall {
    /// The unique ID of the file search tool call.
    pub id: String,
    /// The queries used to search for files.
    pub queries: Vec<String>,
    /// The status of the file search tool call. One of `in_progress`, `searching`,
    /// `incomplete`, `failed`, or `completed`.
    pub status: FileSearchToolCallStatus,
    /// The results of the file search tool call. Omitted from serialization when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub results: Option<Vec<FileSearchToolCallResult>>,
}
1618
/// Status values reported for a file search tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FileSearchToolCallStatus {
    InProgress,
    Searching,
    Incomplete,
    Failed,
    Completed,
}
1628
/// A single result from a file search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCallResult {
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
    /// additional information about the object in a structured format, and querying for objects
    /// via API or the dashboard. Keys are strings with a maximum length of 64 characters.
    /// Values are strings with a maximum length of 512 characters, booleans, or numbers.
    pub attributes: HashMap<String, serde_json::Value>,
    /// The unique ID of the file.
    pub file_id: String,
    /// The name of the file.
    pub filename: String,
    /// The relevance score of the file - a value between 0 and 1.
    pub score: f32,
    /// The text that was retrieved from the file.
    pub text: String,
}
1646
/// A pending safety check for a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallSafetyCheckParam {
    /// The ID of the pending safety check.
    pub id: String,
    /// The type of the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// Details about the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
1658
/// Status values reported for a web search tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum WebSearchToolCallStatus {
    InProgress,
    Searching,
    Completed,
    Failed,
}
1667
/// A source consulted during a web search action.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionSearchSource {
    /// The type of source. Always `url`.
    pub r#type: String,
    /// The URL of the source.
    pub url: String,
}
1675
1676#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1677pub struct WebSearchActionSearch {
1678 /// The search query.
1679 pub query: String,
1680 /// The sources used in the search.
1681 pub sources: Option<Vec<WebSearchActionSearchSource>>,
1682}
1683
/// A web search action that opens a specific URL from search results.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionOpenPage {
    /// The URL opened by the model.
    pub url: String,
}
1689
/// A web search action that searches for a pattern within a loaded page.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionFind {
    /// The URL of the page searched for the pattern.
    pub url: String,
    /// The pattern or text to search for within the page.
    pub pattern: String,
}
1697
/// The specific action taken in a web search tool call, discriminated by the
/// `type` field (`search`, `open_page`, or `find`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum WebSearchToolCallAction {
    /// Action type "search" - Performs a web search query.
    Search(WebSearchActionSearch),
    /// Action type "open_page" - Opens a specific URL from search results.
    OpenPage(WebSearchActionOpenPage),
    /// Action type "find": Searches for a pattern within a loaded page.
    Find(WebSearchActionFind),
}
1708
/// Web search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolCall {
    /// An object describing the specific action taken in this web search call. Includes
    /// details on how the model used the web (search, open_page, find).
    pub action: WebSearchToolCallAction,
    /// The unique ID of the web search tool call.
    pub id: String,
    /// The status of the web search tool call.
    pub status: WebSearchToolCallStatus,
}
1720
/// Output from a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerToolCall {
    /// The action the model requested (click, drag, keypress, ...).
    pub action: ComputerAction,
    /// An identifier used when responding to the tool call with output.
    pub call_id: String,
    /// The unique ID of the computer call.
    pub id: String,
    /// The pending safety checks for the computer call.
    pub pending_safety_checks: Vec<ComputerCallSafetyCheckParam>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    pub status: OutputStatus,
}
1735
/// A point in 2D space.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DragPoint {
    /// The x-coordinate.
    pub x: i32,
    /// The y-coordinate.
    pub y: i32,
}
1744
/// Represents all user‐triggered actions.
///
/// Discriminated by the `type` field (`click`, `double_click`, `drag`,
/// `keypress`, `move`, `screenshot`, `scroll`, `type`, `wait`).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ComputerAction {
    /// A click action.
    Click(ClickParam),

    /// A double click action.
    DoubleClick(DoubleClickAction),

    /// A drag action.
    Drag(Drag),

    /// A collection of keypresses the model would like to perform.
    Keypress(KeyPressAction),

    /// A mouse move action.
    Move(Move),

    /// A screenshot action.
    Screenshot,

    /// A scroll action.
    Scroll(Scroll),

    /// An action to type in text.
    Type(Type),

    /// A wait action.
    Wait,
}
1776
/// The mouse button used for a click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ClickButtonType {
    Left,
    Right,
    Wheel,
    Back,
    Forward,
}
1786
/// A click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ClickParam {
    /// Indicates which mouse button was pressed during the click. One of `left`,
    /// `right`, `wheel`, `back`, or `forward`.
    pub button: ClickButtonType,
    /// The x-coordinate where the click occurred.
    pub x: i32,
    /// The y-coordinate where the click occurred.
    pub y: i32,
}
1798
/// A double click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DoubleClickAction {
    /// The x-coordinate where the double click occurred.
    pub x: i32,
    /// The y-coordinate where the double click occurred.
    pub y: i32,
}
1807
/// A drag action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Drag {
    /// The path of points the cursor drags through.
    pub path: Vec<DragPoint>,
}
1814
/// A keypress action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KeyPressAction {
    /// The combination of keys the model is requesting to be pressed.
    /// This is an array of strings, each representing a key.
    pub keys: Vec<String>,
}
1822
/// A mouse move action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Move {
    /// The x-coordinate to move to.
    pub x: i32,
    /// The y-coordinate to move to.
    pub y: i32,
}
1831
/// A scroll action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Scroll {
    /// The horizontal scroll distance.
    pub scroll_x: i32,
    /// The vertical scroll distance.
    pub scroll_y: i32,
    /// The x-coordinate where the scroll occurred.
    pub x: i32,
    /// The y-coordinate where the scroll occurred.
    pub y: i32,
}
1844
/// A typing (text entry) action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Type {
    /// The text to type.
    pub text: String,
}
1851
/// A tool call to run a function defined by the caller.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionToolCall {
    /// A JSON string of the arguments to pass to the function.
    pub arguments: String,
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// The name of the function to run.
    pub name: String,
    /// The unique ID of the function tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
1868
/// Status values reported for an image generation tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ImageGenToolCallStatus {
    InProgress,
    Completed,
    Generating,
    Failed,
}
1877
/// An image generation tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolCall {
    /// The unique ID of the image generation call.
    pub id: String,
    /// The generated image encoded in base64.
    // NOTE(review): unlike most Option fields in this file, this has no
    // skip_serializing_if, so None serializes as `null` — confirm intended.
    pub result: Option<String>,
    /// The status of the image generation call.
    pub status: ImageGenToolCallStatus,
}
1887
/// Status values reported for a code interpreter tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum CodeInterpreterToolCallStatus {
    InProgress,
    Completed,
    Incomplete,
    Interpreting,
    Failed,
}
1897
/// Output of a code interpreter request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterToolCall {
    /// The code to run, or null if not available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// ID of the container used to run the code.
    pub container_id: String,
    /// The unique ID of the code interpreter tool call.
    pub id: String,
    /// The outputs generated by the code interpreter, such as logs or images.
    /// Can be null if no outputs are available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub outputs: Option<Vec<CodeInterpreterToolCallOutput>>,
    /// The status of the code interpreter tool call.
    /// Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`.
    pub status: CodeInterpreterToolCallStatus,
}
1916
/// Individual result from a code interpreter: either logs or files.
///
/// Discriminated by the `type` field (`logs` or `image`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolCallOutput {
    /// Code interpreter output logs
    Logs(CodeInterpreterOutputLogs),
    /// Code interpreter output image
    Image(CodeInterpreterOutputImage),
}
1926
/// Log output produced by a code interpreter run.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputLogs {
    /// The logs output from the code interpreter.
    pub logs: String,
}
1932
/// Image output produced by a code interpreter run.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputImage {
    /// The URL of the image output from the code interpreter.
    pub url: String,
}
1938
1939#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1940pub struct CodeInterpreterFile {
1941 /// The ID of the file.
1942 file_id: String,
1943 /// The MIME type of the file.
1944 mime_type: String,
1945}
1946
/// A tool call that executes a shell command on the server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCall {
    /// Execute a shell command on the server.
    pub action: LocalShellExecAction,
    /// The unique ID of the local shell tool call generated by the model.
    pub call_id: String,
    /// The unique ID of the local shell call.
    pub id: String,
    /// The status of the local shell call.
    pub status: OutputStatus,
}
1958
/// Define the shape of a local shell action (exec).
// NOTE(review): the Option fields below have no skip_serializing_if, so None
// serializes as `null` — unlike most optional fields in this file; confirm intended.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellExecAction {
    /// The command to run.
    pub command: Vec<String>,
    /// Environment variables to set for the command.
    pub env: HashMap<String, String>,
    /// Optional timeout in milliseconds for the command.
    pub timeout_ms: Option<u64>,
    /// Optional user to run the command as.
    pub user: Option<String>,
    /// Optional working directory to run the command in.
    pub working_directory: Option<String>,
}
1973
/// Commands and limits describing how to run the function shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellActionParam {
    /// Ordered shell commands for the execution environment to run.
    pub commands: Vec<String>,
    /// Maximum wall-clock time in milliseconds to allow the shell commands to run.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timeout_ms: Option<u64>,
    /// Maximum number of UTF-8 characters to capture from combined stdout and stderr output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
1986
/// Status values reported for function shell tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FunctionShellCallItemStatus {
    InProgress,
    Completed,
    Incomplete,
}
1995
/// A tool representing a request to execute one or more shell commands.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallItemParam {
    /// The unique ID of the function shell tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the function shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellActionParam,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<FunctionShellCallItemStatus>,
}
2010
/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcomeParam {
    /// The exit code returned by the shell process.
    pub exit_code: i32,
}
2017
/// The exit or timeout outcome associated with this chunk.
///
/// Discriminated by the `type` field (`timeout` or `exit`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcomeParam {
    /// The commands were terminated because they exceeded their time limit.
    Timeout,
    /// The commands finished and returned an exit code.
    Exit(FunctionShellCallOutputExitOutcomeParam),
}
2025
/// Captured stdout and stderr for a portion of a function shell tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContentParam {
    /// Captured stdout output for this chunk of the shell call.
    pub stdout: String,
    /// Captured stderr output for this chunk of the shell call.
    pub stderr: String,
    /// The exit or timeout outcome associated with this chunk.
    pub outcome: FunctionShellCallOutputOutcomeParam,
}
2036
/// The streamed output items emitted by a function shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputItemParam {
    /// The unique ID of the function shell tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the function shell tool call generated by the model.
    pub call_id: String,
    /// Captured chunks of stdout and stderr output, along with their associated outcomes.
    pub output: Vec<FunctionShellCallOutputContentParam>,
    /// The maximum number of UTF-8 characters captured for this shell call's combined output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
2051
/// Status values reported for apply_patch tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatusParam {
    InProgress,
    Completed,
}
2059
/// Instruction for creating a new file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperationParam {
    /// Path of the file to create relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply when creating the file.
    pub diff: String,
}
2068
/// Instruction for deleting an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperationParam {
    /// Path of the file to delete relative to the workspace root.
    pub path: String,
}
2075
/// Instruction for updating an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperationParam {
    /// Path of the file to update relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply to the existing file.
    pub diff: String,
}
2084
/// One of the create_file, delete_file, or update_file operations supplied to the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperationParam {
    /// Create a new file from a diff.
    CreateFile(ApplyPatchCreateFileOperationParam),
    /// Delete an existing file.
    DeleteFile(ApplyPatchDeleteFileOperationParam),
    /// Update an existing file with a diff.
    UpdateFile(ApplyPatchUpdateFileOperationParam),
}
2093
/// A tool call representing a request to create, delete, or update files using diff patches.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallItemParam {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatusParam,
    /// The specific create, delete, or update instruction for the apply_patch tool call.
    pub operation: ApplyPatchOperationParam,
}
2107
/// Outcome values reported for apply_patch tool call outputs.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallOutputStatusParam {
    Completed,
    Failed,
}
2115
/// The streamed output emitted by an apply patch tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallOutputItemParam {
    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call output. One of `completed` or `failed`.
    pub status: ApplyPatchCallOutputStatusParam,
    /// Optional human-readable log text from the apply patch tool (e.g., patch results or errors).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output: Option<String>,
}
2130
/// Shell exec action
/// Execute a shell command.
// NOTE(review): the Option fields below have no skip_serializing_if, so None
// serializes as `null` — unlike FunctionShellActionParam above; confirm intended.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellAction {
    /// A list of commands to run.
    pub commands: Vec<String>,
    /// Optional timeout in milliseconds for the commands.
    pub timeout_ms: Option<u64>,
    /// Optional maximum number of characters to return from each command.
    pub max_output_length: Option<u64>,
}
2142
/// Status values reported for function shell tool calls.
// NOTE(review): despite the `LocalShell` name, this enum is used as the status
// type of `FunctionShellCall` below; renaming would be a breaking change.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum LocalShellCallStatus {
    InProgress,
    Completed,
    Incomplete,
}
2151
/// A tool call that executes one or more shell commands in a managed environment.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCall {
    /// The unique ID of the function shell tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the function shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellAction,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    pub status: LocalShellCallStatus,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2167
/// The content of a shell call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContent {
    /// Captured stdout output for this chunk of the shell call.
    pub stdout: String,
    /// Captured stderr output for this chunk of the shell call.
    pub stderr: String,
    /// Represents either an exit outcome (with an exit code) or a timeout outcome for a shell call output chunk.
    /// Flattened so the outcome's `type` tag appears at this level of the JSON.
    #[serde(flatten)]
    pub outcome: FunctionShellCallOutputOutcome,
    /// The ID of the entity that created this output chunk.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2179
/// Function shell call outcome
///
/// Discriminated by the `type` field (`timeout` or `exit`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcome {
    /// The commands were terminated because they exceeded their time limit.
    Timeout,
    /// The commands finished and returned an exit code.
    Exit(FunctionShellCallOutputExitOutcome),
}
2187
/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcome {
    /// Exit code from the shell process.
    pub exit_code: i32,
}
2194
/// The output of a shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutput {
    /// The unique ID of the shell call output. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// An array of shell call output contents
    pub output: Vec<FunctionShellCallOutputContent>,
    /// The maximum length of the shell command output. This is generated by the model and should be
    /// passed back with the raw output.
    // NOTE(review): no skip_serializing_if here, so None serializes as `null` — confirm intended.
    pub max_output_length: Option<u64>,
    /// The ID of the entity that created this tool call output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2210
/// Status values reported for apply_patch tool calls.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatus {
    InProgress,
    Completed,
}
2218
/// Instruction describing how to create a file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperation {
    /// Path of the file to create.
    pub path: String,
    /// Diff to apply.
    pub diff: String,
}
2227
/// Instruction describing how to delete a file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperation {
    /// Path of the file to delete.
    pub path: String,
}
2234
/// Instruction describing how to update a file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperation {
    /// Path of the file to update.
    pub path: String,
    /// Diff to apply.
    pub diff: String,
}
2243
/// One of the create_file, delete_file, or update_file operations applied via apply_patch.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperation {
    /// Create a new file from a diff.
    CreateFile(ApplyPatchCreateFileOperation),
    /// Delete an existing file.
    DeleteFile(ApplyPatchDeleteFileOperation),
    /// Update an existing file with a diff.
    UpdateFile(ApplyPatchUpdateFileOperation),
}
2252
/// A tool call that applies file diffs by creating, deleting, or updating files.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCall {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatus,
    /// One of the create_file, delete_file, or update_file operations applied via apply_patch.
    pub operation: ApplyPatchOperation,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2268
/// Outcome values reported for apply_patch tool call outputs.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallOutputStatus {
    Completed,
    Failed,
}
2276
/// The output emitted by an apply patch tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallOutput {
    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call output. One of `completed` or `failed`.
    pub status: ApplyPatchCallOutputStatus,
    /// Optional textual output returned by the apply patch tool.
    // NOTE(review): no skip_serializing_if here, so None serializes as `null` — confirm intended.
    pub output: Option<String>,
    /// The ID of the entity that created this tool call output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2292
/// Output of an MCP server tool invocation.
// NOTE(review): the Option fields below have no skip_serializing_if, so None
// serializes as `null` — unlike most optional fields in this file; confirm intended.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPToolCall {
    /// A JSON string of the arguments passed to the tool.
    pub arguments: String,
    /// The unique ID of the tool call.
    pub id: String,
    /// The name of the tool that was run.
    pub name: String,
    /// The label of the MCP server running the tool.
    pub server_label: String,
    /// Unique identifier for the MCP tool call approval request. Include this value
    /// in a subsequent `mcp_approval_response` input to approve or reject the corresponding
    /// tool call.
    pub approval_request_id: Option<String>,
    /// Error message from the call, if any.
    pub error: Option<String>,
    /// The output from the tool call.
    pub output: Option<String>,
    /// The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
    /// `calling`, or `failed`.
    pub status: Option<MCPToolCallStatus>,
}
2316
/// Status values reported for an MCP tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum MCPToolCallStatus {
    InProgress,
    Completed,
    Incomplete,
    Calling,
    Failed,
}
2326
/// The list of tools available on an MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPListTools {
    /// The unique ID of the list.
    pub id: String,
    /// The label of the MCP server.
    pub server_label: String,
    /// The tools available on the server.
    pub tools: Vec<MCPListToolsTool>,
    /// Error message if listing failed. Omitted from serialization when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
2339
/// A request for human approval of an MCP tool invocation.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalRequest {
    /// JSON string of arguments for the tool.
    pub arguments: String,
    /// The unique ID of the approval request.
    pub id: String,
    /// The name of the tool to run.
    pub name: String,
    /// The label of the MCP server making the request.
    pub server_label: String,
}
2351
/// System (or developer) instructions for a response: either plain text or a
/// list of input items. Untagged — deserialization tries each variant in order.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum Instructions {
    /// A text input to the model, equivalent to a text input with the `developer` role.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    Array(Vec<InputItem>),
}
2360
2361/// The complete response returned by the Responses API.
2362#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2363pub struct Response {
2364 /// Whether to run the model response in the background.
2365 /// [Learn more](https://platform.openai.com/docs/guides/background).
2366 #[serde(skip_serializing_if = "Option::is_none")]
2367 pub background: Option<bool>,
2368
2369 /// Billing information for the response.
2370 #[serde(skip_serializing_if = "Option::is_none")]
2371 pub billing: Option<Billing>,
2372
2373 /// The conversation that this response belongs to. Input items and output
2374 /// items from this response are automatically added to this conversation.
2375 #[serde(skip_serializing_if = "Option::is_none")]
2376 pub conversation: Option<Conversation>,
2377
2378 /// Unix timestamp (in seconds) when this Response was created.
2379 pub created_at: u64,
2380
2381 /// An error object returned when the model fails to generate a Response.
2382 #[serde(skip_serializing_if = "Option::is_none")]
2383 pub error: Option<ErrorObject>,
2384
2385 /// Unique identifier for this response.
2386 pub id: String,
2387
2388 /// Details about why the response is incomplete, if any.
2389 #[serde(skip_serializing_if = "Option::is_none")]
2390 pub incomplete_details: Option<IncompleteDetails>,
2391
2392 /// A system (or developer) message inserted into the model's context.
2393 ///
2394 /// When using along with `previous_response_id`, the instructions from a previous response
2395 /// will not be carried over to the next response. This makes it simple to swap out
2396 /// system (or developer) messages in new responses.
2397 #[serde(skip_serializing_if = "Option::is_none")]
2398 pub instructions: Option<Instructions>,
2399
2400 /// An upper bound for the number of tokens that can be generated for a response,
2401 /// including visible output tokens and
2402 /// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
2403 #[serde(skip_serializing_if = "Option::is_none")]
2404 pub max_output_tokens: Option<u32>,
2405
2406 /// Set of 16 key-value pairs that can be attached to an object. This can be
2407 /// useful for storing additional information about the object in a structured
2408 /// format, and querying for objects via API or the dashboard.
2409 ///
2410 /// Keys are strings with a maximum length of 64 characters. Values are strings
2411 /// with a maximum length of 512 characters.
2412 #[serde(skip_serializing_if = "Option::is_none")]
2413 pub metadata: Option<HashMap<String, String>>,
2414
2415 /// Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a
2416 /// wide range of models with different capabilities, performance characteristics,
2417 /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
2418 pub model: String,
2419
2420 /// The object type of this resource - always set to `response`.
2421 pub object: String,
2422
2423 /// An array of content items generated by the model.
2424 ///
2425 /// - The length and order of items in the output array is dependent on the model's response.
2426 /// - Rather than accessing the first item in the output array and assuming it's an assistant
2427 /// message with the content generated by the model, you might consider using
2428 /// the `output_text` property where supported in SDKs.
2429 pub output: Vec<OutputItem>,
2430
2431 /// SDK-only convenience property that contains the aggregated text output from all
2432 /// `output_text` items in the `output` array, if any are present.
2433 /// Supported in the Python and JavaScript SDKs.
2434 // #[serde(skip_serializing_if = "Option::is_none")]
2435 // pub output_text: Option<String>,
2436
2437 /// Whether to allow the model to run tool calls in parallel.
2438 #[serde(skip_serializing_if = "Option::is_none")]
2439 pub parallel_tool_calls: Option<bool>,
2440
2441 /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
2442 /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
2443 /// Cannot be used in conjunction with `conversation`.
2444 #[serde(skip_serializing_if = "Option::is_none")]
2445 pub previous_response_id: Option<String>,
2446
2447 /// Reference to a prompt template and its variables.
2448 /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
2449 #[serde(skip_serializing_if = "Option::is_none")]
2450 pub prompt: Option<Prompt>,
2451
2452 /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
2453 /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
2454 #[serde(skip_serializing_if = "Option::is_none")]
2455 pub prompt_cache_key: Option<String>,
2456
2457 /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
2458 /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
2459 /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
2460 #[serde(skip_serializing_if = "Option::is_none")]
2461 pub prompt_cache_retention: Option<PromptCacheRetention>,
2462
2463 /// **gpt-5 and o-series models only**
2464 /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
2465 #[serde(skip_serializing_if = "Option::is_none")]
2466 pub reasoning: Option<Reasoning>,
2467
2468 /// A stable identifier used to help detect users of your application that may be violating OpenAI's
2469 /// usage policies.
2470 ///
2471 /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
2472 /// or email address, in order to avoid sending us any identifying information. [Learn
2473 /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
2474 #[serde(skip_serializing_if = "Option::is_none")]
2475 pub safety_identifier: Option<String>,
2476
2477 /// Specifies the processing type used for serving the request.
2478 /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
2479 /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
2480 /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
2481 /// - When not set, the default behavior is 'auto'.
2482 ///
2483 /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
2484 #[serde(skip_serializing_if = "Option::is_none")]
2485 pub service_tier: Option<ServiceTier>,
2486
2487 /// The status of the response generation.
2488 /// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or `incomplete`.
2489 pub status: Status,
2490
2491 /// What sampling temperature was used, between 0 and 2. Higher values like 0.8 make
2492 /// outputs more random, lower values like 0.2 make output more focused and deterministic.
2493 ///
2494 /// We generally recommend altering this or `top_p` but not both.
2495 #[serde(skip_serializing_if = "Option::is_none")]
2496 pub temperature: Option<f32>,
2497
2498 /// Configuration options for a text response from the model. Can be plain
2499 /// text or structured JSON data. Learn more:
2500 /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
2501 /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
2502 #[serde(skip_serializing_if = "Option::is_none")]
2503 pub text: Option<ResponseTextParam>,
2504
2505 /// How the model should select which tool (or tools) to use when generating
2506 /// a response. See the `tools` parameter to see how to specify which tools
2507 /// the model can call.
2508 #[serde(skip_serializing_if = "Option::is_none")]
2509 pub tool_choice: Option<ToolChoiceParam>,
2510
2511 /// An array of tools the model may call while generating a response. You
2512 /// can specify which tool to use by setting the `tool_choice` parameter.
2513 ///
2514 /// We support the following categories of tools:
2515 /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
2516 /// model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
2517 /// or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
2518 /// [built-in tools](https://platform.openai.com/docs/guides/tools).
2519 /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
2520 /// or predefined connectors such as Google Drive and SharePoint. Learn more about
2521 /// [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
2522 /// - **Function calls (custom tools)**: Functions that are defined by you,
2523 /// enabling the model to call your own code with strongly typed arguments
2524 /// and outputs. Learn more about
2525 /// [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
2526 /// custom tools to call your own code.
2527 #[serde(skip_serializing_if = "Option::is_none")]
2528 pub tools: Option<Vec<Tool>>,
2529
2530 /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
2531 /// token position, each with an associated log probability.
2532 #[serde(skip_serializing_if = "Option::is_none")]
2533 pub top_logprobs: Option<u8>,
2534
2535 /// An alternative to sampling with temperature, called nucleus sampling,
2536 /// where the model considers the results of the tokens with top_p probability
2537 /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
2538 /// are considered.
2539 ///
2540 /// We generally recommend altering this or `temperature` but not both.
2541 #[serde(skip_serializing_if = "Option::is_none")]
2542 pub top_p: Option<f32>,
2543
2544 ///The truncation strategy to use for the model response.
2545 /// - `auto`: If the input to this Response exceeds
2546 /// the model's context window size, the model will truncate the
2547 /// response to fit the context window by dropping items from the beginning of the conversation.
2548 /// - `disabled` (default): If the input size will exceed the context window
2549 /// size for a model, the request will fail with a 400 error.
2550 #[serde(skip_serializing_if = "Option::is_none")]
2551 pub truncation: Option<Truncation>,
2552
2553 /// Represents token usage details including input tokens, output tokens,
2554 /// a breakdown of output tokens, and the total tokens used.
2555 #[serde(skip_serializing_if = "Option::is_none")]
2556 pub usage: Option<ResponseUsage>,
2557}
2558
/// The status of the response generation.
/// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or `incomplete`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum Status {
    Completed,
    Failed,
    InProgress,
    Cancelled,
    Queued,
    Incomplete,
}
2569
/// An output item generated by the model as part of a response,
/// discriminated by the `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum OutputItem {
    /// An output message from the model.
    Message(OutputMessage),
    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
    /// for more information.
    FileSearchCall(FileSearchToolCall),
    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling)
    /// for more information.
    FunctionCall(FunctionToolCall),
    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search)
    /// for more information.
    WebSearchCall(WebSearchToolCall),
    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
    /// for more information.
    ComputerCall(ComputerToolCall),
    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API for
    /// subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),
    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),
    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),
    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),
    /// A tool call that executes one or more shell commands in a managed environment.
    ShellCall(FunctionShellCall),
    /// The output of a shell tool call.
    ShellCallOutput(FunctionShellCallOutput),
    /// A tool call that applies file diffs by creating, deleting, or updating files.
    ApplyPatchCall(ApplyPatchToolCall),
    /// The output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutput),
    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),
    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),
    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),
    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
2621
/// A call to a custom tool created by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[non_exhaustive]
pub struct CustomToolCall {
    /// An identifier used to map this custom tool call to a tool call output.
    pub call_id: String,
    /// The input for the custom tool call generated by the model.
    pub input: String,
    /// The name of the custom tool being called.
    pub name: String,
    /// The unique ID of the custom tool call in the OpenAI platform.
    pub id: String,
}
2634
/// Confirmation returned when a response is deleted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct DeleteResponse {
    /// The object type of the deleted resource.
    pub object: String,
    /// Whether the response was deleted.
    pub deleted: bool,
    /// The ID of the deleted response.
    pub id: String,
}
2641
2642#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2643pub struct AnyItemReference {
2644 pub r#type: Option<String>,
2645 pub id: String,
2646}
2647
/// A fully-expanded item in a response item list, discriminated by the
/// `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ItemResourceItem {
    /// A message item.
    Message(MessageItem),
    /// The results of a file search tool call.
    FileSearchCall(FileSearchToolCall),
    /// A tool call to a computer use tool.
    ComputerCall(ComputerToolCall),
    /// The output of a computer tool call.
    ComputerCallOutput(ComputerCallOutputItemParam),
    /// The results of a web search tool call.
    WebSearchCall(WebSearchToolCall),
    /// A tool call to run a function.
    FunctionCall(FunctionToolCall),
    /// The output of a function tool call.
    FunctionCallOutput(FunctionCallOutputItemParam),
    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),
    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),
    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),
    /// The output of a local shell tool call.
    LocalShellCallOutput(LocalShellToolCallOutput),
    /// A tool call that executes shell commands in a managed environment.
    ShellCall(FunctionShellCallItemParam),
    /// The output of a shell tool call.
    ShellCallOutput(FunctionShellCallOutputItemParam),
    /// A tool call that applies file diffs.
    ApplyPatchCall(ApplyPatchToolCallItemParam),
    /// The output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),
    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),
    /// A response to an MCP approval request.
    McpApprovalResponse(MCPApprovalResponse),
    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),
}
2671
2672#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2673#[serde(untagged)]
2674pub enum ItemResource {
2675 ItemReference(AnyItemReference),
2676 Item(ItemResourceItem),
2677}
2678
/// A paginated list of Response items.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseItemList {
    /// The type of object returned, must be `list`.
    pub object: String,
    /// The ID of the first item in the list.
    // NOTE(review): presumably `None` when the list is empty — confirm against the API.
    pub first_id: Option<String>,
    /// The ID of the last item in the list.
    pub last_id: Option<String>,
    /// Whether there are more items in the list.
    pub has_more: bool,
    /// The list of items.
    pub data: Vec<ItemResource>,
}
2693
/// Request body used to count the tokens for a prospective response,
/// mirroring the corresponding subset of the response-creation parameters.
/// Construct via `TokenCountsBodyArgs` (derive_builder).
#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
#[builder(
    name = "TokenCountsBodyArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct TokenCountsBody {
    /// The conversation that this response belongs to. Items from this
    /// conversation are prepended to `input_items` for this response request.
    /// Input items and output items from this response are automatically added to this
    /// conversation after this response completes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<ConversationParam>,

    /// Text, image, or file inputs to the model, used to generate a response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
    /// to browse and compare available models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You can specify which tool
    /// to use by setting the `tool_choice` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,
}
2771
/// Token counts computed for a `TokenCountsBody` request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TokenCountsResource {
    /// The type of object returned.
    pub object: String,
    /// The number of input tokens counted.
    pub input_tokens: u32,
}