// async_openai/types/responses/response.rs
use std::collections::HashMap;

use derive_builder::Builder;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;
use crate::types::mcp::{MCPListToolsTool, MCPTool};
use crate::types::responses::{
    CustomGrammarFormatParam, Filter, ImageDetail, ReasoningEffort, ResponseFormatJsonSchema,
    ResponseUsage, SummaryTextContent,
};
10
/// Labels an `assistant` message as intermediate commentary or the final answer.
///
/// For models like `gpt-5.3-codex` and beyond, when sending follow-up requests, preserve and
/// resend `phase` on all assistant messages — dropping it can degrade performance.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum MessagePhase {
    /// Intermediate commentary produced while working toward an answer (`"commentary"`).
    Commentary,
    /// The final answer to the user's request (`"final_answer"`).
    FinalAnswer,
}
20
/// Whether tool search was executed by the server or by the client.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ToolSearchExecutionType {
    /// Tool search is executed by the OpenAI server (`"server"`).
    Server,
    /// Tool search is executed by the client application (`"client"`).
    Client,
}
28
/// The type of content to search for.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum SearchContentType {
    /// Search over text content (`"text"`).
    Text,
    /// Search over image content (`"image"`).
    Image,
}
36
/// The status of a function call.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum FunctionCallStatus {
    /// The call is still being produced (`"in_progress"`).
    InProgress,
    /// The call finished successfully (`"completed"`).
    Completed,
    /// The call ended without completing (`"incomplete"`).
    Incomplete,
}
45
/// The status of a function call output.
///
/// NOTE(review): mirrors [`FunctionCallStatus`] variant-for-variant; kept as a distinct type
/// to match the OpenAPI schema's separate status enums.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum FunctionCallOutputStatusEnum {
    InProgress,
    Completed,
    Incomplete,
}
54
/// A tool that controls a virtual computer. Learn more about the
/// [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
///
/// Currently carries no configuration fields; serializes as an empty object.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct ComputerTool {}
59
/// Groups function/custom tools under a shared namespace.
///
/// Construct with [`NamespaceToolParamArgs`] (generated by `derive_builder`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Builder, Default)]
#[builder(
    name = "NamespaceToolParamArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct NamespaceToolParam {
    /// The namespace name used in tool calls (for example, `crm`).
    pub name: String,
    /// A description of the namespace shown to the model.
    pub description: String,
    /// The function/custom tools available inside this namespace.
    pub tools: Vec<NamespaceToolParamTool>,
}
77
/// A function or custom tool that belongs to a namespace.
///
/// Internally tagged on `"type"`: `"function"` or `"custom"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum NamespaceToolParamTool {
    /// A function tool definition.
    Function(FunctionToolParam),
    /// A custom tool definition.
    Custom(CustomToolParam),
}
85
/// A function tool that can be used within a namespace or with tool search.
///
/// Construct with [`FunctionToolParamArgs`]. Optional fields are omitted from the
/// serialized request when `None`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "FunctionToolParamArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct FunctionToolParam {
    /// The name of the function.
    pub name: String,
    /// A description of the function.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// A JSON schema object describing the parameters of the function.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parameters: Option<serde_json::Value>,
    /// Whether to enforce strict parameter validation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub strict: Option<bool>,
    /// Whether this function should be deferred and discovered via tool search.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub defer_loading: Option<bool>,
}
111
/// Hosted or BYOT (bring-your-own-tool) tool search configuration for deferred tools.
///
/// Construct with [`ToolSearchToolParamArgs`]. `description` and `parameters` only
/// apply when `execution` is client-side.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ToolSearchToolParamArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct ToolSearchToolParam {
    /// Whether tool search is executed by the server or by the client.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub execution: Option<ToolSearchExecutionType>,
    /// Description shown to the model for a client-executed tool search tool.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Parameter schema for a client-executed tool search tool.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parameters: Option<serde_json::Value>,
}
132
/// A tool search call output item (returned by the API).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolSearchCall {
    /// The unique ID of the tool search call item.
    pub id: String,
    /// The unique ID of the tool search call generated by the model.
    // NOTE(review): unlike most Option fields in this file, this has no
    // `skip_serializing_if`, so `None` serializes as `"call_id": null` — confirm
    // this matches the API schema before adding the attribute.
    pub call_id: Option<String>,
    /// Whether tool search was executed by the server or by the client.
    pub execution: ToolSearchExecutionType,
    /// Arguments used for the tool search call.
    pub arguments: serde_json::Value,
    /// The status of the tool search call item.
    pub status: FunctionCallStatus,
    /// The identifier of the actor that created the item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
150
/// A tool search call input item (sent back to the API).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct ToolSearchCallItemParam {
    /// The unique ID of this tool search call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the tool search call generated by the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub call_id: Option<String>,
    /// Whether tool search was executed by the server or by the client.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub execution: Option<ToolSearchExecutionType>,
    /// The arguments supplied to the tool search call.
    /// `#[serde(default)]` lets this deserialize to `Value::Null` when absent.
    #[serde(default)]
    pub arguments: serde_json::Value,
    /// The status of the tool search call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
170
/// A tool search output item (returned by the API).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolSearchOutput {
    /// The unique ID of the tool search output item.
    pub id: String,
    /// The unique ID of the tool search call generated by the model.
    // NOTE(review): no `skip_serializing_if`, so `None` serializes as null —
    // mirrors `ToolSearchCall.call_id`; confirm against the API schema.
    pub call_id: Option<String>,
    /// Whether tool search was executed by the server or by the client.
    pub execution: ToolSearchExecutionType,
    /// The loaded tool definitions returned by tool search.
    pub tools: Vec<Tool>,
    /// The status of the tool search output item.
    pub status: FunctionCallOutputStatusEnum,
    /// The identifier of the actor that created the item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
188
/// A tool search output input item (sent back to the API).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct ToolSearchOutputItemParam {
    /// The unique ID of this tool search output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the tool search call generated by the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub call_id: Option<String>,
    /// Whether tool search was executed by the server or by the client.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub execution: Option<ToolSearchExecutionType>,
    /// The loaded tool definitions returned by the tool search output.
    pub tools: Vec<Tool>,
    /// The status of the tool search output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
207
208/// Role of messages in the API.
209#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
210#[serde(rename_all = "lowercase")]
211pub enum Role {
212 #[default]
213 User,
214 Assistant,
215 System,
216 Developer,
217}
218
219/// Status of input/output items.
220#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
221#[serde(rename_all = "snake_case")]
222pub enum OutputStatus {
223 InProgress,
224 Completed,
225 Incomplete,
226}
227
/// Input to the model: either a bare string or a list of structured items.
///
/// Untagged: a JSON string deserializes to `Text`, a JSON array to `Items`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputParam {
    /// A text input to the model, equivalent to a text input with the
    /// `user` role.
    Text(String),
    /// A list of one or many input items to the model, containing
    /// different content types.
    Items(Vec<InputItem>),
}
238
/// Content item used to generate a response.
///
/// This is a properly discriminated union based on the `type` field, using Rust's
/// type-safe enum with serde's tag attribute for efficient deserialization.
///
/// # OpenAPI Specification
/// Corresponds to the `Item` schema in the OpenAPI spec with a `type` discriminator.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Item {
    /// A message (type: "message").
    /// Can represent InputMessage (user/system/developer) or OutputMessage (assistant).
    ///
    /// InputMessage:
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the developer or system role take precedence over instructions given with the user role.
    /// OutputMessage:
    /// A message output from the model.
    Message(MessageItem),

    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
    FileSearchCall(FileSearchToolCall),

    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
    ComputerCall(ComputerToolCall),

    /// The output of a computer tool call.
    ComputerCallOutput(ComputerCallOutputItemParam),

    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
    WebSearchCall(WebSearchToolCall),

    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
    FunctionCall(FunctionToolCall),

    /// The output of a function tool call.
    FunctionCallOutput(FunctionCallOutputItemParam),

    /// A tool search call.
    ToolSearchCall(ToolSearchCallItemParam),

    /// A tool search output.
    ToolSearchOutput(ToolSearchOutputItemParam),

    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API
    /// for subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),

    /// A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
    Compaction(CompactionSummaryItemParam),

    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),

    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),

    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),

    /// The output of a local shell tool call.
    LocalShellCallOutput(LocalShellToolCallOutput),

    /// A tool representing a request to execute one or more shell commands.
    ShellCall(FunctionShellCallItemParam),

    /// The streamed output items emitted by a shell tool call.
    ShellCallOutput(FunctionShellCallOutputItemParam),

    /// A tool call representing a request to create, delete, or update files using diff patches.
    ApplyPatchCall(ApplyPatchToolCallItemParam),

    /// The streamed output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),

    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),

    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),

    /// A response to an MCP approval request.
    McpApprovalResponse(MCPApprovalResponse),

    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),

    /// The output of a custom tool call from your code, being sent back to the model.
    CustomToolCallOutput(CustomToolCallOutput),

    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
339
/// Input item that can be used in the context for generating a response.
///
/// This represents the OpenAPI `InputItem` schema which is an `anyOf`:
/// 1. `EasyInputMessage` - Simple, user-friendly message input (can use string content)
/// 2. `Item` - Structured items with proper type discrimination (including InputMessage, OutputMessage, tool calls)
/// 3. `ItemReferenceParam` - Reference to an existing item by ID (type can be null)
///
/// Uses untagged deserialization because these types overlap in structure.
/// Order matters: more specific structures are tried first.
///
/// # OpenAPI Specification
/// Corresponds to the `InputItem` schema: `anyOf[EasyInputMessage, Item, ItemReferenceParam]`
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputItem {
    /// A reference to an existing item by ID.
    /// Has a required `id` field and optional `type` (can be "item_reference" or null).
    /// Must be tried first as it's the most minimal structure.
    ItemReference(ItemReference),

    /// All structured items with proper type discrimination.
    /// Includes InputMessage, OutputMessage, and all tool calls/outputs.
    /// Uses the discriminated `Item` enum for efficient, type-safe deserialization.
    Item(Item),

    /// A simple, user-friendly message input (EasyInputMessage).
    /// Supports string content and can include assistant role for previous responses.
    /// Must be tried last as it's the most flexible structure.
    ///
    /// A message input to the model with a role indicating instruction following
    /// hierarchy. Instructions given with the `developer` or `system` role take
    /// precedence over instructions given with the `user` role. Messages with the
    /// `assistant` role are presumed to have been generated by the model in previous
    /// interactions.
    EasyMessage(EasyInputMessage),
}
376
/// A message item used within the `Item` enum.
///
/// Both InputMessage and OutputMessage have `type: "message"`, so we use an untagged
/// enum to distinguish them based on their structure:
/// - OutputMessage: role=assistant, required id & status fields
/// - InputMessage: role=user/system/developer, content is `Vec<ContentType>`, optional id/status
///
/// Note: EasyInputMessage is NOT included here - it's a separate variant in `InputItem`,
/// not part of the structured `Item` enum.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum MessageItem {
    /// An output message from the model (role: assistant, has required id & status).
    /// This must come first as it has the most specific structure (required id and status fields).
    Output(OutputMessage),

    /// A structured input message (role: user/system/developer, content is `Vec<ContentType>`).
    /// Has structured content list and optional id/status fields.
    ///
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the `developer` or `system` role take precedence over instructions
    /// given with the `user` role.
    Input(InputMessage),
}
401
/// A reference to an existing item by ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ItemReference {
    /// The type of item to reference. Can be "item_reference" or null.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub r#type: Option<ItemReferenceType>,
    /// The ID of the item to reference.
    pub id: String,
}
411
412#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
413#[serde(rename_all = "snake_case")]
414pub enum ItemReferenceType {
415 ItemReference,
416}
417
/// Output from a function call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionCallOutputItemParam {
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// Text, image, or file output of the function tool call.
    pub output: FunctionCallOutput,
    /// The unique ID of the function tool call output.
    /// Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
434
/// The payload of a function tool call output: a plain string or structured content.
///
/// Untagged: a JSON string deserializes to `Text`, a JSON array to `Content`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum FunctionCallOutput {
    /// A JSON string of the output of the function tool call.
    Text(String),
    /// Structured content items.
    Content(Vec<InputContent>), // TODO use shape which allows null from OpenAPI spec?
}
442
/// Output from a computer tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallOutputItemParam {
    /// The ID of the computer tool call that produced the output.
    pub call_id: String,
    /// A computer screenshot image used with the computer use tool.
    pub output: ComputerScreenshotImage,
    /// The safety checks reported by the API that have been acknowledged by the developer.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acknowledged_safety_checks: Option<Vec<ComputerCallSafetyCheckParam>>,
    /// The unique ID of the computer tool call output. Optional when creating.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the message input. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when input items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
460
461#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
462#[serde(rename_all = "snake_case")]
463pub enum ComputerScreenshotImageType {
464 ComputerScreenshot,
465}
466
/// A computer screenshot image used with the computer use tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerScreenshotImage {
    /// Specifies the event type. For a computer screenshot, this property is always
    /// set to `computer_screenshot`.
    pub r#type: ComputerScreenshotImageType,
    /// The identifier of an uploaded file that contains the screenshot.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the screenshot image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
480
/// Output from a local shell tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCallOutput {
    /// The unique ID of the local shell tool call generated by the model.
    pub id: String,

    /// A JSON string of the output of the local shell tool call.
    pub output: String,

    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
494
/// Output from a local shell command execution.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellOutput {
    /// The stdout output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stdout: Option<String>,

    /// The stderr output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stderr: Option<String>,

    /// The exit code of the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exit_code: Option<i32>,
}
510
/// An MCP approval response that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalResponse {
    /// The ID of the approval request being answered.
    pub approval_request_id: String,

    /// Whether the request was approved.
    pub approve: bool,

    /// The unique ID of the approval response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,

    /// Optional reason for the decision.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
528
/// The payload of a custom tool call output: a plain string or structured content.
///
/// Untagged: a JSON string deserializes to `Text`, a JSON array to `List`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum CustomToolCallOutputOutput {
    /// A string of the output of the custom tool call.
    Text(String),
    /// Text, image, or file output of the custom tool call.
    List(Vec<InputContent>),
}
537
/// The output of a custom tool call from your code, being sent back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CustomToolCallOutput {
    /// The call ID, used to map this custom tool call output to a custom tool call.
    pub call_id: String,

    /// The output from the custom tool call generated by your code.
    /// Can be a string or a list of output content.
    pub output: CustomToolCallOutputOutput,

    /// The unique ID of the custom tool call output in the OpenAI platform.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
551
/// A simplified message input to the model (EasyInputMessage in the OpenAPI spec).
///
/// This is the most user-friendly way to provide messages, supporting both simple
/// string content and structured content. Role can include `assistant` for providing
/// previous assistant responses.
///
/// Construct with [`EasyInputMessageArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "EasyInputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct EasyInputMessage {
    /// The type of the message input. Defaults to `message` when omitted in JSON input.
    #[serde(default)]
    pub r#type: MessageType,
    /// The role of the message input. One of `user`, `assistant`, `system`, or `developer`.
    pub role: Role,
    /// Text, image, or audio input to the model, used to generate a response.
    /// Can also contain previous assistant responses.
    pub content: EasyInputContent,
    /// Labels an `assistant` message as intermediate commentary (`commentary`) or
    /// the final answer (`final_answer`). Not used for user messages.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub phase: Option<MessagePhase>,
}
579
/// A structured message input to the model (InputMessage in the OpenAPI spec).
///
/// This variant requires structured content (not a simple string) and does not support
/// the `assistant` role (use OutputMessage for that). `status` is populated when items
/// are returned via API.
///
/// Construct with [`InputMessageArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputMessage {
    /// A list of one or many input items to the model, containing different content types.
    pub content: Vec<InputContent>,
    /// The role of the message input. One of `user`, `system`, or `developer`.
    /// Note: `assistant` is NOT allowed here; use OutputMessage instead.
    pub role: InputRole,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
    // NOTE(review): the spec also defines a `type` field that is always `"message"`;
    // it is intentionally omitted here since `MessageItem` is matched untagged.
}
605
/// The role for an input message - can only be `user`, `system`, or `developer`.
/// This type ensures type safety by excluding the `assistant` role (use OutputMessage for that).
///
/// Defaults to `User`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum InputRole {
    #[default]
    User,
    System,
    Developer,
}
616
/// Content for EasyInputMessage - can be a simple string or structured list.
///
/// Untagged: a JSON string deserializes to `Text`, a JSON array to `ContentList`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum EasyInputContent {
    /// A text input to the model.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    ContentList(Vec<InputContent>),
}
626
/// Parts of a message: text, image, file, or audio.
///
/// Internally tagged on `"type"`: `"input_text"`, `"input_image"`, or `"input_file"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    /// A text input to the model.
    InputText(InputTextContent),
    /// An image input to the model. Learn about
    /// [image inputs](https://platform.openai.com/docs/guides/vision).
    InputImage(InputImageContent),
    /// A file input to the model.
    InputFile(InputFileContent),
}
639
/// A text content part of an input message.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct InputTextContent {
    /// The text input to the model.
    pub text: String,
}
645
/// An image content part of an input message. Construct with [`InputImageArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputImageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputImageContent {
    /// The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`.
    /// Defaults to `auto`.
    pub detail: ImageDetail,
    /// The ID of the file to be sent to the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image
    /// in a data URL.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
666
667#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
668#[builder(
669 name = "InputFileArgs",
670 pattern = "mutable",
671 setter(into, strip_option),
672 default
673)]
674#[builder(build_fn(error = "OpenAIError"))]
675pub struct InputFileContent {
676 /// The content of the file to be sent to the model.
677 #[serde(skip_serializing_if = "Option::is_none")]
678 file_data: Option<String>,
679 /// The ID of the file to be sent to the model.
680 #[serde(skip_serializing_if = "Option::is_none")]
681 file_id: Option<String>,
682 /// The URL of the file to be sent to the model.
683 #[serde(skip_serializing_if = "Option::is_none")]
684 file_url: Option<String>,
685 /// The name of the file to be sent to the model.
686 #[serde(skip_serializing_if = "Option::is_none")]
687 filename: Option<String>,
688}
689
/// The conversation that this response belonged to. Input items and output items from this
/// response were automatically added to this conversation.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Conversation {
    /// The unique ID of the conversation that this response was associated with.
    pub id: String,
}
697
/// Identifies the conversation a response belongs to, by ID or as an object.
///
/// Untagged: a JSON string deserializes to `ConversationID`, an object to `Object`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ConversationParam {
    /// The unique ID of the conversation.
    ConversationID(String),
    /// The conversation that this response belongs to.
    Object(Conversation),
}
706
/// Additional output data that can be requested in a response via `include`.
///
/// Each variant serializes to its dotted-path wire name (e.g. `"file_search_call.results"`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum IncludeEnum {
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    #[serde(rename = "web_search_call.results")]
    WebSearchCallResults,
    #[serde(rename = "web_search_call.action.sources")]
    WebSearchCallActionSources,
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    #[serde(rename = "computer_call_output.output.image_url")]
    ComputerCallOutputOutputImageUrl,
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    #[serde(rename = "message.output_text.logprobs")]
    MessageOutputTextLogprobs,
}
726
/// Options that control streaming behavior of a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseStreamOptions {
    /// When true, stream obfuscation will be enabled. Stream obfuscation adds
    /// random characters to an `obfuscation` field on streaming delta events to
    /// normalize payload sizes as a mitigation to certain side-channel attacks.
    /// These obfuscation fields are included by default, but add a small amount
    /// of overhead to the data stream. You can set `include_obfuscation` to
    /// false to optimize for bandwidth if you trust the network links between
    /// your application and the OpenAI API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_obfuscation: Option<bool>,
}
739
/// The request body for creating a model response via the Responses API.
/// Construct one with the generated [`CreateResponseArgs`] builder.
#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
#[builder(
    name = "CreateResponseArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateResponse {
    /// Whether to run the model response in the background.
    /// [Learn more](https://platform.openai.com/docs/guides/background).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,

    /// The conversation that this response belongs to. Items from this conversation are prepended to
    /// `input_items` for this response request.
    ///
    /// Input items and output items from this response are automatically added to this conversation after
    /// this response completes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<ConversationParam>,

    /// Specify additional output data to include in the model response. Currently supported
    /// values are:
    ///
    /// - `web_search_call.action.sources`: Include the sources of the web search tool call.
    ///
    /// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
    ///   interpreter tool call items.
    ///
    /// - `computer_call_output.output.image_url`: Include image urls from the computer call
    ///   output.
    ///
    /// - `file_search_call.results`: Include the search results of the file search tool call.
    ///
    /// - `message.input_image.image_url`: Include image urls from the input message.
    ///
    /// - `message.output_text.logprobs`: Include logprobs with assistant messages.
    ///
    /// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
    ///   reasoning item outputs. This enables reasoning items to be used in multi-turn
    ///   conversations when using the Responses API statelessly (like when the `store` parameter is
    ///   set to `false`, or when an organization is enrolled in the zero data retention program).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include: Option<Vec<IncludeEnum>>,

    /// Text, image, or file inputs to the model, used to generate a response.
    ///
    /// Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Image inputs](https://platform.openai.com/docs/guides/images)
    /// - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
    /// - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
    /// - [Function calling](https://platform.openai.com/docs/guides/function-calling)
    pub input: InputParam,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When using along with `previous_response_id`, the instructions from a previous
    /// response will not be carried over to the next response. This makes it simple
    /// to swap out system (or developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// An upper bound for the number of tokens that can be generated for a response, including
    /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u32>,

    /// The maximum number of total calls to built-in tools that can be processed in a response. This
    /// maximum number applies across all built-in tool calls, not per individual tool. Any further
    /// attempts to call a tool by the model will be ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tool_calls: Option<u32>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be
    /// useful for storing additional information about the object in a structured
    /// format, and querying for objects via API or the dashboard.
    ///
    /// Keys are strings with a maximum length of 64 characters. Values are
    /// strings with a maximum length of 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
    /// offers a wide range of models with different capabilities, performance
    /// characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
    /// to browse and compare available models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// Reference to a prompt template and its variables.
    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<Prompt>,

    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,

    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_retention: Option<PromptCacheRetention>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
    /// usage policies.
    ///
    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
    /// or email address, in order to avoid sending us any identifying information. [Learn
    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub safety_identifier: Option<String>,

    /// Specifies the processing type used for serving the request.
    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
    /// - When not set, the default behavior is 'auto'.
    ///
    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<ServiceTier>,

    /// Whether to store the generated model response for later retrieval via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub store: Option<bool>,

    /// If set to true, the model response data will be streamed to the client
    /// as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
    /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
    /// for more information.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,

    /// Options for streaming responses. Only set this when you set `stream: true`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream_options: Option<ResponseStreamOptions>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8
    /// will make the output more random, while lower values like 0.2 will make it
    /// more focused and deterministic. We generally recommend altering this or
    /// `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You
    /// can specify which tool to use by setting the `tool_choice` parameter.
    ///
    /// We support the following categories of tools:
    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
    /// - **Function calls (custom tools)**: Functions that are defined by you,
    ///   enabling the model to call your own code with strongly typed arguments
    ///   and outputs. Learn more about
    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
    ///   custom tools to call your own code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
    /// token position, each with an associated log probability.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u8>,

    /// An alternative to sampling with temperature, called nucleus sampling,
    /// where the model considers the results of the tokens with top_p probability
    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
    /// are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,
}
959
/// A substitution value for a variable in a [`Prompt`] template.
///
/// Serialized untagged: the JSON shape alone determines the variant, so a
/// plain string round-trips as a JSON string, rich inputs as their content
/// object, and anything else as raw JSON.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ResponsePromptVariables {
    /// A plain string substitution value.
    String(String),
    /// A Response input content value, such as an image or file.
    Content(InputContent),
    /// Any other JSON value, passed through verbatim.
    Custom(serde_json::Value),
}
967
/// Reference to a reusable prompt template and its substitution variables.
/// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Prompt {
    /// The unique identifier of the prompt template to use.
    pub id: String,

    /// Optional version of the prompt template.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,

    /// Optional map of values to substitute in for variables in your
    /// prompt. The substitution values can either be strings, or other
    /// Response input types like images or files.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variables: Option<ResponsePromptVariables>,
}
983
/// The processing tier used to serve a request.
/// Serialized in lowercase (e.g. `"auto"`, `"priority"`); defaults to `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ServiceTier {
    /// Use the service tier configured in the Project settings.
    #[default]
    Auto,
    /// Standard pricing and performance for the selected model.
    Default,
    /// [Flex processing](https://platform.openai.com/docs/guides/flex-processing).
    Flex,
    /// Scale tier processing.
    Scale,
    /// [Priority processing](https://openai.com/api-priority-processing/).
    Priority,
}
994
/// Truncation strategies for the model response. Serialized in lowercase.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Truncation {
    /// Drop items from the beginning of the conversation to fit the context window.
    Auto,
    /// Fail with a 400 error if the input exceeds the context window (API default).
    Disabled,
}
1002
/// Billing information attached to a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Billing {
    /// The party that pays for this response.
    // NOTE(review): the set of possible values is not shown here — confirm
    // against the API reference.
    pub payer: String,
}
1007
/// Reasoning settings for gpt-5 and o-series models.
/// Construct with the generated [`ReasoningArgs`] builder.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ReasoningArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct Reasoning {
    /// Constrains effort on reasoning for
    /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
    /// reasoning effort can result in faster responses and fewer tokens used
    /// on reasoning in a response.
    ///
    /// Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option<ReasoningEffort>,
    /// A summary of the reasoning performed by the model. This can be
    /// useful for debugging and understanding the model's reasoning process.
    /// One of `auto`, `concise`, or `detailed`.
    ///
    /// `concise` is supported for `computer-use-preview` models and all reasoning models after
    /// `gpt-5`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummary>,
}
1036
/// Verbosity level for the model's text response. Lower values produce more
/// concise responses, higher values more verbose ones. Serialized in lowercase.
/// (Used by `ResponseTextParam::verbosity`.)
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Verbosity {
    Low,
    Medium,
    High,
}
1045
/// Level of detail for the reasoning summary produced by the model.
/// Serialized in lowercase. See `Reasoning::summary`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ReasoningSummary {
    /// Let the model pick the summary detail level.
    Auto,
    Concise,
    Detailed,
}
1053
/// The retention policy for the prompt cache.
/// See `CreateResponse::prompt_cache_retention`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum PromptCacheRetention {
    /// Standard in-memory retention; serialized as `"in_memory"`.
    #[serde(rename = "in_memory")]
    InMemory,
    /// Extended retention keeping cached prefixes active for up to 24 hours;
    /// serialized as `"24h"`.
    #[serde(rename = "24h")]
    Hours24,
}
1062
/// Configuration options for a text response from the model: output format
/// plus an optional verbosity constraint.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTextParam {
    /// An object specifying the format that the model must output.
    ///
    /// Configuring `{ "type": "json_schema" }` enables Structured Outputs,
    /// which ensures the model will match your supplied JSON schema. Learn more in the
    /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
    ///
    /// The default format is `{ "type": "text" }` with no additional options.
    ///
    /// **Not recommended for gpt-4o and newer models:**
    ///
    /// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
    /// ensures the message the model generates is valid JSON. Using `json_schema`
    /// is preferred for models that support it.
    pub format: TextResponseFormatConfiguration,

    /// Constrains the verbosity of the model's response. Lower values will result in
    /// more concise responses, while higher values will result in more verbose responses.
    ///
    /// Currently supported values are `low`, `medium`, and `high`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<Verbosity>,
}
1088
/// The output format the model must produce.
/// Serialized with a snake_case `"type"` tag (e.g. `{ "type": "json_schema", ... }`).
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum TextResponseFormatConfiguration {
    /// Default response format. Used to generate text responses.
    Text,
    /// JSON object response format. An older method of generating JSON responses.
    /// Using `json_schema` is recommended for models that support it.
    /// Note that the model will not generate JSON without a system or user message
    /// instructing it to do so.
    JsonObject,
    /// JSON Schema response format. Used to generate structured JSON responses.
    /// Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
    JsonSchema(ResponseFormatJsonSchema),
}
1103
/// Definitions for model-callable tools.
/// Serialized with a snake_case `"type"` tag; unit variants such as
/// `LocalShell` serialize as `{ "type": "local_shell" }` with no payload.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Tool {
    /// Defines a function in your own code the model can choose to call. Learn more about [function
    /// calling](https://platform.openai.com/docs/guides/tools).
    Function(FunctionTool),
    /// A tool that searches for relevant content from uploaded files. Learn more about the [file search
    /// tool](https://platform.openai.com/docs/guides/tools-file-search).
    FileSearch(FileSearchTool),
    /// A tool that controls a virtual computer. Learn more about the [computer
    /// use tool](https://platform.openai.com/docs/guides/tools-computer-use).
    ComputerUsePreview(ComputerUsePreviewTool),
    /// Search the Internet for sources related to the prompt. Learn more about the
    /// [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearch(WebSearchTool),
    /// Dated web search variant; serialized as `web_search_2025_08_26`.
    #[serde(rename = "web_search_2025_08_26")]
    WebSearch20250826(WebSearchTool),
    /// Give the model access to additional tools via remote Model Context Protocol
    /// (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
    Mcp(MCPTool),
    /// A tool that runs Python code to help generate a response to a prompt.
    CodeInterpreter(CodeInterpreterTool),
    /// A tool that generates images using a model like `gpt-image-1`.
    ImageGeneration(ImageGenTool),
    /// A tool that allows the model to execute shell commands in a local environment.
    LocalShell,
    /// A tool that allows the model to execute shell commands.
    Shell(FunctionShellToolParam),
    /// A custom tool that processes input using a specified format. Learn more about [custom
    /// tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
    Custom(CustomToolParam),
    /// A tool that controls a virtual computer. Learn more about the
    /// [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
    Computer(ComputerTool),
    /// Groups function/custom tools under a shared namespace.
    Namespace(NamespaceToolParam),
    /// Hosted or BYOT tool search configuration for deferred tools.
    ToolSearch(ToolSearchToolParam),
    /// This tool searches the web for relevant results to use in a response. Learn more about the [web search
    /// tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearchPreview(WebSearchTool),
    /// Dated preview variant; serialized as `web_search_preview_2025_03_11`.
    #[serde(rename = "web_search_preview_2025_03_11")]
    WebSearchPreview20250311(WebSearchTool),
    /// Allows the assistant to create, delete, or update files using unified diffs.
    ApplyPatch,
}
1153
1154#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1155pub struct CustomToolParam {
1156 /// The name of the custom tool, used to identify it in tool calls.
1157 pub name: String,
1158 /// Optional description of the custom tool, used to provide more context.
1159 pub description: Option<String>,
1160 /// The input format for the custom tool. Default is unconstrained text.
1161 pub format: CustomToolParamFormat,
1162 /// Whether this tool should be deferred and discovered via tool search.
1163 #[serde(skip_serializing_if = "Option::is_none")]
1164 pub defer_loading: Option<bool>,
1165}
1166
/// The input format for a custom tool.
/// Serialized with a lowercase `"type"` tag: `{ "type": "text" }` or
/// `{ "type": "grammar", ... }`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum CustomToolParamFormat {
    /// Unconstrained free-form text.
    #[default]
    Text,
    /// A grammar defined by the user.
    Grammar(CustomGrammarFormatParam),
}
1176
/// A tool that searches for relevant content from uploaded files. Learn more about the
/// [file search tool](https://platform.openai.com/docs/guides/tools-file-search).
/// Construct with the generated [`FileSearchToolArgs`] builder.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "FileSearchToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct FileSearchTool {
    /// The IDs of the vector stores to search.
    pub vector_store_ids: Vec<String>,
    /// The maximum number of results to return. This number should be between 1 and 50 inclusive.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_num_results: Option<u32>,
    /// A filter to apply.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filters: Option<Filter>,
    /// Ranking options for search.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ranking_options: Option<RankingOptions>,
}
1198
1199#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1200#[builder(
1201 name = "FunctionToolArgs",
1202 pattern = "mutable",
1203 setter(into, strip_option),
1204 default
1205)]
1206pub struct FunctionTool {
1207 /// The name of the function to call.
1208 pub name: String,
1209 /// A JSON schema object describing the parameters of the function.
1210 #[serde(skip_serializing_if = "Option::is_none")]
1211 pub parameters: Option<serde_json::Value>,
1212 /// Whether to enforce strict parameter validation. Default `true`.
1213 #[serde(skip_serializing_if = "Option::is_none")]
1214 pub strict: Option<bool>,
1215 /// A description of the function. Used by the model to determine whether or not to call the
1216 /// function.
1217 #[serde(skip_serializing_if = "Option::is_none")]
1218 pub description: Option<String>,
1219 /// Whether this function is deferred and loaded via tool search.
1220 #[serde(skip_serializing_if = "Option::is_none")]
1221 pub defer_loading: Option<bool>,
1222}
1223
/// Domain filters for the web search tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolFilters {
    /// Allowed domains for the search. If not provided, all domains are allowed.
    /// Subdomains of the provided domains are allowed as well.
    ///
    /// Example: `["pubmed.ncbi.nlm.nih.gov"]`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allowed_domains: Option<Vec<String>>,
}
1233
1234#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1235#[builder(
1236 name = "WebSearchToolArgs",
1237 pattern = "mutable",
1238 setter(into, strip_option),
1239 default
1240)]
1241pub struct WebSearchTool {
1242 /// Filters for the search.
1243 #[serde(skip_serializing_if = "Option::is_none")]
1244 pub filters: Option<WebSearchToolFilters>,
1245 /// The approximate location of the user.
1246 #[serde(skip_serializing_if = "Option::is_none")]
1247 pub user_location: Option<WebSearchApproximateLocation>,
1248 /// High level guidance for the amount of context window space to use for the search. One of `low`,
1249 /// `medium`, or `high`. `medium` is the default.
1250 #[serde(skip_serializing_if = "Option::is_none")]
1251 pub search_context_size: Option<WebSearchToolSearchContextSize>,
1252 /// The types of content to search for.
1253 #[serde(skip_serializing_if = "Option::is_none")]
1254 pub search_content_types: Option<Vec<SearchContentType>>,
1255}
1256
/// Amount of context window space to use for web search.
/// Serialized in lowercase; defaults to `medium`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchToolSearchContextSize {
    Low,
    #[default]
    Medium,
    High,
}
1265
/// The type of computer environment the computer-use tool controls.
/// Serialized in lowercase; defaults to `browser`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ComputerEnvironment {
    Windows,
    Mac,
    Linux,
    Ubuntu,
    #[default]
    Browser,
}
1276
1277#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1278#[builder(
1279 name = "ComputerUsePreviewToolArgs",
1280 pattern = "mutable",
1281 setter(into, strip_option),
1282 default
1283)]
1284pub struct ComputerUsePreviewTool {
1285 /// The type of computer environment to control.
1286 environment: ComputerEnvironment,
1287 /// The width of the computer display.
1288 display_width: u32,
1289 /// The height of the computer display.
1290 display_height: u32,
1291}
1292
/// The ranker version to use for file search.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum RankVersionType {
    /// Let the service pick the ranker; serialized as `"auto"`.
    #[serde(rename = "auto")]
    Auto,
    /// Pinned ranker version; serialized as `"default-2024-11-15"`.
    #[serde(rename = "default-2024-11-15")]
    Default20241115,
}
1300
/// Weights for reciprocal rank fusion when hybrid (embedding + keyword) search
/// is enabled. See `RankingOptions::hybrid_search`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HybridSearch {
    /// The weight of the embedding in the reciprocal ranking fusion.
    pub embedding_weight: f32,
    /// The weight of the text in the reciprocal ranking fusion.
    pub text_weight: f32,
}
1308
/// Options for search result ranking.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RankingOptions {
    /// Weights that control how reciprocal rank fusion balances semantic embedding matches versus
    /// sparse keyword matches when hybrid search is enabled.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hybrid_search: Option<HybridSearch>,
    /// The ranker to use for the file search.
    pub ranker: RankVersionType,
    /// The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will
    /// attempt to return only the most relevant results, but may return fewer results.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub score_threshold: Option<f32>,
}
1323
/// Discriminator for the user-location object; the only supported value is
/// `"approximate"`, which is also the default.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchApproximateLocationType {
    #[default]
    Approximate,
}
1330
/// Approximate user location for web search.
/// Construct with the generated [`WebSearchApproximateLocationArgs`] builder.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "WebSearchApproximateLocationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct WebSearchApproximateLocation {
    /// The type of location approximation. Defaults to `approximate` when omitted in JSON input.
    #[serde(default)]
    pub r#type: WebSearchApproximateLocationType,
    /// Free text input for the city of the user, e.g. `San Francisco`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub city: Option<String>,
    /// The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,
    /// e.g. `US`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub country: Option<String>,
    /// Free text input for the region of the user, e.g. `California`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub region: Option<String>,
    /// The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g.
    /// `America/Los_Angeles`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timezone: Option<String>,
}
1359
/// Container configuration for a code interpreter.
/// `Auto` serializes with a `"type": "auto"` tag; a bare container ID string is
/// accepted/emitted untagged.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolContainer {
    /// Configuration for a code interpreter container. Optionally specify the IDs of the
    /// files to run the code on.
    Auto(CodeInterpreterContainerAuto),

    /// The container ID.
    #[serde(untagged)]
    ContainerID(String),
}
1372
/// Auto configuration for code interpreter container.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct CodeInterpreterContainerAuto {
    /// An optional list of uploaded files to make available to your code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_ids: Option<Vec<String>>,

    /// Optional memory limit for the container.
    // NOTE(review): units are not specified here (bytes? MiB?) — confirm
    // against the API reference before documenting a unit.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_limit: Option<u64>,
}
1383
/// A tool that runs Python code to help generate a response to a prompt.
/// Construct with the generated [`CodeInterpreterToolArgs`] builder.
// NOTE(review): `derive(Default)` and `#[builder(default)]` require
// `CodeInterpreterToolContainer: Default`, which its derive list in this file
// does not provide — confirm a manual `Default` impl exists elsewhere.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "CodeInterpreterToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CodeInterpreterTool {
    /// The code interpreter container. Can be a container ID or an object that
    /// specifies uploaded file IDs to make available to your code, along with an
    /// optional `memory_limit` setting.
    pub container: CodeInterpreterToolContainer,
}
1398
/// Optional mask for inpainting with the image generation tool.
/// Provide either `image_url` or `file_id`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolInputImageMask {
    /// Base64-encoded mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
    /// File ID for the mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
}
1408
1409#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1410#[serde(rename_all = "lowercase")]
1411pub enum InputFidelity {
1412 #[default]
1413 High,
1414 Low,
1415}
1416
/// Moderation level for generated images. Serialized in lowercase;
/// defaults to `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolModeration {
    #[default]
    Auto,
    Low,
}
1424
/// Whether to generate a new image or edit an existing image.
/// Serialized in lowercase; defaults to `auto`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenActionEnum {
    /// Generate a new image.
    Generate,
    /// Edit an existing image.
    Edit,
    /// Automatically determine whether to generate or edit.
    #[default]
    Auto,
}
1437
/// Image generation tool definition.
/// Construct with the generated [`ImageGenerationArgs`] builder.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ImageGenerationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct ImageGenTool {
    /// Background type for the generated image. One of `transparent`,
    /// `opaque`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<ImageGenToolBackground>,
    /// Control how much effort the model will exert to match the style and features, especially facial features,
    /// of input images. This parameter is only supported for `gpt-image-1`. Unsupported
    /// for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_fidelity: Option<InputFidelity>,
    /// Optional mask for inpainting. Contains `image_url`
    /// (string, optional) and `file_id` (string, optional).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_image_mask: Option<ImageGenToolInputImageMask>,
    /// The image generation model to use. Default: `gpt-image-1`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Moderation level for the generated image. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub moderation: Option<ImageGenToolModeration>,
    /// Compression level for the output image. Default: 100.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_compression: Option<u8>,
    /// The output format of the generated image. One of `png`, `webp`, or
    /// `jpeg`. Default: `png`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_format: Option<ImageGenToolOutputFormat>,
    /// Number of partial images to generate in streaming mode, from 0 (default value) to 3.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub partial_images: Option<u8>,
    /// The quality of the generated image. One of `low`, `medium`, `high`,
    /// or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quality: Option<ImageGenToolQuality>,
    /// The size of the generated image. One of `1024x1024`, `1024x1536`,
    /// `1536x1024`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<ImageGenToolSize>,
    /// Whether to generate a new image or edit an existing image. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub action: Option<ImageGenActionEnum>,
}
1489
/// Background style for images produced by the image generation tool.
/// Serialized in lowercase as `transparent`, `opaque`, or `auto` (the default).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolBackground {
    Transparent,
    Opaque,
    #[default]
    Auto,
}
1498
/// File format for images produced by the image generation tool.
/// Serialized in lowercase as `png` (the default), `webp`, or `jpeg`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolOutputFormat {
    #[default]
    Png,
    Webp,
    Jpeg,
}
1507
/// Quality level for images produced by the image generation tool.
/// Serialized in lowercase as `low`, `medium`, `high`, or `auto` (the default).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolQuality {
    Low,
    Medium,
    High,
    #[default]
    Auto,
}
1517
/// Dimensions for images produced by the image generation tool.
/// `Auto` (the default) serializes as `auto`; the fixed sizes use explicit
/// per-variant renames because their wire names start with a digit.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolSize {
    #[default]
    Auto,
    #[serde(rename = "1024x1024")]
    Size1024x1024,
    #[serde(rename = "1024x1536")]
    Size1024x1536,
    #[serde(rename = "1536x1024")]
    Size1536x1024,
}
1530
/// How the model may use a pre-defined set of allowed tools.
/// `auto` lets it choose freely; `required` forces at least one tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceAllowedMode {
    Auto,
    Required,
}
1537
/// Payload for the `allowed_tools` tool-choice option: restricts the model to
/// a caller-supplied subset of the available tools.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceAllowed {
    /// Constrains the tools available to the model to a pre-defined set.
    ///
    /// `auto` allows the model to pick from among the allowed tools and generate a
    /// message.
    ///
    /// `required` requires the model to call one or more of the allowed tools.
    pub mode: ToolChoiceAllowedMode,
    /// A list of tool definitions that the model should be allowed to call.
    ///
    /// For the Responses API, the list of tool definitions might look like:
    /// ```json
    /// [
    ///   { "type": "function", "name": "get_weather" },
    ///   { "type": "mcp", "server_label": "deepwiki" },
    ///   { "type": "image_generation" }
    /// ]
    /// ```
    pub tools: Vec<serde_json::Value>,
}
1559
/// The type of hosted tool the model should use. Learn more about
/// [built-in tools](https://platform.openai.com/docs/guides/tools).
///
/// Serialized as an object with only a `type` discriminator, e.g.
/// `{"type": "file_search"}`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceTypes {
    FileSearch,
    WebSearchPreview,
    Computer,
    ComputerUsePreview,
    ComputerUse,
    // The dated variant cannot be derived from `snake_case`, so it is renamed explicitly.
    #[serde(rename = "web_search_preview_2025_03_11")]
    WebSearchPreview20250311,
    CodeInterpreter,
    ImageGeneration,
}
1575
/// Payload for the `function` tool-choice option: forces the model to call a
/// specific function by name.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceFunction {
    /// The name of the function to call.
    pub name: String,
}
1581
/// Payload for the `mcp` tool-choice option: forces the model to call a
/// specific tool on a remote MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceMCP {
    /// The name of the tool to call on the server.
    pub name: String,
    /// The label of the MCP server to use.
    pub server_label: String,
}
1589
/// Payload for the `custom` tool-choice option: forces the model to call a
/// specific custom tool by name.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceCustom {
    /// The name of the custom tool to call.
    pub name: String,
}
1595
/// The `tool_choice` parameter: controls how (and whether) the model calls tools.
///
/// Tagged variants serialize as objects with a `type` discriminator
/// (e.g. `{"type": "function", "name": "..."}`); the two `untagged` variants
/// cover hosted-tool objects and the bare-string modes (`"none"`/`"auto"`/`"required"`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceParam {
    /// Constrains the tools available to the model to a pre-defined set.
    AllowedTools(ToolChoiceAllowed),

    /// Use this option to force the model to call a specific function.
    Function(ToolChoiceFunction),

    /// Use this option to force the model to call a specific tool on a remote MCP server.
    Mcp(ToolChoiceMCP),

    /// Use this option to force the model to call a custom tool.
    Custom(ToolChoiceCustom),

    /// Forces the model to call the apply_patch tool when executing a tool call.
    ApplyPatch,

    /// Forces the model to call the function shell tool when a tool call is required.
    Shell,

    /// Indicates that the model should use a built-in tool to generate a response.
    /// [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
    #[serde(untagged)]
    Hosted(ToolChoiceTypes),

    /// Controls which (if any) tool is called by the model.
    ///
    /// `none` means the model will not call any tool and instead generates a message.
    ///
    /// `auto` means the model can pick between generating a message or calling one or
    /// more tools.
    ///
    /// `required` means the model must call one or more tools.
    #[serde(untagged)]
    Mode(ToolChoiceOptions),
}
1633
/// Bare-string tool-choice modes, serialized in lowercase:
/// `none`, `auto`, or `required`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceOptions {
    None,
    Auto,
    Required,
}
1641
/// An error that occurred while generating the response.
/// Mirrors the `error` object on a Response resource.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ErrorObject {
    /// A machine-readable error code that was returned.
    pub code: String,
    /// A human-readable description of the error that was returned.
    pub message: String,
}
1650
/// Details about an incomplete response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct IncompleteDetails {
    /// The reason why the response is incomplete.
    // Kept as a free-form string rather than an enum so new server-side
    // reasons do not break deserialization.
    pub reason: String,
}
1657
/// One alternative token candidate within a [`LogProb`] entry.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TopLogProb {
    /// The byte representation of the token.
    pub bytes: Vec<u8>,
    /// The log probability of the token.
    pub logprob: f64,
    /// The token text.
    pub token: String,
}
1664
/// Log-probability information for a single generated token, including the
/// most likely alternative tokens at that position.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LogProb {
    /// The byte representation of the token.
    pub bytes: Vec<u8>,
    /// The log probability of the token.
    pub logprob: f64,
    /// The token text.
    pub token: String,
    /// The most likely alternative tokens and their log probabilities.
    pub top_logprobs: Vec<TopLogProb>,
}
1672
/// One alternative token candidate within a [`ResponseLogProb`] entry.
// NOTE(review): the type name carries a historical misspelling ("LobProb");
// renaming it would break the public API, so it is kept as-is.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTopLobProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
}
1680
/// Log-probability information for a single generated token, as reported on
/// response output items.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseLogProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
    /// The log probability of the top 20 most likely tokens.
    pub top_logprobs: Vec<ResponseTopLobProb>,
}
1690
/// A simple text output from the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputTextContent {
    /// The annotations of the text output.
    pub annotations: Vec<Annotation>,
    /// Token log probabilities, when requested.
    // NOTE(review): unlike most optional fields in this file, this one has no
    // `skip_serializing_if`, so `None` serializes as `"logprobs": null` —
    // confirm the API accepts that when echoing items back as input.
    pub logprobs: Option<Vec<LogProb>>,
    /// The text output from the model.
    pub text: String,
}
1700
/// An annotation that applies to a span of output text.
/// Serialized with a `type` discriminator (`file_citation`, `url_citation`,
/// `container_file_citation`, or `file_path`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Annotation {
    /// A citation to a file.
    FileCitation(FileCitationBody),
    /// A citation for a web resource used to generate a model response.
    UrlCitation(UrlCitationBody),
    /// A citation for a container file used to generate a model response.
    ContainerFileCitation(ContainerFileCitationBody),
    /// A path to a file.
    FilePath(FilePath),
}
1714
1715#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1716pub struct FileCitationBody {
1717 /// The ID of the file.
1718 file_id: String,
1719 /// The filename of the file cited.
1720 filename: String,
1721 /// The index of the file in the list of files.
1722 index: u32,
1723}
1724
1725#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1726pub struct UrlCitationBody {
1727 /// The index of the last character of the URL citation in the message.
1728 end_index: u32,
1729 /// The index of the first character of the URL citation in the message.
1730 start_index: u32,
1731 /// The title of the web resource.
1732 title: String,
1733 /// The URL of the web resource.
1734 url: String,
1735}
1736
1737#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1738pub struct ContainerFileCitationBody {
1739 /// The ID of the container file.
1740 container_id: String,
1741 /// The index of the last character of the container file citation in the message.
1742 end_index: u32,
1743 /// The ID of the file.
1744 file_id: String,
1745 /// The filename of the container file cited.
1746 filename: String,
1747 /// The index of the first character of the container file citation in the message.
1748 start_index: u32,
1749}
1750
1751#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1752pub struct FilePath {
1753 /// The ID of the file.
1754 file_id: String,
1755 /// The index of the file in the list of files.
1756 index: u32,
1757}
1758
/// A refusal explanation from the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RefusalContent {
    /// The refusal explanation from the model.
    pub refusal: String,
}
1765
/// A message generated by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputMessage {
    /// The content of the output message.
    pub content: Vec<OutputMessageContent>,
    /// The unique ID of the output message.
    pub id: String,
    /// The role of the output message. Always `assistant`.
    pub role: AssistantRole,
    /// Labels this assistant message as intermediate commentary (`commentary`) or
    /// the final answer (`final_answer`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub phase: Option<MessagePhase>,
    /// The status of the message. One of `in_progress`, `completed`, or
    /// `incomplete`. Populated when input items are returned via API.
    pub status: OutputStatus,
    // NOTE(review): the wire `type` field (always `message`) appears to be
    // handled by an enclosing tagged enum rather than stored here — confirm
    // before re-enabling the commented field below.
    ///// The type of the output message. Always `message`.
    //pub r#type: MessageType,
}
1785
/// The type discriminator of an output message. Always `message`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum MessageType {
    #[default]
    Message,
}
1792
/// The role for an output message - always `assistant`.
/// This type ensures type safety by only allowing the assistant role.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum AssistantRole {
    #[default]
    Assistant,
}
1801
/// A content part of an output message, tagged by `type`
/// (`output_text` or `refusal`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputMessageContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
}
1810
/// A content part that may also carry reasoning text, tagged by `type`
/// (`output_text`, `refusal`, or `reasoning_text`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
    /// Reasoning text from the model.
    ReasoningText(ReasoningTextContent),
}
1821
/// A chunk of reasoning text emitted by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningTextContent {
    /// The reasoning text from the model.
    pub text: String,
}
1827
/// A reasoning item representing the model's chain of thought, including summary paragraphs.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningItem {
    /// Unique identifier of the reasoning content.
    pub id: String,
    /// Reasoning summary content.
    pub summary: Vec<SummaryPart>,
    /// Reasoning text content.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<Vec<ReasoningTextContent>>,
    /// The encrypted content of the reasoning item - populated when a response is generated with
    /// `reasoning.encrypted_content` in the `include` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encrypted_content: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
1847
/// One part of a reasoning summary, tagged by `type` (currently only `summary_text`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SummaryPart {
    SummaryText(SummaryTextContent),
}
1853
/// File search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCall {
    /// The unique ID of the file search tool call.
    pub id: String,
    /// The queries used to search for files.
    pub queries: Vec<String>,
    /// The status of the file search tool call. One of `in_progress`, `searching`,
    /// `incomplete`, `failed`, or `completed`.
    pub status: FileSearchToolCallStatus,
    /// The results of the file search tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub results: Option<Vec<FileSearchToolCallResult>>,
}
1868
/// Lifecycle status of a file search tool call, serialized in snake_case.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FileSearchToolCallStatus {
    InProgress,
    Searching,
    Incomplete,
    Failed,
    Completed,
}
1878
/// A single result from a file search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCallResult {
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for
    /// storing additional information about the object in a structured format, and querying
    /// for objects via API or the dashboard. Keys are strings with a maximum length of 64
    /// characters. Values are strings with a maximum length of 512 characters, booleans, or
    /// numbers.
    pub attributes: HashMap<String, serde_json::Value>,
    /// The unique ID of the file.
    pub file_id: String,
    /// The name of the file.
    pub filename: String,
    /// The relevance score of the file - a value between 0 and 1.
    pub score: f32,
    /// The text that was retrieved from the file.
    pub text: String,
}
1896
/// A pending safety check attached to a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallSafetyCheckParam {
    /// The ID of the pending safety check.
    pub id: String,
    /// The type of the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// Details about the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
1908
/// Lifecycle status of a web search tool call, serialized in snake_case.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum WebSearchToolCallStatus {
    InProgress,
    Searching,
    Completed,
    Failed,
}
1917
/// A source consulted during a web search action.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionSearchSource {
    /// The type of source. Always `url`.
    pub r#type: String,
    /// The URL of the source.
    pub url: String,
}
1925
/// A "search" web search action: performs a web search query.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionSearch {
    /// The search query.
    pub query: String,
    /// The sources used in the search.
    // NOTE(review): no `skip_serializing_if` here, so `None` serializes as
    // `"sources": null`, unlike most optional fields in this file — confirm intended.
    pub sources: Option<Vec<WebSearchActionSearchSource>>,
}
1933
/// An "open_page" web search action: opens a specific URL from search results.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionOpenPage {
    /// The URL opened by the model.
    // NOTE(review): no `skip_serializing_if`, so `None` serializes as
    // `"url": null` — confirm intended.
    pub url: Option<String>,
}
1939
/// A "find" / "find_in_page" web search action: searches for a pattern within a page.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionFind {
    /// The URL of the page searched for the pattern.
    pub url: String,
    /// The pattern or text to search for within the page.
    pub pattern: String,
}
1947
/// The specific action taken in a web search call, tagged by `type`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum WebSearchToolCallAction {
    /// Action type "search" - Performs a web search query.
    Search(WebSearchActionSearch),
    /// Action type "open_page" - Opens a specific URL from search results.
    OpenPage(WebSearchActionOpenPage),
    /// Action type "find": Searches for a pattern within a loaded page.
    Find(WebSearchActionFind),
    /// Action type "find_in_page": https://platform.openai.com/docs/guides/tools-web-search#output-and-citations
    FindInPage(WebSearchActionFind),
}
1960
/// Web search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolCall {
    /// An object describing the specific action taken in this web search call. Includes
    /// details on how the model used the web (search, open_page, find, find_in_page).
    pub action: WebSearchToolCallAction,
    /// The unique ID of the web search tool call.
    pub id: String,
    /// The status of the web search tool call.
    pub status: WebSearchToolCallStatus,
}
1972
/// Output from a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerToolCall {
    /// The single action to perform.
    // Either `action` or `actions` is populated depending on the tool variant;
    // both are optional here so one struct can represent either shape.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub action: Option<ComputerAction>,
    /// Flattened batched actions for `computer_use`. Each action includes a
    /// `type` discriminator and action-specific fields.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub actions: Option<Vec<ComputerAction>>,
    /// An identifier used when responding to the tool call with output.
    pub call_id: String,
    /// The unique ID of the computer call.
    pub id: String,
    /// The pending safety checks for the computer call.
    pub pending_safety_checks: Vec<ComputerCallSafetyCheckParam>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    pub status: OutputStatus,
}
1992
/// An x/y coordinate pair.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CoordParam {
    /// The x-coordinate.
    pub x: i32,
    /// The y-coordinate.
    pub y: i32,
}
2001
/// Represents all user‐triggered actions for the computer tool,
/// tagged by `type` (`click`, `double_click`, `drag`, `keypress`, `move`,
/// `screenshot`, `scroll`, `type`, `wait`).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ComputerAction {
    /// A click action.
    Click(ClickParam),

    /// A double click action.
    DoubleClick(DoubleClickAction),

    /// A drag action.
    Drag(DragParam),

    /// A collection of keypresses the model would like to perform.
    Keypress(KeyPressAction),

    /// A mouse move action.
    Move(MoveParam),

    /// A screenshot action.
    Screenshot,

    /// A scroll action.
    Scroll(ScrollParam),

    /// An action to type in text.
    Type(TypeParam),

    /// A wait action.
    Wait,
}
2033
/// The mouse button used for a click action, serialized in lowercase.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ClickButtonType {
    Left,
    Right,
    Wheel,
    Back,
    Forward,
}
2043
/// A click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ClickParam {
    /// Indicates which mouse button was pressed during the click. One of `left`,
    /// `right`, `wheel`, `back`, or `forward`.
    pub button: ClickButtonType,
    /// The x-coordinate where the click occurred.
    pub x: i32,
    /// The y-coordinate where the click occurred.
    pub y: i32,
}
2055
/// A double click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DoubleClickAction {
    /// The x-coordinate where the double click occurred.
    pub x: i32,
    /// The y-coordinate where the double click occurred.
    pub y: i32,
}
2064
/// A drag action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DragParam {
    /// An array of coordinates representing the path of the drag action.
    pub path: Vec<CoordParam>,
}
2071
/// A keypress action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KeyPressAction {
    /// The combination of keys the model is requesting to be pressed.
    /// This is an array of strings, each representing a key.
    pub keys: Vec<String>,
}
2079
/// A mouse move action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MoveParam {
    /// The x-coordinate to move to.
    pub x: i32,
    /// The y-coordinate to move to.
    pub y: i32,
}
2088
/// A scroll action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ScrollParam {
    /// The horizontal scroll distance.
    pub scroll_x: i32,
    /// The vertical scroll distance.
    pub scroll_y: i32,
    /// The x-coordinate where the scroll occurred.
    pub x: i32,
    /// The y-coordinate where the scroll occurred.
    pub y: i32,
}
2101
/// A typing (text entry) action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct TypeParam {
    /// The text to type.
    pub text: String,
}
2108
/// A tool call requesting that the client run a function.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionToolCall {
    /// A JSON string of the arguments to pass to the function.
    pub arguments: String,
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// The namespace of the function to run.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
    /// The name of the function to run.
    pub name: String,
    /// The unique ID of the function tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
2128
/// Lifecycle status of an image generation call, serialized in snake_case.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ImageGenToolCallStatus {
    InProgress,
    Completed,
    Generating,
    Failed,
}
2137
/// Output of an image generation tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolCall {
    /// The unique ID of the image generation call.
    pub id: String,
    /// The generated image encoded in base64.
    // NOTE(review): no `skip_serializing_if`, so `None` serializes as
    // `"result": null` — confirm intended.
    pub result: Option<String>,
    /// The status of the image generation call.
    pub status: ImageGenToolCallStatus,
}
2147
/// Lifecycle status of a code interpreter tool call, serialized in snake_case.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum CodeInterpreterToolCallStatus {
    InProgress,
    Completed,
    Incomplete,
    Interpreting,
    Failed,
}
2157
/// Output of a code interpreter request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterToolCall {
    /// The code to run, or null if not available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// ID of the container used to run the code.
    pub container_id: String,
    /// The unique ID of the code interpreter tool call.
    pub id: String,
    /// The outputs generated by the code interpreter, such as logs or images.
    /// Can be null if no outputs are available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub outputs: Option<Vec<CodeInterpreterToolCallOutput>>,
    /// The status of the code interpreter tool call.
    /// Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`.
    pub status: CodeInterpreterToolCallStatus,
}
2176
/// Individual result from a code interpreter: either logs or files.
/// Tagged by `type` (`logs` or `image`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolCallOutput {
    /// Code interpreter output logs
    Logs(CodeInterpreterOutputLogs),
    /// Code interpreter output image
    Image(CodeInterpreterOutputImage),
}
2186
/// Log output produced by a code interpreter run.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputLogs {
    /// The logs output from the code interpreter.
    pub logs: String,
}
2192
/// Image output produced by a code interpreter run.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputImage {
    /// The URL of the image output from the code interpreter.
    pub url: String,
}
2198
2199#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2200pub struct CodeInterpreterFile {
2201 /// The ID of the file.
2202 file_id: String,
2203 /// The MIME type of the file.
2204 mime_type: String,
2205}
2206
/// A tool call requesting that the client execute a local shell command.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCall {
    /// Execute a shell command on the server.
    pub action: LocalShellExecAction,
    /// The unique ID of the local shell tool call generated by the model.
    pub call_id: String,
    /// The unique ID of the local shell call.
    pub id: String,
    /// The status of the local shell call.
    pub status: OutputStatus,
}
2218
/// Define the shape of a local shell action (exec).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellExecAction {
    /// The command to run.
    pub command: Vec<String>,
    /// Environment variables to set for the command.
    pub env: HashMap<String, String>,
    /// Optional timeout in milliseconds for the command.
    // NOTE(review): the optional fields below have no `skip_serializing_if`,
    // so `None` serializes as explicit `null` — confirm intended.
    pub timeout_ms: Option<u64>,
    /// Optional user to run the command as.
    pub user: Option<String>,
    /// Optional working directory to run the command in.
    pub working_directory: Option<String>,
}
2233
/// Commands and limits describing how to run the shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellActionParam {
    /// Ordered shell commands for the execution environment to run.
    pub commands: Vec<String>,
    /// Maximum wall-clock time in milliseconds to allow the shell commands to run.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timeout_ms: Option<u64>,
    /// Maximum number of UTF-8 characters to capture from combined stdout and stderr output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
2246
/// Status values reported for shell tool calls, serialized in snake_case.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FunctionShellCallItemStatus {
    InProgress,
    Completed,
    Incomplete,
}
2255
/// The environment for a shell call item (request side).
/// Tagged by `type` (`local` or `container_reference`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallItemEnvironment {
    /// Use a local computer environment.
    Local(LocalEnvironmentParam),
    /// Reference an existing container by ID.
    ContainerReference(ContainerReferenceParam),
}
2265
/// A tool call representing a request to execute one or more shell commands.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallItemParam {
    /// The unique ID of the shell tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellActionParam,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<FunctionShellCallItemStatus>,
    /// The environment to execute the shell commands in.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub environment: Option<FunctionShellCallItemEnvironment>,
}
2283
/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcomeParam {
    /// The exit code returned by the shell process.
    pub exit_code: i32,
}
2290
/// The exit or timeout outcome associated with this chunk.
/// Tagged by `type` (`timeout` or `exit`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcomeParam {
    Timeout,
    Exit(FunctionShellCallOutputExitOutcomeParam),
}
2298
/// Captured stdout and stderr for a portion of a shell tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContentParam {
    /// Captured stdout output for this chunk of the shell call.
    pub stdout: String,
    /// Captured stderr output for this chunk of the shell call.
    pub stderr: String,
    /// The exit or timeout outcome associated with this chunk.
    pub outcome: FunctionShellCallOutputOutcomeParam,
}
2309
/// The streamed output items emitted by a shell tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputItemParam {
    /// The unique ID of the shell tool call output. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// Captured chunks of stdout and stderr output, along with their associated outcomes.
    pub output: Vec<FunctionShellCallOutputContentParam>,
    /// The maximum number of UTF-8 characters captured for this shell call's combined output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_length: Option<u64>,
}
2324
/// Status values reported for apply_patch tool calls, serialized in snake_case.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchCallStatusParam {
    InProgress,
    Completed,
}
2332
/// Instruction for creating a new file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperationParam {
    /// Path of the file to create relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply when creating the file.
    pub diff: String,
}
2341
/// Instruction for deleting an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperationParam {
    /// Path of the file to delete relative to the workspace root.
    pub path: String,
}
2348
/// Instruction for updating an existing file via the apply_patch tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperationParam {
    /// Path of the file to update relative to the workspace root.
    pub path: String,
    /// Unified diff content to apply to the existing file.
    pub diff: String,
}
2357
/// One of the create_file, delete_file, or update_file operations supplied to the apply_patch tool.
/// Tagged by `type` in snake_case.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperationParam {
    CreateFile(ApplyPatchCreateFileOperationParam),
    DeleteFile(ApplyPatchDeleteFileOperationParam),
    UpdateFile(ApplyPatchUpdateFileOperationParam),
}
2366
/// A tool call representing a request to create, delete, or update files using diff patches.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallItemParam {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatusParam,
    /// The specific create, delete, or update instruction for the apply_patch tool call.
    pub operation: ApplyPatchOperationParam,
}
2380
2381/// Outcome values reported for apply_patch tool call outputs.
2382#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2383#[serde(rename_all = "snake_case")]
2384pub enum ApplyPatchCallOutputStatusParam {
2385 Completed,
2386 Failed,
2387}
2388
/// The streamed output emitted by an apply patch tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallOutputItemParam {
    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
    /// Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call output. One of `completed` or `failed`.
    pub status: ApplyPatchCallOutputStatusParam,
    /// Optional human-readable log text from the apply patch tool (e.g., patch results or errors).
    /// Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output: Option<String>,
}
2403
/// Shell exec action: describes one execution of shell commands.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellAction {
    /// A list of commands to run.
    pub commands: Vec<String>,
    /// Optional timeout in milliseconds for the commands.
    /// Serialized as `null` when absent (no `skip_serializing_if` here).
    pub timeout_ms: Option<u64>,
    /// Optional maximum number of characters to return from each command.
    pub max_output_length: Option<u64>,
}
2415
2416/// Status values reported for function shell tool calls.
2417#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2418#[serde(rename_all = "snake_case")]
2419pub enum LocalShellCallStatus {
2420 InProgress,
2421 Completed,
2422 Incomplete,
2423}
2424
/// The environment for a shell call (response side).
///
/// Serialized with a `type` tag of `local` or `container_reference`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallEnvironment {
    /// A local computer environment.
    Local,
    /// A referenced container.
    ContainerReference(ContainerReferenceResource),
}
2434
/// A tool call that executes one or more shell commands in a managed environment.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCall {
    /// The unique ID of the function shell tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the function shell tool call generated by the model.
    pub call_id: String,
    /// The shell commands and limits that describe how to run the tool call.
    pub action: FunctionShellAction,
    /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
    pub status: LocalShellCallStatus,
    /// The environment in which the shell commands were executed.
    /// Serialized as `null` when absent (no `skip_serializing_if` here).
    pub environment: Option<FunctionShellCallEnvironment>,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2452
/// The content of a shell tool call output that was emitted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputContent {
    /// The standard output that was captured.
    pub stdout: String,
    /// The standard error output that was captured.
    pub stderr: String,
    /// Represents either an exit outcome (with an exit code) or a timeout outcome for a shell call output chunk.
    /// Flattened: the outcome's `type` tag (and `exit_code`, if any) appear
    /// directly alongside `stdout`/`stderr` in the serialized object.
    #[serde(flatten)]
    pub outcome: FunctionShellCallOutputOutcome,
    /// The identifier of the actor that created the item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2467
/// Function shell call outcome.
///
/// Serialized with a `type` tag of `timeout` or `exit`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellCallOutputOutcome {
    /// The shell commands timed out before finishing.
    Timeout,
    /// The shell commands finished and returned an exit code.
    Exit(FunctionShellCallOutputExitOutcome),
}
2475
/// Indicates that the shell commands finished and returned an exit code.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutputExitOutcome {
    /// Exit code from the shell process.
    pub exit_code: i32,
}
2482
/// The output of a shell tool call that was emitted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionShellCallOutput {
    /// The unique ID of the shell call output. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the shell tool call generated by the model.
    pub call_id: String,
    /// An array of shell call output contents.
    pub output: Vec<FunctionShellCallOutputContent>,
    /// The maximum length of the shell command output. This is generated by the model and should be
    /// passed back with the raw output.
    /// Serialized as `null` when absent (no `skip_serializing_if` here).
    pub max_output_length: Option<u64>,
    /// The identifier of the actor that created the item.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2499
2500/// Status values reported for apply_patch tool calls.
2501#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2502#[serde(rename_all = "snake_case")]
2503pub enum ApplyPatchCallStatus {
2504 InProgress,
2505 Completed,
2506}
2507
/// Instruction describing how to create a file via the apply_patch tool.
///
/// Serialized as the `create_file` variant of [`ApplyPatchOperation`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchCreateFileOperation {
    /// Path of the file to create.
    pub path: String,
    /// Diff to apply.
    pub diff: String,
}
2516
/// Instruction describing how to delete a file via the apply_patch tool.
///
/// Serialized as the `delete_file` variant of [`ApplyPatchOperation`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchDeleteFileOperation {
    /// Path of the file to delete.
    pub path: String,
}
2523
/// Instruction describing how to update a file via the apply_patch tool.
///
/// Serialized as the `update_file` variant of [`ApplyPatchOperation`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchUpdateFileOperation {
    /// Path of the file to update.
    pub path: String,
    /// Diff to apply.
    pub diff: String,
}
2532
/// One of the create_file, delete_file, or update_file operations applied via apply_patch.
///
/// Serialized with a `type` tag of `create_file`, `delete_file`, or `update_file`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ApplyPatchOperation {
    /// Create a new file at the given path.
    CreateFile(ApplyPatchCreateFileOperation),
    /// Delete the file at the given path.
    DeleteFile(ApplyPatchDeleteFileOperation),
    /// Apply a diff to an existing file.
    UpdateFile(ApplyPatchUpdateFileOperation),
}
2541
/// A tool call that applies file diffs by creating, deleting, or updating files.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCall {
    /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call. One of `in_progress` or `completed`.
    pub status: ApplyPatchCallStatus,
    /// One of the create_file, delete_file, or update_file operations applied via apply_patch.
    pub operation: ApplyPatchOperation,
    /// The ID of the entity that created this tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2557
2558/// Outcome values reported for apply_patch tool call outputs.
2559#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2560#[serde(rename_all = "snake_case")]
2561pub enum ApplyPatchCallOutputStatus {
2562 Completed,
2563 Failed,
2564}
2565
/// The output emitted by an apply patch tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ApplyPatchToolCallOutput {
    /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
    pub id: String,
    /// The unique ID of the apply patch tool call generated by the model.
    pub call_id: String,
    /// The status of the apply patch tool call output. One of `completed` or `failed`.
    pub status: ApplyPatchCallOutputStatus,
    /// Optional textual output returned by the apply patch tool.
    /// Serialized as `null` when absent (unlike the `*ItemParam` counterpart,
    /// which skips the field).
    pub output: Option<String>,
    /// The ID of the entity that created this tool call output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
2581
/// An invocation of a tool on an MCP server, including its arguments,
/// status, and (once available) output or error.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPToolCall {
    /// A JSON string of the arguments passed to the tool.
    pub arguments: String,
    /// The unique ID of the tool call.
    pub id: String,
    /// The name of the tool that was run.
    pub name: String,
    /// The label of the MCP server running the tool.
    pub server_label: String,
    /// Unique identifier for the MCP tool call approval request. Include this value
    /// in a subsequent `mcp_approval_response` input to approve or reject the corresponding
    /// tool call.
    pub approval_request_id: Option<String>,
    /// Error message from the call, if any.
    pub error: Option<String>,
    /// The output from the tool call.
    pub output: Option<String>,
    /// The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
    /// `calling`, or `failed`.
    pub status: Option<MCPToolCallStatus>,
}
2605
2606#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2607#[serde(rename_all = "snake_case")]
2608pub enum MCPToolCallStatus {
2609 InProgress,
2610 Completed,
2611 Incomplete,
2612 Calling,
2613 Failed,
2614}
2615
/// A list of tools available on an MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPListTools {
    /// The unique ID of the list.
    pub id: String,
    /// The label of the MCP server.
    pub server_label: String,
    /// The tools available on the server.
    pub tools: Vec<MCPListToolsTool>,
    /// Error message if listing failed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
2628
/// A request for human approval of an MCP tool invocation.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalRequest {
    /// JSON string of arguments for the tool.
    pub arguments: String,
    /// The unique ID of the approval request.
    pub id: String,
    /// The name of the tool to run.
    pub name: String,
    /// The label of the MCP server making the request.
    pub server_label: String,
}
2640
/// System (or developer) instructions for the model: either plain text or a
/// list of input items.
///
/// Untagged: deserialization tries each representation in order.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum Instructions {
    /// A text input to the model, equivalent to a text input with the `developer` role.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    Array(Vec<InputItem>),
}
2649
/// The complete response returned by the Responses API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Response {
    /// Whether to run the model response in the background.
    /// [Learn more](https://platform.openai.com/docs/guides/background).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,

    /// Billing information for the response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub billing: Option<Billing>,

    /// The conversation that this response belongs to. Input items and output
    /// items from this response are automatically added to this conversation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<Conversation>,

    /// Unix timestamp (in seconds) when this Response was created.
    pub created_at: u64,

    /// Unix timestamp (in seconds) of when this Response was completed.
    /// Only present when the status is `completed`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub completed_at: Option<u64>,

    /// An error object returned when the model fails to generate a Response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorObject>,

    /// Unique identifier for this response.
    pub id: String,

    /// Details about why the response is incomplete, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub incomplete_details: Option<IncompleteDetails>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When using along with `previous_response_id`, the instructions from a previous response
    /// will not be carried over to the next response. This makes it simple to swap out
    /// system (or developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<Instructions>,

    /// An upper bound for the number of tokens that can be generated for a response,
    /// including visible output tokens and
    /// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u32>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be
    /// useful for storing additional information about the object in a structured
    /// format, and querying for objects via API or the dashboard.
    ///
    /// Keys are strings with a maximum length of 64 characters. Values are strings
    /// with a maximum length of 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: String,

    /// The object type of this resource - always set to `response`.
    pub object: String,

    /// An array of content items generated by the model.
    ///
    /// - The length and order of items in the output array is dependent on the model's response.
    /// - Rather than accessing the first item in the output array and assuming it's an assistant
    ///   message with the content generated by the model, you might consider using
    ///   the `output_text` property where supported in SDKs.
    pub output: Vec<OutputItem>,

    /// SDK-only convenience property that contains the aggregated text output from all
    /// `output_text` items in the `output` array, if any are present.
    /// Supported in the Python and JavaScript SDKs.
    /// Intentionally not modeled here: it is synthesized by those SDKs rather
    /// than returned by the API.
    // #[serde(skip_serializing_if = "Option::is_none")]
    // pub output_text: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// Reference to a prompt template and its variables.
    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<Prompt>,

    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,

    /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
    /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
    /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_retention: Option<PromptCacheRetention>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
    /// usage policies.
    ///
    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
    /// or email address, in order to avoid sending us any identifying information. [Learn
    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub safety_identifier: Option<String>,

    /// Specifies the processing type used for serving the request.
    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
    /// - When not set, the default behavior is 'auto'.
    ///
    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<ServiceTier>,

    /// The status of the response generation.
    /// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or `incomplete`.
    pub status: Status,

    /// What sampling temperature was used, between 0 and 2. Higher values like 0.8 make
    /// outputs more random, lower values like 0.2 make output more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You
    /// can specify which tool to use by setting the `tool_choice` parameter.
    ///
    /// We support the following categories of tools:
    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
    /// - **Function calls (custom tools)**: Functions that are defined by you,
    ///   enabling the model to call your own code with strongly typed arguments
    ///   and outputs. Learn more about
    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
    ///   custom tools to call your own code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
    /// token position, each with an associated log probability.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u8>,

    /// An alternative to sampling with temperature, called nucleus sampling,
    /// where the model considers the results of the tokens with top_p probability
    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
    /// are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,

    /// Represents token usage details including input tokens, output tokens,
    /// a breakdown of output tokens, and the total tokens used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub usage: Option<ResponseUsage>,
}
2852
2853#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2854#[serde(rename_all = "snake_case")]
2855pub enum Status {
2856 Completed,
2857 Failed,
2858 InProgress,
2859 Cancelled,
2860 Queued,
2861 Incomplete,
2862}
2863
/// An item generated by the model as part of a response's `output` array.
///
/// Serialized with a snake_case `type` tag (e.g. `message`, `function_call`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum OutputItem {
    /// An output message from the model.
    Message(OutputMessage),
    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
    /// for more information.
    FileSearchCall(FileSearchToolCall),
    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling)
    /// for more information.
    FunctionCall(FunctionToolCall),
    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search)
    /// for more information.
    WebSearchCall(WebSearchToolCall),
    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
    /// for more information.
    ComputerCall(ComputerToolCall),
    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API for
    /// subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),
    /// A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
    Compaction(CompactionBody),
    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),
    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),
    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),
    /// A tool call that executes one or more shell commands in a managed environment.
    ShellCall(FunctionShellCall),
    /// The output of a shell tool call.
    ShellCallOutput(FunctionShellCallOutput),
    /// A tool call that applies file diffs by creating, deleting, or updating files.
    ApplyPatchCall(ApplyPatchToolCall),
    /// The output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutput),
    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),
    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),
    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),
    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
    /// A tool search call.
    ToolSearchCall(ToolSearchCall),
    /// A tool search output.
    ToolSearchOutput(ToolSearchOutput),
}
2921
/// A call to a custom tool created by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[non_exhaustive]
pub struct CustomToolCall {
    /// An identifier used to map this custom tool call to a tool call output.
    pub call_id: String,
    /// The namespace of the custom tool being called.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
    /// The input for the custom tool call generated by the model.
    pub input: String,
    /// The name of the custom tool being called.
    pub name: String,
    /// The unique ID of the custom tool call in the OpenAI platform.
    pub id: String,
}
2937
/// Confirmation object returned when a response is deleted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct DeleteResponse {
    /// The object type marker of the deleted resource.
    pub object: String,
    /// Whether the response was deleted.
    pub deleted: bool,
    /// The ID of the deleted response.
    pub id: String,
}
2944
/// A reference to an item by ID, with an optional `type` discriminator.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct AnyItemReference {
    /// The type of the referenced item, if specified.
    pub r#type: Option<String>,
    /// The ID of the referenced item.
    pub id: String,
}
2950
/// A fully-materialized item, tagged by its snake_case `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ItemResourceItem {
    Message(MessageItem),
    FileSearchCall(FileSearchToolCall),
    ComputerCall(ComputerToolCall),
    ComputerCallOutput(ComputerCallOutputItemParam),
    WebSearchCall(WebSearchToolCall),
    FunctionCall(FunctionToolCall),
    FunctionCallOutput(FunctionCallOutputItemParam),
    ToolSearchCall(ToolSearchCall),
    ToolSearchOutput(ToolSearchOutput),
    ImageGenerationCall(ImageGenToolCall),
    CodeInterpreterCall(CodeInterpreterToolCall),
    LocalShellCall(LocalShellToolCall),
    LocalShellCallOutput(LocalShellToolCallOutput),
    ShellCall(FunctionShellCallItemParam),
    ShellCallOutput(FunctionShellCallOutputItemParam),
    ApplyPatchCall(ApplyPatchToolCallItemParam),
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
    McpListTools(MCPListTools),
    McpApprovalRequest(MCPApprovalRequest),
    McpApprovalResponse(MCPApprovalResponse),
    McpCall(MCPToolCall),
}
2976
/// Either a bare item reference or a fully-materialized item.
///
/// Untagged: deserialization tries each representation in order.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ItemResource {
    /// A reference to an item by ID.
    ItemReference(AnyItemReference),
    /// A complete item payload.
    Item(ItemResourceItem),
}
2983
/// A paginated list of Response items.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseItemList {
    /// The type of object returned, must be `list`.
    pub object: String,
    /// The ID of the first item in the list.
    pub first_id: Option<String>,
    /// The ID of the last item in the list.
    pub last_id: Option<String>,
    /// Whether there are more items in the list.
    pub has_more: bool,
    /// The list of items.
    pub data: Vec<ItemResource>,
}
2998
2999#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
3000#[builder(
3001 name = "TokenCountsBodyArgs",
3002 pattern = "mutable",
3003 setter(into, strip_option),
3004 default
3005)]
3006#[builder(build_fn(error = "OpenAIError"))]
3007pub struct TokenCountsBody {
3008 /// The conversation that this response belongs to. Items from this
3009 /// conversation are prepended to `input_items` for this response request.
3010 /// Input items and output items from this response are automatically added to this
3011 /// conversation after this response completes.
3012 #[serde(skip_serializing_if = "Option::is_none")]
3013 pub conversation: Option<ConversationParam>,
3014
3015 /// Text, image, or file inputs to the model, used to generate a response
3016 #[serde(skip_serializing_if = "Option::is_none")]
3017 pub input: Option<InputParam>,
3018
3019 /// A system (or developer) message inserted into the model's context.
3020 ///
3021 /// When used along with `previous_response_id`, the instructions from a previous response will
3022 /// not be carried over to the next response. This makes it simple to swap out system (or
3023 /// developer) messages in new responses.
3024 #[serde(skip_serializing_if = "Option::is_none")]
3025 pub instructions: Option<String>,
3026
3027 /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
3028 /// wide range of models with different capabilities, performance characteristics,
3029 /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
3030 /// to browse and compare available models.
3031 #[serde(skip_serializing_if = "Option::is_none")]
3032 pub model: Option<String>,
3033
3034 /// Whether to allow the model to run tool calls in parallel.
3035 #[serde(skip_serializing_if = "Option::is_none")]
3036 pub parallel_tool_calls: Option<bool>,
3037
3038 /// The unique ID of the previous response to the model. Use this to create multi-turn
3039 /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
3040 /// Cannot be used in conjunction with `conversation`.
3041 #[serde(skip_serializing_if = "Option::is_none")]
3042 pub previous_response_id: Option<String>,
3043
3044 /// **gpt-5 and o-series models only**
3045 /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
3046 #[serde(skip_serializing_if = "Option::is_none")]
3047 pub reasoning: Option<Reasoning>,
3048
3049 /// Configuration options for a text response from the model. Can be plain
3050 /// text or structured JSON data. Learn more:
3051 /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
3052 /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
3053 #[serde(skip_serializing_if = "Option::is_none")]
3054 pub text: Option<ResponseTextParam>,
3055
3056 /// How the model should select which tool (or tools) to use when generating
3057 /// a response. See the `tools` parameter to see how to specify which tools
3058 /// the model can call.
3059 #[serde(skip_serializing_if = "Option::is_none")]
3060 pub tool_choice: Option<ToolChoiceParam>,
3061
3062 /// An array of tools the model may call while generating a response. You can specify which tool
3063 /// to use by setting the `tool_choice` parameter.
3064 #[serde(skip_serializing_if = "Option::is_none")]
3065 pub tools: Option<Vec<Tool>>,
3066
    /// The truncation strategy to use for the model response.
3068 /// - `auto`: If the input to this Response exceeds
3069 /// the model's context window size, the model will truncate the
3070 /// response to fit the context window by dropping items from the beginning of the conversation.
3071 /// - `disabled` (default): If the input size will exceed the context window
3072 /// size for a model, the request will fail with a 400 error.
3073 #[serde(skip_serializing_if = "Option::is_none")]
3074 pub truncation: Option<Truncation>,
3075}
3076
/// Token-count accounting returned by the API for a given input.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TokenCountsResource {
    /// The object type. NOTE(review): presumably a fixed discriminator string
    /// (e.g. `response.input_tokens`) — confirm against the API reference.
    pub object: String,
    /// The number of input tokens counted.
    pub input_tokens: u32,
}
3082
/// A compaction item generated by the `/v1/responses/compact` API.
///
/// NOTE(review): the `Param` suffix follows this file's convention for
/// request-side (input) shapes — confirm this is the form resent to the API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactionSummaryItemParam {
    /// The ID of the compaction item. Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The encrypted content of the compaction summary.
    pub encrypted_content: String,
}
3092
/// A compaction item generated by the `/v1/responses/compact` API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactionBody {
    /// The unique ID of the compaction item.
    pub id: String,
    /// The encrypted content that was produced by compaction.
    pub encrypted_content: String,
    /// The identifier of the actor that created the item.
    /// Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
}
3104
/// Request to compact a conversation via the `/v1/responses/compact` API.
///
/// Construct with the generated [`CompactResponseRequestArgs`] builder: every
/// setter accepts `impl Into<T>` and optional fields may simply be omitted.
/// Note that `model` is required by the API even though the builder derives
/// `default` — an unset model serializes as an empty string.
#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
#[builder(name = "CompactResponseRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CompactResponseRequest {
    /// Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wide range of models
    /// with different capabilities, performance characteristics, and price points. Refer to the
    /// [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: String,

    /// Text, image, or file inputs to the model, used to generate a response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// A key to use when reading from or writing to the prompt cache.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,
}
3140
/// The compacted response object returned by the `/v1/responses/compact` API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompactResource {
    /// The unique identifier for the compacted response.
    pub id: String,
    /// The object type. Always `response.compaction`.
    pub object: String,
    /// The compacted list of output items. This is a list of all user messages,
    /// followed by a single compaction item.
    pub output: Vec<OutputItem>,
    /// Unix timestamp (in seconds) when the compacted conversation was created.
    pub created_at: u64,
    /// Token accounting for the compaction pass, including cached, reasoning, and total tokens.
    pub usage: ResponseUsage,
}
3156
3157// ============================================================
3158// Container / Environment Types
3159// ============================================================
3160
/// A domain-scoped secret injected for allowlisted domains.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ContainerNetworkPolicyDomainSecretParam {
    /// The domain associated with the secret.
    pub domain: String,
    /// The name of the secret to inject for the domain.
    pub name: String,
    /// The secret value to inject for the domain.
    /// Sensitive: this value is serialized verbatim into the request body
    /// (and appears in `Debug` output) — avoid logging it.
    pub value: String,
}
3171
/// Details for an allowlist network policy.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct ContainerNetworkPolicyAllowlistDetails {
    /// A list of allowed domains. Required; an empty list is still serialized.
    pub allowed_domains: Vec<String>,
    /// Optional domain-scoped secrets for allowlisted domains.
    /// Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub domain_secrets: Option<Vec<ContainerNetworkPolicyDomainSecretParam>>,
}
3181
/// Network access policy for a container.
///
/// Internally tagged: serializes as `{"type": "disabled"}` or
/// `{"type": "allowlist", "allowed_domains": [...], ...}`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ContainerNetworkPolicy {
    /// Disable all outbound network access.
    Disabled,
    /// Allow access only to specified domains.
    Allowlist(ContainerNetworkPolicyAllowlistDetails),
}
3191
/// A skill referenced by ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct SkillReferenceParam {
    /// The ID of the skill to reference.
    /// NOTE(review): `Default` yields an empty ID, which is unlikely to be a
    /// valid reference — always set this field explicitly.
    pub skill_id: String,
    /// An optional specific version to use. Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
}
3201
/// An inline skill source (base64-encoded zip).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct InlineSkillSourceParam {
    /// The media type. Always `"application/zip"`.
    pub media_type: String,
    /// The base64-encoded skill data (the zip archive contents).
    pub data: String,
}
3210
/// An inline skill definition, uploaded with the request rather than
/// referenced by ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct InlineSkillParam {
    /// The name of the skill.
    pub name: String,
    /// The description of the skill.
    pub description: String,
    /// The inline source for the skill.
    pub source: InlineSkillSourceParam,
}
3221
/// A skill parameter — either a reference or inline definition.
///
/// Internally tagged: serializes with `"type": "skill_reference"` or
/// `"type": "inline"` alongside the variant's fields.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SkillParam {
    /// Reference a skill by ID.
    SkillReference(SkillReferenceParam),
    /// Provide an inline skill definition.
    Inline(InlineSkillParam),
}
3231
/// Automatically creates a container for the request.
///
/// All fields are optional; `Default` produces an empty configuration that
/// lets the server choose its own defaults.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct ContainerAutoParam {
    /// An optional list of uploaded file IDs to make available in the container.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_ids: Option<Vec<String>>,
    /// Network access policy for the container.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub network_policy: Option<ContainerNetworkPolicy>,
    /// An optional list of skills to make available in the container.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub skills: Option<Vec<SkillParam>>,
}
3245
/// A local skill available in a local environment.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalSkillParam {
    /// The name of the skill.
    pub name: String,
    /// The description of the skill.
    pub description: String,
    /// The path to the directory containing the skill.
    /// NOTE(review): presumably a path on the client machine — confirm whether
    /// relative paths are resolved by the client or the server.
    pub path: String,
}
3256
/// Uses a local computer environment.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct LocalEnvironmentParam {
    /// An optional list of local skills. Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub skills: Option<Vec<LocalSkillParam>>,
}
3264
/// References a container created with the /v1/containers endpoint.
/// Request-side counterpart of [`ContainerReferenceResource`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ContainerReferenceParam {
    /// The ID of the referenced container.
    pub container_id: String,
}
3271
/// A resource reference to a container by ID.
/// Response-side counterpart of [`ContainerReferenceParam`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ContainerReferenceResource {
    /// The ID of the referenced container.
    pub container_id: String,
}
3278
/// The execution environment for a shell tool — container or local.
///
/// Internally tagged: serializes with `"type": "container_auto"`,
/// `"type": "local"`, or `"type": "container_reference"`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum FunctionShellEnvironment {
    /// Automatically creates a container for this request.
    ContainerAuto(ContainerAutoParam),
    /// Use a local computer environment.
    Local(LocalEnvironmentParam),
    /// Reference an existing container by ID.
    ContainerReference(ContainerReferenceParam),
}
3290
/// Parameters for the shell function tool.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct FunctionShellToolParam {
    /// The execution environment for the shell tool.
    /// Omitted from serialization when `None` (server default applies).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub environment: Option<FunctionShellEnvironment>,
}
3298
/// Context management configuration.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ContextManagementParam {
    /// The context management strategy type.
    /// Serialized as `"type"` (`type` is a Rust keyword, hence the rename).
    /// NOTE(review): stringly typed — the set of valid values is not visible
    /// here; confirm against the API reference before constraining to an enum.
    #[serde(rename = "type")]
    pub type_: String,
    /// Minimum number of tokens to retain before compacting.
    /// Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compact_threshold: Option<u32>,
}
3308}