async_openai/types/responses/response.rs
1use crate::error::OpenAIError;
2pub use crate::types::chat::{
3 CompletionTokensDetails, ImageDetail, PromptTokensDetails, ReasoningEffort,
4 ResponseFormatJsonSchema,
5};
6use crate::types::{MCPListToolsTool, MCPTool};
7use derive_builder::Builder;
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10
/// Role of messages in the API.
///
/// Serialized in lowercase (`user`, `assistant`, `system`, `developer`).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    /// End-user input; the default role.
    #[default]
    User,
    /// Output generated by the model in a previous interaction.
    Assistant,
    /// System-level instructions; take precedence over `user` instructions.
    System,
    /// Developer instructions; take precedence over `user` instructions.
    Developer,
}
21
/// Status of input/output items.
///
/// Serialized in `snake_case` (`in_progress`, `completed`, `incomplete`).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OutputStatus {
    /// The item is still being produced.
    InProgress,
    /// The item finished successfully.
    Completed,
    /// The item ended before completing (e.g. truncated).
    Incomplete,
}
30
/// The `input` of a Responses API request: either a bare string or a list of items.
///
/// Untagged: a JSON string deserializes to `Text`, a JSON array to `Items`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputParam {
    /// A text input to the model, equivalent to a text input with the
    /// `user` role.
    Text(String),
    /// A list of one or many input items to the model, containing
    /// different content types.
    Items(Vec<InputItem>),
}
41
42impl Default for InputParam {
43 fn default() -> Self {
44 Self::Text(String::new())
45 }
46}
47
/// Content item used to generate a response.
///
/// This is a properly discriminated union based on the `type` field, using Rust's
/// type-safe enum with serde's tag attribute for efficient deserialization.
/// Variant names are serialized in `snake_case` as the `type` value
/// (e.g. `Message` → `"message"`, `FileSearchCall` → `"file_search_call"`).
///
/// # OpenAPI Specification
/// Corresponds to the `Item` schema in the OpenAPI spec with a `type` discriminator.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Item {
    /// A message (type: "message").
    /// Can represent InputMessage (user/system/developer) or OutputMessage (assistant).
    ///
    /// InputMessage:
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the developer or system role take precedence over instructions given with the user role.
    /// OutputMessage:
    /// A message output from the model.
    Message(MessageItem),

    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
    FileSearchCall(FileSearchToolCall),

    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
    ComputerCall(ComputerToolCall),

    /// The output of a computer tool call.
    ComputerCallOutput(ComputerCallOutputItemParam),

    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
    WebSearchCall(WebSearchToolCall),

    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
    FunctionCall(FunctionToolCall),

    /// The output of a function tool call.
    FunctionCallOutput(FunctionCallOutputItemParam),

    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API
    /// for subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),

    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),

    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),

    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),

    /// The output of a local shell tool call.
    LocalShellCallOutput(LocalShellToolCallOutput),

    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),

    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),

    /// A response to an MCP approval request.
    McpApprovalResponse(MCPApprovalResponse),

    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),

    /// The output of a custom tool call from your code, being sent back to the model.
    CustomToolCallOutput(CustomToolCallOutput),

    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
127
/// Input item that can be used in the context for generating a response.
///
/// This represents the OpenAPI `InputItem` schema which is an `anyOf`:
/// 1. `EasyInputMessage` - Simple, user-friendly message input (can use string content)
/// 2. `Item` - Structured items with proper type discrimination (including InputMessage, OutputMessage, tool calls)
/// 3. `ItemReferenceParam` - Reference to an existing item by ID (type can be null)
///
/// Uses untagged deserialization because these types overlap in structure.
/// Order matters: more specific structures are tried first — do not reorder
/// the variants below.
///
/// # OpenAPI Specification
/// Corresponds to the `InputItem` schema: `anyOf[EasyInputMessage, Item, ItemReferenceParam]`
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum InputItem {
    /// A reference to an existing item by ID.
    /// Has a required `id` field and optional `type` (can be "item_reference" or null).
    /// Must be tried first as it's the most minimal structure.
    ItemReference(ItemReference),

    /// All structured items with proper type discrimination.
    /// Includes InputMessage, OutputMessage, and all tool calls/outputs.
    /// Uses the discriminated `Item` enum for efficient, type-safe deserialization.
    Item(Item),

    /// A simple, user-friendly message input (EasyInputMessage).
    /// Supports string content and can include assistant role for previous responses.
    /// Must be tried last as it's the most flexible structure.
    ///
    /// A message input to the model with a role indicating instruction following
    /// hierarchy. Instructions given with the `developer` or `system` role take
    /// precedence over instructions given with the `user` role. Messages with the
    /// `assistant` role are presumed to have been generated by the model in previous
    /// interactions.
    EasyMessage(EasyInputMessage),
}
164
165impl InputItem {
166 /// Creates an InputItem from an item reference ID.
167 pub fn from_reference(id: impl Into<String>) -> Self {
168 Self::ItemReference(ItemReference::new(id))
169 }
170
171 /// Creates an InputItem from a structured Item.
172 pub fn from_item(item: Item) -> Self {
173 Self::Item(item)
174 }
175
176 /// Creates an InputItem from an EasyInputMessage.
177 pub fn from_easy_message(message: EasyInputMessage) -> Self {
178 Self::EasyMessage(message)
179 }
180
181 /// Creates a simple text message with the given role and content.
182 pub fn text_message(role: Role, content: impl Into<String>) -> Self {
183 Self::EasyMessage(EasyInputMessage {
184 r#type: MessageType::Message,
185 role,
186 content: EasyInputContent::Text(content.into()),
187 })
188 }
189}
190
/// A message item used within the `Item` enum.
///
/// Both InputMessage and OutputMessage have `type: "message"`, so we use an untagged
/// enum to distinguish them based on their structure:
/// - OutputMessage: role=assistant, required id & status fields
/// - InputMessage: role=user/system/developer, content is Vec<ContentType>, optional id/status
///
/// Variant order is load-bearing for untagged deserialization — do not reorder.
///
/// Note: EasyInputMessage is NOT included here - it's a separate variant in `InputItem`,
/// not part of the structured `Item` enum.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum MessageItem {
    /// An output message from the model (role: assistant, has required id & status).
    /// This must come first as it has the most specific structure (required id and status fields).
    Output(OutputMessage),

    /// A structured input message (role: user/system/developer, content is Vec<ContentType>).
    /// Has structured content list and optional id/status fields.
    ///
    /// A message input to the model with a role indicating instruction following hierarchy.
    /// Instructions given with the `developer` or `system` role take precedence over instructions
    /// given with the `user` role.
    Input(InputMessage),
}
215
/// A reference to an existing item by ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ItemReference {
    /// The type of item to reference. Can be "item_reference" or null;
    /// omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub r#type: Option<ItemReferenceType>,
    /// The ID of the item to reference.
    pub id: String,
}
225
226impl ItemReference {
227 /// Create a new item reference with the given ID.
228 pub fn new(id: impl Into<String>) -> Self {
229 Self {
230 r#type: Some(ItemReferenceType::ItemReference),
231 id: id.into(),
232 }
233 }
234}
235
/// The `type` discriminator value for an [`ItemReference`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ItemReferenceType {
    /// Serialized as `item_reference`.
    ItemReference,
}
241
/// Output from a function call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionCallOutputItemParam {
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// Text, image, or file output of the function tool call.
    pub output: FunctionCallOutput,
    /// The unique ID of the function tool call output.
    /// Populated when this item is returned via API; omitted from
    /// serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
258
/// The `output` payload of a function tool call: a plain string or a
/// structured content list. Untagged: a JSON string maps to `Text`,
/// a JSON array to `Content`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum FunctionCallOutput {
    /// A JSON string of the output of the function tool call.
    Text(String),
    /// Text, image, or file output items.
    Content(Vec<InputContent>), // TODO use shape which allows null from OpenAPI spec?
}
266
/// Output from a computer tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallOutputItemParam {
    /// The ID of the computer tool call that produced the output.
    pub call_id: String,
    /// A computer screenshot image used with the computer use tool.
    pub output: ComputerScreenshotImage,
    /// The safety checks reported by the API that have been acknowledged by the developer.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acknowledged_safety_checks: Option<Vec<ComputerCallSafetyCheckParam>>,
    /// The unique ID of the computer tool call output. Optional when creating.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the message input. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when input items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>, // TODO rename OutputStatus?
}
284
/// The `type` discriminator value for a [`ComputerScreenshotImage`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ComputerScreenshotImageType {
    /// Serialized as `computer_screenshot`.
    ComputerScreenshot,
}
290
/// A computer screenshot image used with the computer use tool.
///
/// At least one of `file_id` or `image_url` is expected to identify the
/// screenshot — TODO confirm the spec's requirement.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerScreenshotImage {
    /// Specifies the event type. For a computer screenshot, this property is always
    /// set to `computer_screenshot`.
    pub r#type: ComputerScreenshotImageType,
    /// The identifier of an uploaded file that contains the screenshot.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
    /// The URL of the screenshot image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
}
304
/// Output from a local shell tool call that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCallOutput {
    /// The unique ID of the local shell tool call generated by the model.
    pub id: String,

    /// A JSON string of the output of the local shell tool call.
    pub output: String,

    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
318
/// Output from a local shell command execution.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellOutput {
    /// The stdout output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stdout: Option<String>,

    /// The stderr output from the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stderr: Option<String>,

    /// The exit code of the command.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exit_code: Option<i32>,
}
334
/// An MCP approval response that you're providing back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalResponse {
    /// The ID of the approval request being answered.
    pub approval_request_id: String,

    /// Whether the request was approved.
    pub approve: bool,

    /// The unique ID of the approval response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,

    /// Optional reason for the decision.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
352
/// The `output` payload of a custom tool call: a plain string or a list of
/// content items. Untagged: a JSON string maps to `Text`, a JSON array to `List`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum CustomToolCallOutputOutput {
    /// A string of the output of the custom tool call.
    Text(String),
    /// Text, image, or file output of the custom tool call.
    List(Vec<InputContent>),
}
361
/// The output of a custom tool call from your code, being sent back to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CustomToolCallOutput {
    /// The call ID, used to map this custom tool call output to a custom tool call.
    pub call_id: String,

    /// The output from the custom tool call generated by your code.
    /// Can be a string or a list of output content.
    pub output: CustomToolCallOutputOutput,

    /// The unique ID of the custom tool call output in the OpenAI platform.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
375
/// A simplified message input to the model (EasyInputMessage in the OpenAPI spec).
///
/// This is the most user-friendly way to provide messages, supporting both simple
/// string content and structured content. Role can include `assistant` for providing
/// previous assistant responses.
///
// NOTE(review): the `Default` derive requires `MessageType` and
// `EasyInputContent` to implement `Default` — their impls are not visible in
// this chunk; confirm they exist elsewhere in the crate.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "EasyInputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct EasyInputMessage {
    /// The type of the message input. Always set to `message`.
    pub r#type: MessageType,
    /// The role of the message input. One of `user`, `assistant`, `system`, or `developer`.
    pub role: Role,
    /// Text, image, or audio input to the model, used to generate a response.
    /// Can also contain previous assistant responses.
    pub content: EasyInputContent,
}
398
/// A structured message input to the model (InputMessage in the OpenAPI spec).
///
/// This variant requires structured content (not a simple string) and does not support
/// the `assistant` role (use OutputMessage for that). Used when items are returned via API
/// with additional metadata.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "InputMessageArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct InputMessage {
    /// A list of one or many input items to the model, containing different content types.
    pub content: Vec<InputContent>,
    /// The role of the message input. One of `user`, `system`, or `developer`.
    /// Note: `assistant` is NOT allowed here; use OutputMessage instead.
    pub role: InputRole,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
    // NOTE: the OpenAPI spec also defines a `type` field (always `message`);
    // it is intentionally omitted here for now.
    //pub r#type: MessageType,
}
425
/// The role for an input message - can only be `user`, `system`, or `developer`.
/// This type ensures type safety by excluding the `assistant` role (use OutputMessage for that).
///
/// Serialized in lowercase. Defaults to `User`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum InputRole {
    /// End-user input; the default role.
    #[default]
    User,
    /// System-level instructions.
    System,
    /// Developer instructions.
    Developer,
}
436
/// Content for EasyInputMessage - can be a simple string or structured list.
///
/// Untagged: a JSON string maps to `Text`, a JSON array to `ContentList`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum EasyInputContent {
    /// A text input to the model.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    ContentList(Vec<InputContent>),
}
446
/// Parts of a message: text, image, or file.
///
/// Discriminated by the `type` field in `snake_case`
/// (`input_text`, `input_image`, `input_file`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
    /// A text input to the model.
    InputText(InputTextContent),
    /// An image input to the model. Learn about
    /// [image inputs](https://platform.openai.com/docs/guides/vision).
    InputImage(InputImageContent),
    /// A file input to the model.
    InputFile(InputFileContent),
}
459
/// A text input to the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct InputTextContent {
    /// The text input to the model.
    pub text: String,
}
465
466#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
467#[builder(
468 name = "InputImageArgs",
469 pattern = "mutable",
470 setter(into, strip_option),
471 default
472)]
473#[builder(build_fn(error = "OpenAIError"))]
474pub struct InputImageContent {
475 /// The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`.
476 /// Defaults to `auto`.
477 detail: ImageDetail,
478 /// The ID of the file to be sent to the model.
479 #[serde(skip_serializing_if = "Option::is_none")]
480 file_id: Option<String>,
481 /// The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image
482 /// in a data URL.
483 #[serde(skip_serializing_if = "Option::is_none")]
484 image_url: Option<String>,
485}
486
487#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
488#[builder(
489 name = "InputFileArgs",
490 pattern = "mutable",
491 setter(into, strip_option),
492 default
493)]
494#[builder(build_fn(error = "OpenAIError"))]
495pub struct InputFileContent {
496 /// The content of the file to be sent to the model.
497 #[serde(skip_serializing_if = "Option::is_none")]
498 file_data: Option<String>,
499 /// The ID of the file to be sent to the model.
500 #[serde(skip_serializing_if = "Option::is_none")]
501 file_id: Option<String>,
502 /// The URL of the file to be sent to the model.
503 #[serde(skip_serializing_if = "Option::is_none")]
504 file_url: Option<String>,
505 /// The name of the file to be sent to the model.
506 #[serde(skip_serializing_if = "Option::is_none")]
507 filename: Option<String>,
508}
509
/// The conversation that a response belongs to.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Conversation {
    /// The unique ID of the conversation.
    pub id: String,
}
515
/// The `conversation` parameter: either a bare conversation ID string or a
/// [`Conversation`] object. Untagged: a JSON string maps to `ConversationID`,
/// a JSON object to `Object`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ConversationParam {
    /// The unique ID of the conversation.
    ConversationID(String),
    /// The conversation that this response belongs to.
    Object(Conversation),
}
524
/// Additional output data that can be requested via the `include` parameter
/// of [`CreateResponse`]. Each variant serializes to its dotted spec name.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum IncludeEnum {
    /// Include the search results of the file search tool call.
    #[serde(rename = "file_search_call.results")]
    FileSearchCallResults,
    /// Include the results of the web search tool call.
    #[serde(rename = "web_search_call.results")]
    WebSearchCallResults,
    /// Include the sources of the web search tool call.
    #[serde(rename = "web_search_call.action.sources")]
    WebSearchCallActionSources,
    /// Include image urls from the input message.
    #[serde(rename = "message.input_image.image_url")]
    MessageInputImageImageUrl,
    /// Include image urls from the computer call output.
    #[serde(rename = "computer_call_output.output.image_url")]
    ComputerCallOutputOutputImageUrl,
    /// Include the outputs of python code execution in code interpreter tool call items.
    #[serde(rename = "code_interpreter_call.outputs")]
    CodeInterpreterCallOutputs,
    /// Include an encrypted version of reasoning tokens in reasoning item outputs.
    #[serde(rename = "reasoning.encrypted_content")]
    ReasoningEncryptedContent,
    /// Include logprobs with assistant messages.
    #[serde(rename = "message.output_text.logprobs")]
    MessageOutputTextLogprobs,
}
544
/// Options for streaming responses. Only set this when `stream: true`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseStreamOptions {
    /// When true, stream obfuscation will be enabled. Stream obfuscation adds
    /// random characters to an `obfuscation` field on streaming delta events to
    /// normalize payload sizes as a mitigation to certain side-channel attacks.
    /// These obfuscation fields are included by default, but add a small amount
    /// of overhead to the data stream. You can set `include_obfuscation` to
    /// false to optimize for bandwidth if you trust the network links between
    /// your application and the OpenAI API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_obfuscation: Option<bool>,
}
557
/// A request to create a model response via the Responses API.
///
/// Construct directly or with the generated [`CreateResponseArgs`] builder.
/// All optional fields are skipped during serialization when `None`.
#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
#[builder(
    name = "CreateResponseArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateResponse {
    /// Whether to run the model response in the background.
    /// [Learn more](https://platform.openai.com/docs/guides/background).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,

    /// The conversation that this response belongs to. Items from this conversation are prepended to
    /// `input_items` for this response request.
    ///
    /// Input items and output items from this response are automatically added to this conversation after
    /// this response completes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<ConversationParam>,

    /// Specify additional output data to include in the model response. Currently supported
    /// values are:
    ///
    /// - `web_search_call.action.sources`: Include the sources of the web search tool call.
    ///
    /// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
    ///   interpreter tool call items.
    ///
    /// - `computer_call_output.output.image_url`: Include image urls from the computer call
    ///   output.
    ///
    /// - `file_search_call.results`: Include the search results of the file search tool call.
    ///
    /// - `message.input_image.image_url`: Include image urls from the input message.
    ///
    /// - `message.output_text.logprobs`: Include logprobs with assistant messages.
    ///
    /// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
    ///   reasoning item outputs. This enables reasoning items to be used in multi-turn
    ///   conversations when using the Responses API statelessly (like when the `store` parameter is
    ///   set to `false`, or when an organization is enrolled in the zero data retention program).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include: Option<Vec<IncludeEnum>>,

    /// Text, image, or file inputs to the model, used to generate a response.
    ///
    /// Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Image inputs](https://platform.openai.com/docs/guides/images)
    /// - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
    /// - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
    /// - [Function calling](https://platform.openai.com/docs/guides/function-calling)
    pub input: InputParam,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When using along with `previous_response_id`, the instructions from a previous
    /// response will not be carried over to the next response. This makes it simple
    /// to swap out system (or developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// An upper bound for the number of tokens that can be generated for a response, including
    /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u32>,

    /// The maximum number of total calls to built-in tools that can be processed in a response. This
    /// maximum number applies across all built-in tool calls, not per individual tool. Any further
    /// attempts to call a tool by the model will be ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tool_calls: Option<u32>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be
    /// useful for storing additional information about the object in a structured
    /// format, and querying for objects via API or the dashboard.
    ///
    /// Keys are strings with a maximum length of 64 characters. Values are
    /// strings with a maximum length of 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
    /// offers a wide range of models with different capabilities, performance
    /// characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
    /// to browse and compare available models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// Reference to a prompt template and its variables.
    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<Prompt>,

    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
    /// usage policies.
    ///
    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
    /// or email address, in order to avoid sending us any identifying information. [Learn
    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub safety_identifier: Option<String>,

    /// Specifies the processing type used for serving the request.
    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
    /// - When not set, the default behavior is 'auto'.
    ///
    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<ServiceTier>,

    /// Whether to store the generated model response for later retrieval via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub store: Option<bool>,

    /// If set to true, the model response data will be streamed to the client
    /// as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
    /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
    /// for more information.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,

    /// Options for streaming responses. Only set this when you set `stream: true`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream_options: Option<ResponseStreamOptions>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8
    /// will make the output more random, while lower values like 0.2 will make it
    /// more focused and deterministic. We generally recommend altering this or
    /// `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You
    /// can specify which tool to use by setting the `tool_choice` parameter.
    ///
    /// We support the following categories of tools:
    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
    /// - **Function calls (custom tools)**: Functions that are defined by you,
    ///   enabling the model to call your own code with strongly typed arguments
    ///   and outputs. Learn more about
    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
    ///   custom tools to call your own code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
    /// token position, each with an associated log probability.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u8>,

    /// An alternative to sampling with temperature, called nucleus sampling,
    /// where the model considers the results of the tokens with top_p probability
    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
    /// are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,
}
771
/// A substitution value for a variable in a [`Prompt`] template.
///
/// Serialized untagged: on deserialization, a JSON string matches `String`
/// first, then a recognizable input content object matches `Content`, and
/// anything else falls through to `Custom`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ResponsePromptVariables {
    /// A plain string substitution value.
    String(String),
    /// A typed Response input content part (e.g. an image or file input).
    Content(InputContent),
    /// Fallback for any other JSON value.
    Custom(serde_json::Value),
}
779
/// A reference to a server-side prompt template, with optional version and
/// variable substitutions.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Prompt {
    /// The unique identifier of the prompt template to use.
    pub id: String,

    /// Optional version of the prompt template.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,

    /// Optional map of values to substitute in for variables in your
    /// prompt. The substitution values can either be strings, or other
    /// Response input types like images or files.
    // NOTE(review): the doc says "map of values" but the field type holds a
    // single value; a `HashMap<String, ResponsePromptVariables>` may be
    // intended — confirm against the API spec before relying on this.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variables: Option<ResponsePromptVariables>,
}
795
/// Processing tier used to serve the request.
/// Serialized in lowercase (e.g. `"auto"`, `"priority"`).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ServiceTier {
    /// Let the service pick the tier (default).
    #[default]
    Auto,
    Default,
    Flex,
    Scale,
    Priority,
}
806
/// Truncation strategies.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Truncation {
    /// Drop items from the beginning of the conversation to fit the context window.
    Auto,
    /// Fail with a 400 error if the input exceeds the context window.
    Disabled,
}
814
/// Billing information attached to a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Billing {
    // Identifies who pays for the request; exact value set is not visible
    // here — confirm against the API reference.
    pub payer: String,
}
819
/// o-series reasoning settings.
///
/// Construct via [`ReasoningArgs`] (generated builder); build errors are
/// surfaced as `OpenAIError`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ReasoningArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct Reasoning {
    /// Constrains effort on reasoning for
    /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
    /// reasoning effort can result in faster responses and fewer tokens used
    /// on reasoning in a response.
    ///
    /// Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option<ReasoningEffort>,
    /// A summary of the reasoning performed by the model. This can be
    /// useful for debugging and understanding the model's reasoning process.
    /// One of `auto`, `concise`, or `detailed`.
    ///
    /// `concise` is only supported for `computer-use-preview` models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummary>,
}
847
/// Verbosity of the model's response: `low`, `medium`, or `high`.
/// Used by [`ResponseTextParam::verbosity`].
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Verbosity {
    Low,
    Medium,
    High,
}
856
/// Level of detail for the reasoning summary in [`Reasoning::summary`].
/// `concise` is only supported for `computer-use-preview` models.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ReasoningSummary {
    Auto,
    Concise,
    Detailed,
}
864
/// Configuration for text response format.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTextParam {
    /// An object specifying the format that the model must output.
    ///
    /// Configuring `{ "type": "json_schema" }` enables Structured Outputs,
    /// which ensures the model will match your supplied JSON schema. Learn more in the
    /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
    ///
    /// The default format is `{ "type": "text" }` with no additional options.
    ///
    /// **Not recommended for gpt-4o and newer models:**
    ///
    /// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
    /// ensures the message the model generates is valid JSON. Using `json_schema`
    /// is preferred for models that support it.
    ///
    /// Required: always serialized (not an `Option`).
    pub format: TextResponseFormatConfiguration,

    /// Constrains the verbosity of the model's response. Lower values will result in
    /// more concise responses, while higher values will result in more verbose responses.
    ///
    /// Currently supported values are `low`, `medium`, and `high`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<Verbosity>,
}
890
/// Output format selector, discriminated on the wire by a `"type"` field
/// (`text`, `json_object`, or `json_schema`).
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum TextResponseFormatConfiguration {
    /// Default response format. Used to generate text responses.
    Text,
    /// JSON object response format. An older method of generating JSON responses.
    /// Using `json_schema` is recommended for models that support it.
    /// Note that the model will not generate JSON without a system or user message
    /// instructing it to do so.
    JsonObject,
    /// JSON Schema response format. Used to generate structured JSON responses.
    /// Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
    JsonSchema(ResponseFormatJsonSchema),
}
905
/// Definitions for model-callable tools.
///
/// Discriminated on the wire by a snake_case `"type"` field; dated web-search
/// variants carry explicit renames because their names are not derivable from
/// the variant identifier.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Tool {
    /// Defines a function in your own code the model can choose to call. Learn more about [function
    /// calling](https://platform.openai.com/docs/guides/tools).
    Function(FunctionTool),
    /// A tool that searches for relevant content from uploaded files. Learn more about the [file search
    /// tool](https://platform.openai.com/docs/guides/tools-file-search).
    FileSearch(FileSearchTool),
    /// A tool that controls a virtual computer. Learn more about the [computer
    /// use tool](https://platform.openai.com/docs/guides/tools-computer-use).
    ComputerUsePreview(ComputerUsePreviewTool),
    /// Search the Internet for sources related to the prompt. Learn more about the
    /// [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearch(WebSearchTool),
    /// type: web_search_2025_08_26
    #[serde(rename = "web_search_2025_08_26")]
    WebSearch20250826(WebSearchTool),
    /// Give the model access to additional tools via remote Model Context Protocol
    /// (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
    Mcp(MCPTool),
    /// A tool that runs Python code to help generate a response to a prompt.
    CodeInterpreter(CodeInterpreterTool),
    /// A tool that generates images using a model like `gpt-image-1`.
    ImageGeneration(ImageGenTool),
    /// A tool that allows the model to execute shell commands in a local environment.
    LocalShell,
    /// A custom tool that processes input using a specified format. Learn more about [custom
    /// tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
    Custom(CustomToolParam),
    /// This tool searches the web for relevant results to use in a response. Learn more about the [web search
    /// tool](https://platform.openai.com/docs/guides/tools-web-search).
    WebSearchPreview(WebSearchTool),
    /// type: web_search_preview_2025_03_11
    #[serde(rename = "web_search_preview_2025_03_11")]
    WebSearchPreview20250311(WebSearchTool),
}
944
945#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
946pub struct CustomToolParam {
947 /// The name of the custom tool, used to identify it in tool calls.
948 pub name: String,
949 /// Optional description of the custom tool, used to provide more context.
950 pub description: Option<String>,
951 /// The input format for the custom tool. Default is unconstrained text.
952 pub format: CustomToolParamFormat,
953}
954
/// Syntax used by a custom tool grammar definition. Serialized lowercase.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum GrammarSyntax {
    /// [Lark](https://lark-parser.readthedocs.io/) grammar syntax.
    Lark,
    /// Regular-expression syntax (the default).
    #[default]
    Regex,
}
962
/// A user-defined grammar constraining custom tool input.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
pub struct CustomGrammarFormatParam {
    /// The grammar definition.
    pub definition: String,
    /// The syntax of the grammar definition. One of `lark` or `regex`.
    pub syntax: GrammarSyntax,
}
970
/// Input format for a custom tool, discriminated by a lowercase `"type"` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum CustomToolParamFormat {
    /// Unconstrained free-form text.
    #[default]
    Text,
    /// A grammar defined by the user.
    Grammar(CustomGrammarFormatParam),
}
980
/// File search tool configuration. Construct via [`FileSearchToolArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "FileSearchToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct FileSearchTool {
    /// The IDs of the vector stores to search.
    pub vector_store_ids: Vec<String>,
    /// The maximum number of results to return. This number should be between 1 and 50 inclusive.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_num_results: Option<u32>,
    /// A filter to apply.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filters: Option<Filter>,
    /// Ranking options for search.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ranking_options: Option<RankingOptions>,
}
1002
1003#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1004#[builder(
1005 name = "FunctionToolArgs",
1006 pattern = "mutable",
1007 setter(into, strip_option),
1008 default
1009)]
1010pub struct FunctionTool {
1011 /// The name of the function to call.
1012 pub name: String,
1013 /// A JSON schema object describing the parameters of the function.
1014 #[serde(skip_serializing_if = "Option::is_none")]
1015 pub parameters: Option<serde_json::Value>,
1016 /// Whether to enforce strict parameter validation. Default `true`.
1017 #[serde(skip_serializing_if = "Option::is_none")]
1018 pub strict: Option<bool>,
1019 /// A description of the function. Used by the model to determine whether or not to call the
1020 /// function.
1021 #[serde(skip_serializing_if = "Option::is_none")]
1022 pub description: Option<String>,
1023}
1024
/// Domain filters applied to a web search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolFilters {
    /// Allowed domains for the search. If not provided, all domains are allowed.
    /// Subdomains of the provided domains are allowed as well.
    ///
    /// Example: `["pubmed.ncbi.nlm.nih.gov"]`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allowed_domains: Option<Vec<String>>,
}
1034
1035#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1036#[builder(
1037 name = "WebSearchToolArgs",
1038 pattern = "mutable",
1039 setter(into, strip_option),
1040 default
1041)]
1042pub struct WebSearchTool {
1043 /// Filters for the search.
1044 #[serde(skip_serializing_if = "Option::is_none")]
1045 pub filters: Option<WebSearchToolFilters>,
1046 /// The approximate location of the user.
1047 #[serde(skip_serializing_if = "Option::is_none")]
1048 pub user_location: Option<WebSearchApproximateLocation>,
1049 /// High level guidance for the amount of context window space to use for the search. One of `low`,
1050 /// `medium`, or `high`. `medium` is the default.
1051 #[serde(skip_serializing_if = "Option::is_none")]
1052 pub search_context_size: Option<WebSearchToolSearchContextSize>,
1053}
1054
/// Amount of context window space to use for a web search; `medium` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchToolSearchContextSize {
    Low,
    #[default]
    Medium,
    High,
}
1063
/// Computer environment controlled by the computer-use tool. Serialized lowercase.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ComputerEnvironment {
    Windows,
    Mac,
    Linux,
    Ubuntu,
    /// Browser environment (the default).
    #[default]
    Browser,
}
1074
1075#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1076#[builder(
1077 name = "ComputerUsePreviewToolArgs",
1078 pattern = "mutable",
1079 setter(into, strip_option),
1080 default
1081)]
1082pub struct ComputerUsePreviewTool {
1083 /// The type of computer environment to control.
1084 environment: ComputerEnvironment,
1085 /// The width of the computer display.
1086 display_width: u32,
1087 /// The height of the computer display.
1088 display_height: u32,
1089}
1090
/// Ranker version for file search; wire values are `"auto"` and
/// `"default-2024-11-15"` (explicit renames since the dated name is not
/// derivable from the identifier).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum RankVersionType {
    #[serde(rename = "auto")]
    Auto,
    #[serde(rename = "default-2024-11-15")]
    Default20241115,
}
1098
/// Weights balancing embedding vs. keyword matches in reciprocal rank fusion.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HybridSearch {
    /// The weight of the embedding in the reciprocal ranking fusion.
    pub embedding_weight: f32,
    /// The weight of the text in the reciprocal ranking fusion.
    pub text_weight: f32,
}
1106
/// Options for search result ranking.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RankingOptions {
    /// Weights that control how reciprocal rank fusion balances semantic embedding matches versus
    /// sparse keyword matches when hybrid search is enabled.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hybrid_search: Option<HybridSearch>,
    /// The ranker to use for the file search.
    /// Required: always serialized (not an `Option`).
    pub ranker: RankVersionType,
    /// The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will
    /// attempt to return only the most relevant results, but may return fewer results.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub score_threshold: Option<f32>,
}
1121
/// Filters for file search.
///
/// Untagged: deserialization tries `Comparison` first, then `Compound`,
/// matching on JSON shape rather than a discriminator field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum Filter {
    /// A filter used to compare a specified attribute key to a given value using a defined
    /// comparison operation.
    Comparison(ComparisonFilter),
    /// Combine multiple filters using `and` or `or`.
    Compound(CompoundFilter),
}
1132
/// Single comparison filter.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComparisonFilter {
    /// Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, `nin`.
    /// - `eq`: equals
    /// - `ne`: not equal
    /// - `gt`: greater than
    /// - `gte`: greater than or equal
    /// - `lt`: less than
    /// - `lte`: less than or equal
    /// - `in`: in
    /// - `nin`: not in
    pub r#type: ComparisonType,
    /// The key to compare against the value.
    pub key: String,
    /// The value to compare against the attribute key; supports string, number, or boolean types.
    pub value: serde_json::Value,
}
1151
/// Comparison operator for [`ComparisonFilter`]; explicit renames map each
/// variant to its short wire token (`eq`, `ne`, ...).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
pub enum ComparisonType {
    #[serde(rename = "eq")]
    Equals,
    #[serde(rename = "ne")]
    NotEquals,
    #[serde(rename = "gt")]
    GreaterThan,
    #[serde(rename = "gte")]
    GreaterThanOrEqual,
    #[serde(rename = "lt")]
    LessThan,
    #[serde(rename = "lte")]
    LessThanOrEqual,
    #[serde(rename = "in")]
    In,
    #[serde(rename = "nin")]
    NotIn,
}
1171
/// Combine multiple filters using `and` or `or`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CompoundFilter {
    /// Type of operation: `and` or `or`.
    pub r#type: CompoundType,
    /// Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
    pub filters: Vec<Filter>,
}
1180
/// Logical operator for a [`CompoundFilter`]. Serialized lowercase.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum CompoundType {
    And,
    Or,
}
1187
/// Single-variant marker type: user location is always `"approximate"`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchApproximateLocationType {
    #[default]
    Approximate,
}
1194
/// Approximate user location for web search.
/// Construct via [`WebSearchApproximateLocationArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "WebSearchApproximateLocationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct WebSearchApproximateLocation {
    /// The type of location approximation. Always `approximate`.
    pub r#type: WebSearchApproximateLocationType,
    /// Free text input for the city of the user, e.g. `San Francisco`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub city: Option<String>,
    /// The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,
    /// e.g. `US`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub country: Option<String>,
    /// Free text input for the region of the user, e.g. `California`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub region: Option<String>,
    /// The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g.
    /// `America/Los_Angeles`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timezone: Option<String>,
}
1222
/// Container configuration for a code interpreter.
///
/// Tagged by `"type"` for the `Auto` variant; the `ContainerID` variant is
/// untagged so a bare JSON string deserializes directly into it.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolContainer {
    /// Configuration for a code interpreter container. Optionally specify the IDs of the
    /// files to run the code on.
    Auto(CodeInterpreterContainerAuto),

    /// The container ID.
    #[serde(untagged)]
    ContainerID(String),
}
1235
1236impl Default for CodeInterpreterToolContainer {
1237 fn default() -> Self {
1238 Self::Auto(CodeInterpreterContainerAuto::default())
1239 }
1240}
1241
/// Auto configuration for code interpreter container.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct CodeInterpreterContainerAuto {
    /// An optional list of uploaded files to make available to your code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_ids: Option<Vec<String>>,

    // NOTE(review): presumably a memory limit for the container; units
    // (bytes vs. MB) are not established anywhere in this file — confirm
    // against the API reference.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_limit: Option<u64>,
}
1252
/// Code interpreter tool configuration. Construct via [`CodeInterpreterToolArgs`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "CodeInterpreterToolArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CodeInterpreterTool {
    /// The code interpreter container. Can be a container ID or an object that
    /// specifies uploaded file IDs to make available to your code.
    pub container: CodeInterpreterToolContainer,
}
1266
/// Mask for image-generation inpainting; provide either an inline image or a file ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ImageGenToolInputImageMask {
    /// Base64-encoded mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_url: Option<String>,
    /// File ID for the mask image.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_id: Option<String>,
}
1276
1277#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1278#[serde(rename_all = "lowercase")]
1279pub enum InputFidelity {
1280 #[default]
1281 High,
1282 Low,
1283}
1284
/// Moderation level for generated images; `auto` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolModeration {
    #[default]
    Auto,
    Low,
}
1292
/// Image generation tool definition.
/// Construct via [`ImageGenerationArgs`]; all fields are optional and
/// omitted from the payload when `None`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
#[builder(
    name = "ImageGenerationArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct ImageGenTool {
    /// Background type for the generated image. One of `transparent`,
    /// `opaque`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<ImageGenToolBackground>,
    /// Control how much effort the model will exert to match the style and features, especially facial features,
    /// of input images. This parameter is only supported for `gpt-image-1`. Unsupported
    /// for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_fidelity: Option<InputFidelity>,
    /// Optional mask for inpainting. Contains `image_url`
    /// (string, optional) and `file_id` (string, optional).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_image_mask: Option<ImageGenToolInputImageMask>,
    /// The image generation model to use. Default: `gpt-image-1`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Moderation level for the generated image. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub moderation: Option<ImageGenToolModeration>,
    /// Compression level for the output image. Default: 100.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_compression: Option<u8>,
    /// The output format of the generated image. One of `png`, `webp`, or
    /// `jpeg`. Default: `png`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_format: Option<ImageGenToolOutputFormat>,
    /// Number of partial images to generate in streaming mode, from 0 (default value) to 3.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub partial_images: Option<u8>,
    /// The quality of the generated image. One of `low`, `medium`, `high`,
    /// or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quality: Option<ImageGenToolQuality>,
    /// The size of the generated image. One of `1024x1024`, `1024x1536`,
    /// `1536x1024`, or `auto`. Default: `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<ImageGenToolSize>,
}
1341
/// Background type for generated images; `auto` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolBackground {
    Transparent,
    Opaque,
    #[default]
    Auto,
}
1350
/// Output image file format; `png` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolOutputFormat {
    #[default]
    Png,
    Webp,
    Jpeg,
}
1359
/// Quality of generated images; `auto` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolQuality {
    Low,
    Medium,
    High,
    #[default]
    Auto,
}
1369
/// Size of generated images; explicit renames because wire values like
/// `"1024x1024"` are not valid Rust identifiers. `auto` is the default.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImageGenToolSize {
    #[default]
    Auto,
    #[serde(rename = "1024x1024")]
    Size1024x1024,
    #[serde(rename = "1024x1536")]
    Size1024x1536,
    #[serde(rename = "1536x1024")]
    Size1536x1024,
}
1382
/// Mode for an allowed-tools constraint: pick freely (`auto`) or must call (`required`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceAllowedMode {
    Auto,
    Required,
}
1389
/// Constrains tool selection to a pre-defined set of tool definitions.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ToolChoiceAllowed {
    /// Constrains the tools available to the model to a pre-defined set.
    ///
    /// `auto` allows the model to pick from among the allowed tools and generate a
    /// message.
    ///
    /// `required` requires the model to call one or more of the allowed tools.
    pub mode: ToolChoiceAllowedMode,
    /// A list of tool definitions that the model should be allowed to call.
    ///
    /// For the Responses API, the list of tool definitions might look like:
    /// ```json
    /// [
    ///     { "type": "function", "name": "get_weather" },
    ///     { "type": "mcp", "server_label": "deepwiki" },
    ///     { "type": "image_generation" }
    /// ]
    /// ```
    pub tools: Vec<serde_json::Value>,
}
1411
/// The type of hosted tool the model should use. Learn more about
/// [built-in tools](https://platform.openai.com/docs/guides/tools).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceTypes {
    FileSearch,
    WebSearchPreview,
    ComputerUsePreview,
    CodeInterpreter,
    ImageGeneration,
}
1423
1424#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1425pub struct ToolChoiceFunction {
1426 /// The name of the function to call.
1427 name: String,
1428}
1429
1430#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1431pub struct ToolChoiceMCP {
1432 /// The name of the tool to call on the server.
1433 name: String,
1434 /// The label of the MCP server to use.
1435 server_label: String,
1436}
1437
1438#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1439pub struct ToolChoiceCustom {
1440 /// The name of the custom tool to call.
1441 name: String,
1442}
1443
/// How the model selects tools. Object forms carry a `"type"` discriminator;
/// the `Hosted` and `Mode` variants are untagged so a hosted-tool object or a
/// bare mode string deserializes directly.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoiceParam {
    /// Constrains the tools available to the model to a pre-defined set.
    AllowedTools(ToolChoiceAllowed),

    /// Use this option to force the model to call a specific function.
    Function(ToolChoiceFunction),

    /// Use this option to force the model to call a specific tool on a remote MCP server.
    Mcp(ToolChoiceMCP),

    /// Use this option to force the model to call a custom tool.
    Custom(ToolChoiceCustom),

    /// Indicates that the model should use a built-in tool to generate a response.
    /// [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
    #[serde(untagged)]
    Hosted(ToolChoiceTypes),

    /// Controls which (if any) tool is called by the model.
    ///
    /// `none` means the model will not call any tool and instead generates a message.
    ///
    /// `auto` means the model can pick between generating a message or calling one or
    /// more tools.
    ///
    /// `required` means the model must call one or more tools.
    #[serde(untagged)]
    Mode(ToolChoiceOptions),
}
1475
/// Bare tool-choice mode strings: `none`, `auto`, or `required`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoiceOptions {
    None,
    Auto,
    Required,
}
1483
/// Error returned by the API when a request fails.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ErrorObject {
    /// The error code for the response.
    pub code: String,
    /// A human-readable description of the error.
    pub message: String,
}
1492
/// Details about an incomplete response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct IncompleteDetails {
    /// The reason why the response is incomplete.
    pub reason: String,
}
1499
/// One alternative token with its log probability, nested under [`LogProb`].
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TopLogProb {
    /// The raw bytes of the token.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The token text.
    pub token: String,
}
1506
/// Log-probability information for one emitted token, including alternatives.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LogProb {
    /// The raw bytes of the token.
    pub bytes: Vec<u8>,
    /// The log probability of this token.
    pub logprob: f64,
    /// The token text.
    pub token: String,
    /// The most likely alternative tokens at this position.
    pub top_logprobs: Vec<TopLogProb>,
}
1514
/// One alternative token with its log probability, nested under [`ResponseLogProb`].
// NOTE(review): type name has a typo ("LobProb" for "LogProb"); renaming
// would break the public API, so it is left as-is.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseTopLobProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
}
1522
/// Log-probability information for one emitted token in a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseLogProb {
    /// The log probability of this token.
    pub logprob: f64,
    /// A possible text token.
    pub token: String,
    /// The log probability of the top 20 most likely tokens.
    pub top_logprobs: Vec<ResponseTopLobProb>,
}
1532
1533/// A simple text output from the model.
1534#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1535pub struct OutputTextContent {
1536 /// The annotations of the text output.
1537 pub annotations: Vec<Annotation>,
1538 pub logprobs: Option<Vec<LogProb>>,
1539 /// The text output from the model.
1540 pub text: String,
1541}
1542
/// Annotation attached to output text, discriminated by a snake_case `"type"` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Annotation {
    /// A citation to a file.
    FileCitation(FileCitationBody),
    /// A citation for a web resource used to generate a model response.
    UrlCitation(UrlCitationBody),
    /// A citation for a container file used to generate a model response.
    ContainerFileCitation(ContainerFileCitationBody),
    /// A path to a file.
    FilePath(FilePath),
}
1555
1556#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1557pub struct FileCitationBody {
1558 /// The ID of the file.
1559 file_id: String,
1560 /// The filename of the file cited.
1561 filename: String,
1562 /// The index of the file in the list of files.
1563 index: u32,
1564}
1565
1566#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1567pub struct UrlCitationBody {
1568 /// The index of the last character of the URL citation in the message.
1569 end_index: u32,
1570 /// The index of the first character of the URL citation in the message.
1571 start_index: u32,
1572 /// The title of the web resource.
1573 title: String,
1574 /// The URL of the web resource.
1575 url: String,
1576}
1577
1578#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1579pub struct ContainerFileCitationBody {
1580 /// The ID of the container file.
1581 container_id: String,
1582 /// The index of the last character of the container file citation in the message.
1583 end_index: u32,
1584 /// The ID of the file.
1585 file_id: String,
1586 /// The filename of the container file cited.
1587 filename: String,
1588 /// The index of the first character of the container file citation in the message.
1589 start_index: u32,
1590}
1591
1592#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1593pub struct FilePath {
1594 /// The ID of the file.
1595 file_id: String,
1596 /// The index of the file in the list of files.
1597 index: u32,
1598}
1599
/// A refusal explanation from the model.
///
/// Appears on the wire as a content part with `type: "refusal"` (see
/// [`OutputMessageContent::Refusal`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct RefusalContent {
    /// The refusal explanation from the model.
    pub refusal: String,
}
1606
/// A message generated by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputMessage {
    /// The content of the output message.
    pub content: Vec<OutputMessageContent>,
    /// The unique ID of the output message.
    pub id: String,
    /// The role of the output message. Always `assistant`.
    pub role: AssistantRole,
    /// The status of the message input. One of `in_progress`, `completed`, or
    /// `incomplete`. Populated when input items are returned via API.
    pub status: OutputStatus,
    // NOTE: the wire-level `type` discriminator ("message") is not stored here;
    // it is produced/consumed by the `#[serde(tag = "type")]` attribute on the
    // enclosing enums (e.g. `OutputItem::Message`).
    ///// The type of the output message. Always `message`.
    //pub r#type: MessageType,
}
1622
/// The type of an output message. Always serialized as `message`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum MessageType {
    #[default]
    Message,
}
1629
/// The role for an output message - always `assistant`.
/// This type ensures type safety by only allowing the assistant role.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum AssistantRole {
    /// The only valid role; serialized as `"assistant"`.
    #[default]
    Assistant,
}
1638
/// A content part of an output message, discriminated by its `type` field
/// (`output_text` or `refusal`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputMessageContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
}
1647
/// A model output content part, discriminated by its `type` field.
///
/// Superset of [`OutputMessageContent`] that additionally allows
/// `reasoning_text` parts.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputContent {
    /// A text output from the model.
    OutputText(OutputTextContent),
    /// A refusal from the model.
    Refusal(RefusalContent),
    /// Reasoning text from the model.
    ReasoningText(ReasoningTextContent),
}
1658
/// Reasoning text emitted by the model (a `reasoning_text` content part).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningTextContent {
    /// The reasoning text from the model.
    pub text: String,
}
1664
/// A reasoning item representing the model's chain of thought, including summary paragraphs.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ReasoningItem {
    /// Unique identifier of the reasoning content.
    pub id: String,
    /// Reasoning summary content.
    pub summary: Vec<SummaryPart>,
    /// Reasoning text content. Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<Vec<ReasoningTextContent>>,
    /// The encrypted content of the reasoning item - populated when a response is generated with
    /// `reasoning.encrypted_content` in the `include` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encrypted_content: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
1684
/// A single summary text fragment from reasoning.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Summary {
    /// A summary of the reasoning output from the model so far.
    pub text: String,
}
1691
/// A part of a reasoning summary, discriminated by its `type` field.
/// Currently the only variant is `summary_text`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SummaryPart {
    /// A `summary_text` fragment.
    SummaryText(Summary),
}
1697
/// File search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCall {
    /// The unique ID of the file search tool call.
    pub id: String,
    /// The queries used to search for files.
    pub queries: Vec<String>,
    /// The status of the file search tool call. One of `in_progress`, `searching`,
    /// `incomplete`, `failed`, or `completed`.
    pub status: FileSearchToolCallStatus,
    /// The results of the file search tool call. Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub results: Option<Vec<FileSearchToolCallResult>>,
}
1712
/// Status of a [`FileSearchToolCall`], serialized in `snake_case`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FileSearchToolCallStatus {
    InProgress,
    Searching,
    Incomplete,
    Failed,
    Completed,
}
1722
/// A single result from a file search.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FileSearchToolCallResult {
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
    /// additional information about the object in a structured format, and querying for objects
    /// via API or the dashboard. Keys are strings with a maximum length of 64 characters.
    /// Values are strings with a maximum length of 512 characters, booleans, or numbers.
    pub attributes: HashMap<String, serde_json::Value>,
    /// The unique ID of the file.
    pub file_id: String,
    /// The name of the file.
    pub filename: String,
    /// The relevance score of the file - a value between 0 and 1.
    pub score: f32,
    /// The text that was retrieved from the file.
    pub text: String,
}
1740
/// A pending safety check for a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerCallSafetyCheckParam {
    /// The ID of the pending safety check.
    pub id: String,
    /// The type of the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// Details about the pending safety check.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
1752
/// Status of a [`WebSearchToolCall`], serialized in `snake_case`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum WebSearchToolCallStatus {
    InProgress,
    Searching,
    Completed,
    Failed,
}
1761
/// A source consulted during a web search action.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionSearchSource {
    /// The type of source. Always `url`.
    pub r#type: String,
    /// The URL of the source.
    pub url: String,
}
1769
1770#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1771pub struct WebSearchActionSearch {
1772 /// The search query.
1773 pub query: String,
1774 /// The sources used in the search.
1775 pub sources: Option<Vec<WebSearchActionSearchSource>>,
1776}
1777
/// An `open_page` web search action: opens a specific URL from search results.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionOpenPage {
    /// The URL opened by the model.
    pub url: String,
}
1783
/// A `find` web search action: searches for a pattern within a loaded page.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchActionFind {
    /// The URL of the page searched for the pattern.
    pub url: String,
    /// The pattern or text to search for within the page.
    pub pattern: String,
}
1791
/// The specific action taken by a web search tool call, discriminated by its
/// `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum WebSearchToolCallAction {
    /// Action type "search" - Performs a web search query.
    Search(WebSearchActionSearch),
    /// Action type "open_page" - Opens a specific URL from search results.
    OpenPage(WebSearchActionOpenPage),
    /// Action type "find": Searches for a pattern within a loaded page.
    Find(WebSearchActionFind),
}
1802
/// Web search tool call output.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct WebSearchToolCall {
    /// An object describing the specific action taken in this web search call. Includes
    /// details on how the model used the web (search, open_page, find).
    pub action: WebSearchToolCallAction,
    /// The unique ID of the web search tool call.
    pub id: String,
    /// The status of the web search tool call.
    pub status: WebSearchToolCallStatus,
}
1814
/// Output from a computer tool call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ComputerToolCall {
    /// The action the model wants to perform (click, drag, type, ...).
    pub action: ComputerAction,
    /// An identifier used when responding to the tool call with output.
    pub call_id: String,
    /// The unique ID of the computer call.
    pub id: String,
    /// The pending safety checks for the computer call.
    pub pending_safety_checks: Vec<ComputerCallSafetyCheckParam>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    pub status: OutputStatus,
}
1829
/// A point in 2D space. Used for the waypoints of a [`Drag`] action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DragPoint {
    /// The x-coordinate.
    pub x: i32,
    /// The y-coordinate.
    pub y: i32,
}
1838
/// Represents all user‐triggered actions, discriminated by the `type` field
/// (`click`, `double_click`, `drag`, `keypress`, `move`, `screenshot`,
/// `scroll`, `type`, `wait`).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ComputerAction {
    /// A click action.
    Click(ClickParam),

    /// A double click action.
    DoubleClick(DoubleClickAction),

    /// A drag action.
    Drag(Drag),

    /// A collection of keypresses the model would like to perform.
    Keypress(KeyPressAction),

    /// A mouse move action.
    Move(Move),

    /// A screenshot action (no payload).
    Screenshot,

    /// A scroll action.
    Scroll(Scroll),

    /// An action to type in text.
    Type(Type),

    /// A wait action (no payload).
    Wait,
}
1870
/// The mouse button used in a [`ClickParam`], serialized in lowercase.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ClickButtonType {
    Left,
    Right,
    Wheel,
    Back,
    Forward,
}
1880
/// A click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ClickParam {
    /// Indicates which mouse button was pressed during the click. One of `left`,
    /// `right`, `wheel`, `back`, or `forward`.
    pub button: ClickButtonType,
    /// The x-coordinate where the click occurred.
    pub x: i32,
    /// The y-coordinate where the click occurred.
    pub y: i32,
}
1892
/// A double click action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DoubleClickAction {
    /// The x-coordinate where the double click occurred.
    pub x: i32,
    /// The y-coordinate where the double click occurred.
    pub y: i32,
}
1901
/// A drag action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Drag {
    /// The path of points the cursor drags through.
    pub path: Vec<DragPoint>,
}
1908
/// A keypress action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KeyPressAction {
    /// The combination of keys the model is requesting to be pressed.
    /// This is an array of strings, each representing a key.
    pub keys: Vec<String>,
}
1916
/// A mouse move action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Move {
    /// The x-coordinate to move to.
    pub x: i32,
    /// The y-coordinate to move to.
    pub y: i32,
}
1925
/// A scroll action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Scroll {
    /// The horizontal scroll distance.
    pub scroll_x: i32,
    /// The vertical scroll distance.
    pub scroll_y: i32,
    /// The x-coordinate where the scroll occurred.
    pub x: i32,
    /// The y-coordinate where the scroll occurred.
    pub y: i32,
}
1938
/// A typing (text entry) action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Type {
    /// The text to type.
    pub text: String,
}
1945
/// A tool call to run a function, emitted by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct FunctionToolCall {
    /// A JSON string of the arguments to pass to the function.
    pub arguments: String,
    /// The unique ID of the function tool call generated by the model.
    pub call_id: String,
    /// The name of the function to run.
    pub name: String,
    /// The unique ID of the function tool call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    /// Populated when items are returned via API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<OutputStatus>,
}
1962
/// Status of an [`ImageGenToolCall`], serialized in `snake_case`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ImageGenToolCallStatus {
    InProgress,
    Completed,
    Generating,
    Failed,
}
1971
1972#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1973pub struct ImageGenToolCall {
1974 /// The unique ID of the image generation call.
1975 pub id: String,
1976 /// The generated image encoded in base64.
1977 pub result: Option<String>,
1978 /// The status of the image generation call.
1979 pub status: ImageGenToolCallStatus,
1980}
1981
/// Status of a [`CodeInterpreterToolCall`], serialized in `snake_case`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum CodeInterpreterToolCallStatus {
    InProgress,
    Completed,
    Incomplete,
    Interpreting,
    Failed,
}
1991
/// Output of a code interpreter request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterToolCall {
    /// The code to run, or null if not available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// ID of the container used to run the code.
    pub container_id: String,
    /// The unique ID of the code interpreter tool call.
    pub id: String,
    /// The outputs generated by the code interpreter, such as logs or images.
    /// Can be null if no outputs are available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub outputs: Option<Vec<CodeInterpreterToolCallOutput>>,
    /// The status of the code interpreter tool call.
    /// Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`.
    pub status: CodeInterpreterToolCallStatus,
}
2010
/// Individual result from a code interpreter: either logs or files.
/// Discriminated by the `type` field (`logs` or `image`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum CodeInterpreterToolCallOutput {
    /// Code interpreter output logs
    Logs(CodeInterpreterOutputLogs),
    /// Code interpreter output image
    Image(CodeInterpreterOutputImage),
}
2020
/// The `logs` output of a code interpreter call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputLogs {
    /// The logs output from the code interpreter.
    pub logs: String,
}
2026
/// The `image` output of a code interpreter call.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CodeInterpreterOutputImage {
    /// The URL of the image output from the code interpreter.
    pub url: String,
}
2032
2033#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2034pub struct CodeInterpreterFile {
2035 /// The ID of the file.
2036 file_id: String,
2037 /// The MIME type of the file.
2038 mime_type: String,
2039}
2040
/// A tool call to run a command on the local shell.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct LocalShellToolCall {
    /// Execute a shell command on the server.
    pub action: LocalShellExecAction,
    /// The unique ID of the local shell tool call generated by the model.
    pub call_id: String,
    /// The unique ID of the local shell call.
    pub id: String,
    /// The status of the local shell call.
    pub status: OutputStatus,
}
2052
2053/// Define the shape of a local shell action (exec).
2054#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2055pub struct LocalShellExecAction {
2056 /// The command to run.
2057 pub command: Vec<String>,
2058 /// Environment variables to set for the command.
2059 pub env: HashMap<String, String>,
2060 /// Optional timeout in milliseconds for the command.
2061 pub timeout_ms: Option<u64>,
2062 /// Optional user to run the command as.
2063 pub user: Option<String>,
2064 /// Optional working directory to run the command in.
2065 pub working_directory: Option<String>,
2066}
2067
2068/// Output of an MCP server tool invocation.
2069#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2070pub struct MCPToolCall {
2071 /// A JSON string of the arguments passed to the tool.
2072 pub arguments: String,
2073 /// The unique ID of the tool call.
2074 pub id: String,
2075 /// The name of the tool that was run.
2076 pub name: String,
2077 /// The label of the MCP server running the tool.
2078 pub server_label: String,
2079 /// Unique identifier for the MCP tool call approval request. Include this value
2080 /// in a subsequent `mcp_approval_response` input to approve or reject the corresponding
2081 /// tool call.
2082 pub approval_request_id: Option<String>,
2083 /// Error message from the call, if any.
2084 pub error: Option<String>,
2085 /// The output from the tool call.
2086 pub output: Option<String>,
2087 /// The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
2088 /// `calling`, or `failed`.
2089 pub status: Option<MCPToolCallStatus>,
2090}
2091
/// Status of an [`MCPToolCall`], serialized in `snake_case`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum MCPToolCallStatus {
    InProgress,
    Completed,
    Incomplete,
    Calling,
    Failed,
}
2101
/// A list of tools available on an MCP server.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPListTools {
    /// The unique ID of the list.
    pub id: String,
    /// The label of the MCP server.
    pub server_label: String,
    /// The tools available on the server.
    pub tools: Vec<MCPListToolsTool>,
    /// Error message if listing failed. Omitted from serialization when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
2114
/// A request for human approval of an MCP tool invocation.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPApprovalRequest {
    /// JSON string of arguments for the tool.
    pub arguments: String,
    /// The unique ID of the approval request.
    pub id: String,
    /// The name of the tool to run.
    pub name: String,
    /// The label of the MCP server making the request.
    pub server_label: String,
}
2126
/// A detailed breakdown of input token usage.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct InputTokenDetails {
    /// The number of tokens that were retrieved from the cache.
    /// [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
    pub cached_tokens: u32,
}
2133
/// A detailed breakdown of output token usage.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct OutputTokenDetails {
    /// The number of reasoning tokens.
    pub reasoning_tokens: u32,
}
2139
/// Usage statistics for a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseUsage {
    /// The number of input tokens.
    pub input_tokens: u32,
    /// A detailed breakdown of the input tokens.
    pub input_tokens_details: InputTokenDetails,
    /// The number of output tokens.
    pub output_tokens: u32,
    /// A detailed breakdown of the output tokens.
    pub output_tokens_details: OutputTokenDetails,
    /// The total number of tokens used.
    pub total_tokens: u32,
}
2154
/// System/developer instructions for a response; either plain text or a list
/// of input items. Untagged: the JSON shape (string vs. array) selects the variant.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum Instructions {
    /// A text input to the model, equivalent to a text input with the `developer` role.
    Text(String),
    /// A list of one or many input items to the model, containing different content types.
    Array(Vec<InputItem>),
}
2163
/// The complete response returned by the Responses API.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Response {
    /// Whether to run the model response in the background.
    /// [Learn more](https://platform.openai.com/docs/guides/background).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub background: Option<bool>,

    /// Billing information for the response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub billing: Option<Billing>,

    /// The conversation that this response belongs to. Input items and output
    /// items from this response are automatically added to this conversation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<Conversation>,

    /// Unix timestamp (in seconds) when this Response was created.
    pub created_at: u64,

    /// An error object returned when the model fails to generate a Response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorObject>,

    /// Unique identifier for this response.
    pub id: String,

    /// Details about why the response is incomplete, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub incomplete_details: Option<IncompleteDetails>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When using along with `previous_response_id`, the instructions from a previous response
    /// will not be carried over to the next response. This makes it simple to swap out
    /// system (or developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<Instructions>,

    /// An upper bound for the number of tokens that can be generated for a response,
    /// including visible output tokens and
    /// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<u32>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be
    /// useful for storing additional information about the object in a structured
    /// format, and querying for objects via API or the dashboard.
    ///
    /// Keys are strings with a maximum length of 64 characters. Values are strings
    /// with a maximum length of 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: String,

    /// The object type of this resource - always set to `response`.
    pub object: String,

    /// An array of content items generated by the model.
    ///
    /// - The length and order of items in the output array is dependent on the model's response.
    /// - Rather than accessing the first item in the output array and assuming it's an assistant
    ///   message with the content generated by the model, you might consider using
    ///   the `output_text` property where supported in SDKs.
    pub output: Vec<OutputItem>,

    // SDK-only convenience property that contains the aggregated text output from all
    // `output_text` items in the `output` array, if any are present.
    // Supported in the Python and JavaScript SDKs; this crate exposes it as the
    // `Response::output_text()` method instead of a serialized field.
    // #[serde(skip_serializing_if = "Option::is_none")]
    // pub output_text: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
    /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// Reference to a prompt template and its variables.
    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<Prompt>,

    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
    /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// A stable identifier used to help detect users of your application that may be violating OpenAI's
    /// usage policies.
    ///
    /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
    /// or email address, in order to avoid sending us any identifying information. [Learn
    /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub safety_identifier: Option<String>,

    /// Specifies the processing type used for serving the request.
    /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
    /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
    /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
    /// - When not set, the default behavior is 'auto'.
    ///
    /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<ServiceTier>,

    /// The status of the response generation.
    /// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or `incomplete`.
    pub status: Status,

    /// What sampling temperature was used, between 0 and 2. Higher values like 0.8 make
    /// outputs more random, lower values like 0.2 make output more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You
    /// can specify which tool to use by setting the `tool_choice` parameter.
    ///
    /// We support the following categories of tools:
    /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
    ///   model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
    ///   or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
    ///   [built-in tools](https://platform.openai.com/docs/guides/tools).
    /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
    ///   or predefined connectors such as Google Drive and SharePoint. Learn more about
    ///   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
    /// - **Function calls (custom tools)**: Functions that are defined by you,
    ///   enabling the model to call your own code with strongly typed arguments
    ///   and outputs. Learn more about
    ///   [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
    ///   custom tools to call your own code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
    /// token position, each with an associated log probability.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u8>,

    /// An alternative to sampling with temperature, called nucleus sampling,
    /// where the model considers the results of the tokens with top_p probability
    /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
    /// are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,

    /// Represents token usage details including input tokens, output tokens,
    /// a breakdown of output tokens, and the total tokens used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub usage: Option<ResponseUsage>,
}
2355
2356impl Response {
2357 /// SDK-only convenience property that contains the aggregated text output from all
2358 /// `output_text` items in the `output` array, if any are present.
2359 pub fn output_text(&self) -> Option<String> {
2360 let output = self
2361 .output
2362 .iter()
2363 .filter_map(|item| match item {
2364 OutputItem::Message(msg) => Some(
2365 msg.content
2366 .iter()
2367 .filter_map(|content| match content {
2368 OutputMessageContent::OutputText(ot) => Some(ot.text.clone()),
2369 _ => None,
2370 })
2371 .collect::<Vec<String>>(),
2372 ),
2373 _ => None,
2374 })
2375 .flatten()
2376 .collect::<Vec<String>>()
2377 .join("");
2378 if output.is_empty() {
2379 None
2380 } else {
2381 Some(output)
2382 }
2383 }
2384}
2385
/// The status of response generation, serialized in `snake_case`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum Status {
    Completed,
    Failed,
    InProgress,
    Cancelled,
    Queued,
    Incomplete,
}
2396
/// Output item generated by the model, discriminated by its `type` field
/// (e.g. `message`, `file_search_call`, `function_call`, ...).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum OutputItem {
    /// An output message from the model.
    Message(OutputMessage),
    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
    /// for more information.
    FileSearchCall(FileSearchToolCall),
    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling)
    /// for more information.
    FunctionCall(FunctionToolCall),
    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search)
    /// for more information.
    WebSearchCall(WebSearchToolCall),
    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
    /// for more information.
    ComputerCall(ComputerToolCall),
    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API for
    /// subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),
    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),
    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),
    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),
    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),
    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),
    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),
    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}
2440
/// A call made by the model to a custom (developer-defined) tool.
///
/// Marked `#[non_exhaustive]` so new fields can be added by the library
/// without a breaking change; construct via deserialization, not a literal.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[non_exhaustive]
pub struct CustomToolCall {
    /// An identifier used to map this custom tool call to a tool call output.
    pub call_id: String,
    /// The input for the custom tool call generated by the model.
    pub input: String,
    /// The name of the custom tool being called.
    pub name: String,
    /// The unique ID of the custom tool call in the OpenAI platform.
    pub id: String,
}
2453
/// Result returned when deleting a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct DeleteResponse {
    /// The object type string (presumably `"response"` — confirm against the API docs).
    pub object: String,
    /// Whether the response was deleted.
    pub deleted: bool,
    /// The ID of the deleted response.
    pub id: String,
}
2460
/// A reference to an item by its ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct AnyItemReference {
    /// The reference type; optional in the payload (when present, presumably
    /// `"item_reference"` — TODO confirm against the API spec).
    pub r#type: Option<String>,
    /// The ID of the referenced item.
    pub id: String,
}
2466
/// A single item resource, discriminated by the JSON `type` field — the
/// `snake_case` form of the variant name (e.g. `file_search_call`).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ItemResourceItem {
    /// A message item (`type: "message"`).
    Message(MessageItem),
    /// A file search tool call.
    FileSearchCall(FileSearchToolCall),
    /// A computer use tool call.
    ComputerCall(ComputerToolCall),
    /// The output of a computer use tool call.
    ComputerCallOutput(ComputerCallOutputItemParam),
    /// A web search tool call.
    WebSearchCall(WebSearchToolCall),
    /// A function tool call.
    FunctionCall(FunctionToolCall),
    /// The output of a function tool call.
    FunctionCallOutput(FunctionCallOutputItemParam),
    /// An image generation tool call.
    ImageGenerationCall(ImageGenToolCall),
    /// A code interpreter tool call.
    CodeInterpreterCall(CodeInterpreterToolCall),
    /// A local shell tool call.
    LocalShellCall(LocalShellToolCall),
    /// The output of a local shell tool call.
    LocalShellCallOutput(LocalShellToolCallOutput),
    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),
    /// A request for human approval of an MCP tool invocation.
    McpApprovalRequest(MCPApprovalRequest),
    /// A response to an MCP approval request.
    McpApprovalResponse(MCPApprovalResponse),
    /// An MCP tool call.
    McpCall(MCPToolCall),
}
2486
2487#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2488#[serde(untagged)]
2489pub enum ItemResource {
2490 ItemReference(AnyItemReference),
2491 Item(ItemResourceItem),
2492}
2493
/// A list of Response items.
///
/// Cursor-style page: `first_id`/`last_id` bound the page and `has_more`
/// signals whether further pages exist (presumably fetched by passing an ID
/// as a cursor — confirm against the list-input-items API docs).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseItemList {
    /// The type of object returned, must be `list`.
    pub object: String,
    /// The ID of the first item in the list.
    pub first_id: Option<String>,
    /// The ID of the last item in the list.
    pub last_id: Option<String>,
    /// Whether there are more items in the list.
    pub has_more: bool,
    /// The list of items.
    pub data: Vec<ItemResource>,
}
2508
/// Request body for counting the input tokens a response request would
/// consume. Mirrors the relevant subset of the create-response parameters;
/// every field is optional and omitted from the JSON when `None`.
///
/// Construct directly or via the generated [`TokenCountsBodyArgs`] builder.
#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
#[builder(
    name = "TokenCountsBodyArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct TokenCountsBody {
    /// The conversation that this response belongs to. Items from this
    /// conversation are prepended to `input_items` for this response request.
    /// Input items and output items from this response are automatically added to this
    /// conversation after this response completes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<ConversationParam>,

    /// Text, image, or file inputs to the model, used to generate a response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
    /// to browse and compare available models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// **gpt-5 and o-series models only**
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You can specify which tool
    /// to use by setting the `tool_choice` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    ///   the model's context window size, the model will truncate the
    ///   response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    ///   size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,
}
2586
/// Token counts computed for a [`TokenCountsBody`] request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TokenCountsResource {
    /// The object type string (confirm the expected literal against the API docs).
    pub object: String,
    /// The number of input tokens the request would consume.
    pub input_tokens: u32,
}