async_openai/types/responses/response.rs
1use crate::error::OpenAIError;
2pub use crate::types::chat::{
3 CompletionTokensDetails, ImageDetail, PromptTokensDetails, ReasoningEffort,
4 ResponseFormatJsonSchema,
5};
6use crate::types::{MCPListToolsTool, MCPTool};
7use derive_builder::Builder;
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10
11/// Role of messages in the API.
12#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
13#[serde(rename_all = "lowercase")]
14pub enum Role {
15 #[default]
16 User,
17 Assistant,
18 System,
19 Developer,
20}
21
22/// Status of input/output items.
23#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
24#[serde(rename_all = "snake_case")]
25pub enum OutputStatus {
26 InProgress,
27 Completed,
28 Incomplete,
29}
30
31#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
32#[serde(untagged)]
33pub enum InputParam {
34 /// A text input to the model, equivalent to a text input with the
35 /// `user` role.
36 Text(String),
37 /// A list of one or many input items to the model, containing
38 /// different content types.
39 Items(Vec<InputItem>),
40}
41
42impl Default for InputParam {
43 fn default() -> Self {
44 Self::Text(String::new())
45 }
46}
47
48/// Content item used to generate a response.
49///
50/// This is a properly discriminated union based on the `type` field, using Rust's
51/// type-safe enum with serde's tag attribute for efficient deserialization.
52///
53/// # OpenAPI Specification
54/// Corresponds to the `Item` schema in the OpenAPI spec with a `type` discriminator.
55#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
56#[serde(tag = "type", rename_all = "snake_case")]
57pub enum Item {
58 /// A message (type: "message").
59 /// Can represent InputMessage (user/system/developer) or OutputMessage (assistant).
60 ///
61 /// InputMessage:
62 /// A message input to the model with a role indicating instruction following hierarchy.
63 /// Instructions given with the developer or system role take precedence over instructions given with the user role.
64 /// OutputMessage:
65 /// A message output from the model.
66 Message(MessageItem),
67
68 /// The results of a file search tool call. See the
69 /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
70 FileSearchCall(FileSearchToolCall),
71
72 /// A tool call to a computer use tool. See the
73 /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
74 ComputerCall(ComputerToolCall),
75
76 /// The output of a computer tool call.
77 ComputerCallOutput(ComputerCallOutputItemParam),
78
79 /// The results of a web search tool call. See the
80 /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
81 WebSearchCall(WebSearchToolCall),
82
    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
86 FunctionCall(FunctionToolCall),
87
88 /// The output of a function tool call.
89 FunctionCallOutput(FunctionCallOutputItemParam),
90
91 /// A description of the chain of thought used by a reasoning model while generating
92 /// a response. Be sure to include these items in your `input` to the Responses API
93 /// for subsequent turns of a conversation if you are manually
94 /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
95 Reasoning(ReasoningItem),
96
97 /// An image generation request made by the model.
98 ImageGenerationCall(ImageGenToolCall),
99
100 /// A tool call to run code.
101 CodeInterpreterCall(CodeInterpreterToolCall),
102
103 /// A tool call to run a command on the local shell.
104 LocalShellCall(LocalShellToolCall),
105
106 /// The output of a local shell tool call.
107 LocalShellCallOutput(LocalShellToolCallOutput),
108
    /// A tool call representing a request to execute one or more shell commands.
110 FunctionShellCall(FunctionShellCallItemParam),
111
112 /// The streamed output items emitted by a function shell tool call.
113 FunctionShellCallOutput(FunctionShellCallOutputItemParam),
114
115 /// A tool call representing a request to create, delete, or update files using diff patches.
116 ApplyPatchCall(ApplyPatchToolCallItemParam),
117
118 /// The streamed output emitted by an apply patch tool call.
119 ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
120
121 /// A list of tools available on an MCP server.
122 McpListTools(MCPListTools),
123
124 /// A request for human approval of a tool invocation.
125 McpApprovalRequest(MCPApprovalRequest),
126
127 /// A response to an MCP approval request.
128 McpApprovalResponse(MCPApprovalResponse),
129
130 /// An invocation of a tool on an MCP server.
131 McpCall(MCPToolCall),
132
133 /// The output of a custom tool call from your code, being sent back to the model.
134 CustomToolCallOutput(CustomToolCallOutput),
135
136 /// A call to a custom tool created by the model.
137 CustomToolCall(CustomToolCall),
138}
139
140/// Input item that can be used in the context for generating a response.
141///
142/// This represents the OpenAPI `InputItem` schema which is an `anyOf`:
143/// 1. `EasyInputMessage` - Simple, user-friendly message input (can use string content)
144/// 2. `Item` - Structured items with proper type discrimination (including InputMessage, OutputMessage, tool calls)
145/// 3. `ItemReferenceParam` - Reference to an existing item by ID (type can be null)
146///
147/// Uses untagged deserialization because these types overlap in structure.
148/// Order matters: more specific structures are tried first.
149///
150/// # OpenAPI Specification
151/// Corresponds to the `InputItem` schema: `anyOf[EasyInputMessage, Item, ItemReferenceParam]`
152#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
153#[serde(untagged)]
154pub enum InputItem {
155 /// A reference to an existing item by ID.
156 /// Has a required `id` field and optional `type` (can be "item_reference" or null).
157 /// Must be tried first as it's the most minimal structure.
158 ItemReference(ItemReference),
159
160 /// All structured items with proper type discrimination.
161 /// Includes InputMessage, OutputMessage, and all tool calls/outputs.
162 /// Uses the discriminated `Item` enum for efficient, type-safe deserialization.
163 Item(Item),
164
165 /// A simple, user-friendly message input (EasyInputMessage).
166 /// Supports string content and can include assistant role for previous responses.
167 /// Must be tried last as it's the most flexible structure.
168 ///
169 /// A message input to the model with a role indicating instruction following
170 /// hierarchy. Instructions given with the `developer` or `system` role take
171 /// precedence over instructions given with the `user` role. Messages with the
172 /// `assistant` role are presumed to have been generated by the model in previous
173 /// interactions.
174 EasyMessage(EasyInputMessage),
175}
176
177impl InputItem {
178 /// Creates an InputItem from an item reference ID.
179 pub fn from_reference(id: impl Into<String>) -> Self {
180 Self::ItemReference(ItemReference::new(id))
181 }
182
183 /// Creates an InputItem from a structured Item.
184 pub fn from_item(item: Item) -> Self {
185 Self::Item(item)
186 }
187
188 /// Creates an InputItem from an EasyInputMessage.
189 pub fn from_easy_message(message: EasyInputMessage) -> Self {
190 Self::EasyMessage(message)
191 }
192
193 /// Creates a simple text message with the given role and content.
194 pub fn text_message(role: Role, content: impl Into<String>) -> Self {
195 Self::EasyMessage(EasyInputMessage {
196 r#type: MessageType::Message,
197 role,
198 content: EasyInputContent::Text(content.into()),
199 })
200 }
201}
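
// Illustrative sketch (not part of the original module): building a mixed
// `InputParam` from the helper constructors above. The message text and the
// referenced item ID are placeholders.
#[allow(dead_code)]
fn example_input_param_sketch() -> InputParam {
    let earlier_item = InputItem::from_reference("msg_123");
    let user_turn = InputItem::text_message(Role::User, "Summarize the report referenced above.");
    InputParam::Items(vec![earlier_item, user_turn])
}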
202
203/// A message item used within the `Item` enum.
204///
205/// Both InputMessage and OutputMessage have `type: "message"`, so we use an untagged
206/// enum to distinguish them based on their structure:
207/// - OutputMessage: role=assistant, required id & status fields
208/// - InputMessage: role=user/system/developer, content is Vec<ContentType>, optional id/status
209///
210/// Note: EasyInputMessage is NOT included here - it's a separate variant in `InputItem`,
211/// not part of the structured `Item` enum.
212#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
213#[serde(untagged)]
214pub enum MessageItem {
215 /// An output message from the model (role: assistant, has required id & status).
216 /// This must come first as it has the most specific structure (required id and status fields).
217 Output(OutputMessage),
218
219 /// A structured input message (role: user/system/developer, content is Vec<ContentType>).
220 /// Has structured content list and optional id/status fields.
221 ///
222 /// A message input to the model with a role indicating instruction following hierarchy.
223 /// Instructions given with the `developer` or `system` role take precedence over instructions
224 /// given with the `user` role.
225 Input(InputMessage),
226}
227
228/// A reference to an existing item by ID.
229#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
230pub struct ItemReference {
231 /// The type of item to reference. Can be "item_reference" or null.
232 #[serde(skip_serializing_if = "Option::is_none")]
233 pub r#type: Option<ItemReferenceType>,
234 /// The ID of the item to reference.
235 pub id: String,
236}
237
238impl ItemReference {
239 /// Create a new item reference with the given ID.
240 pub fn new(id: impl Into<String>) -> Self {
241 Self {
242 r#type: Some(ItemReferenceType::ItemReference),
243 id: id.into(),
244 }
245 }
246}
247
248#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
249#[serde(rename_all = "snake_case")]
250pub enum ItemReferenceType {
251 ItemReference,
252}
253
254/// Output from a function call that you're providing back to the model.
255#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
256pub struct FunctionCallOutputItemParam {
257 /// The unique ID of the function tool call generated by the model.
258 pub call_id: String,
259 /// Text, image, or file output of the function tool call.
260 pub output: FunctionCallOutput,
261 /// The unique ID of the function tool call output.
262 /// Populated when this item is returned via API.
263 #[serde(skip_serializing_if = "Option::is_none")]
264 pub id: Option<String>,
265 /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
266 /// Populated when items are returned via API.
267 #[serde(skip_serializing_if = "Option::is_none")]
268 pub status: Option<OutputStatus>,
269}
270
271#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
272#[serde(untagged)]
273pub enum FunctionCallOutput {
274 /// A JSON string of the output of the function tool call.
275 Text(String),
    /// Text, image, or file output of the function tool call.
    Content(Vec<InputContent>), // TODO use shape which allows null from OpenAPI spec?
277}
278
279#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
280pub struct ComputerCallOutputItemParam {
281 /// The ID of the computer tool call that produced the output.
282 pub call_id: String,
283 /// A computer screenshot image used with the computer use tool.
284 pub output: ComputerScreenshotImage,
285 /// The safety checks reported by the API that have been acknowledged by the developer.
286 #[serde(skip_serializing_if = "Option::is_none")]
287 pub acknowledged_safety_checks: Option<Vec<ComputerCallSafetyCheckParam>>,
288 /// The unique ID of the computer tool call output. Optional when creating.
289 #[serde(skip_serializing_if = "Option::is_none")]
290 pub id: Option<String>,
291 /// The status of the message input. One of `in_progress`, `completed`, or `incomplete`.
292 /// Populated when input items are returned via API.
293 #[serde(skip_serializing_if = "Option::is_none")]
294 pub status: Option<OutputStatus>, // TODO rename OutputStatus?
295}
296
297#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
298#[serde(rename_all = "snake_case")]
299pub enum ComputerScreenshotImageType {
300 ComputerScreenshot,
301}
302
303/// A computer screenshot image used with the computer use tool.
304#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
305pub struct ComputerScreenshotImage {
306 /// Specifies the event type. For a computer screenshot, this property is always
307 /// set to `computer_screenshot`.
308 pub r#type: ComputerScreenshotImageType,
309 /// The identifier of an uploaded file that contains the screenshot.
310 #[serde(skip_serializing_if = "Option::is_none")]
311 pub file_id: Option<String>,
312 /// The URL of the screenshot image.
313 #[serde(skip_serializing_if = "Option::is_none")]
314 pub image_url: Option<String>,
315}
316
317/// Output from a local shell tool call that you're providing back to the model.
318#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
319pub struct LocalShellToolCallOutput {
320 /// The unique ID of the local shell tool call generated by the model.
321 pub id: String,
322
323 /// A JSON string of the output of the local shell tool call.
324 pub output: String,
325
326 /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
327 #[serde(skip_serializing_if = "Option::is_none")]
328 pub status: Option<OutputStatus>,
329}
330
331/// Output from a local shell command execution.
332#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
333pub struct LocalShellOutput {
334 /// The stdout output from the command.
335 #[serde(skip_serializing_if = "Option::is_none")]
336 pub stdout: Option<String>,
337
338 /// The stderr output from the command.
339 #[serde(skip_serializing_if = "Option::is_none")]
340 pub stderr: Option<String>,
341
342 /// The exit code of the command.
343 #[serde(skip_serializing_if = "Option::is_none")]
344 pub exit_code: Option<i32>,
345}
346
347/// An MCP approval response that you're providing back to the model.
348#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
349pub struct MCPApprovalResponse {
350 /// The ID of the approval request being answered.
351 pub approval_request_id: String,
352
353 /// Whether the request was approved.
354 pub approve: bool,
355
    /// The unique ID of the approval response.
357 #[serde(skip_serializing_if = "Option::is_none")]
358 pub id: Option<String>,
359
360 /// Optional reason for the decision.
361 #[serde(skip_serializing_if = "Option::is_none")]
362 pub reason: Option<String>,
363}
364
365#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
366#[serde(untagged)]
367pub enum CustomToolCallOutputOutput {
368 /// A string of the output of the custom tool call.
369 Text(String),
370 /// Text, image, or file output of the custom tool call.
371 List(Vec<InputContent>),
372}
373
374#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
375pub struct CustomToolCallOutput {
376 /// The call ID, used to map this custom tool call output to a custom tool call.
377 pub call_id: String,
378
379 /// The output from the custom tool call generated by your code.
    /// Can be a string or a list of output content.
381 pub output: CustomToolCallOutputOutput,
382
383 /// The unique ID of the custom tool call output in the OpenAI platform.
384 #[serde(skip_serializing_if = "Option::is_none")]
385 pub id: Option<String>,
386}
387
388/// A simplified message input to the model (EasyInputMessage in the OpenAPI spec).
389///
390/// This is the most user-friendly way to provide messages, supporting both simple
391/// string content and structured content. Role can include `assistant` for providing
392/// previous assistant responses.
393#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
394#[builder(
395 name = "EasyInputMessageArgs",
396 pattern = "mutable",
397 setter(into, strip_option),
398 default
399)]
400#[builder(build_fn(error = "OpenAIError"))]
401pub struct EasyInputMessage {
402 /// The type of the message input. Always set to `message`.
403 pub r#type: MessageType,
404 /// The role of the message input. One of `user`, `assistant`, `system`, or `developer`.
405 pub role: Role,
406 /// Text, image, or audio input to the model, used to generate a response.
407 /// Can also contain previous assistant responses.
408 pub content: EasyInputContent,
409}
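
// Illustrative sketch (not part of the original module): constructing an
// `EasyInputMessage` with the generated `EasyInputMessageArgs` builder; the
// message text is a placeholder.
#[allow(dead_code)]
fn example_easy_input_message_sketch() -> Result<EasyInputMessage, OpenAIError> {
    EasyInputMessageArgs::default()
        .role(Role::User)
        .content(EasyInputContent::Text(
            "How do I parse JSON in Rust?".to_string(),
        ))
        .build()
}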
410
411/// A structured message input to the model (InputMessage in the OpenAPI spec).
412///
413/// This variant requires structured content (not a simple string) and does not support
414/// the `assistant` role (use OutputMessage for that). Used when items are returned via API
415/// with additional metadata.
416#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
417#[builder(
418 name = "InputMessageArgs",
419 pattern = "mutable",
420 setter(into, strip_option),
421 default
422)]
423#[builder(build_fn(error = "OpenAIError"))]
424pub struct InputMessage {
425 /// A list of one or many input items to the model, containing different content types.
426 pub content: Vec<InputContent>,
427 /// The role of the message input. One of `user`, `system`, or `developer`.
428 /// Note: `assistant` is NOT allowed here; use OutputMessage instead.
429 pub role: InputRole,
430 /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
431 /// Populated when items are returned via API.
432 #[serde(skip_serializing_if = "Option::is_none")]
433 pub status: Option<OutputStatus>,
    // The type of the message input. Always set to `message`.
    // pub r#type: MessageType,
436}
437
438/// The role for an input message - can only be `user`, `system`, or `developer`.
439/// This type ensures type safety by excluding the `assistant` role (use OutputMessage for that).
440#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
441#[serde(rename_all = "lowercase")]
442pub enum InputRole {
443 #[default]
444 User,
445 System,
446 Developer,
447}
448
449/// Content for EasyInputMessage - can be a simple string or structured list.
450#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
451#[serde(untagged)]
452pub enum EasyInputContent {
453 /// A text input to the model.
454 Text(String),
455 /// A list of one or many input items to the model, containing different content types.
456 ContentList(Vec<InputContent>),
457}
458
459/// Parts of a message: text, image, file, or audio.
460#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
461#[serde(tag = "type", rename_all = "snake_case")]
462pub enum InputContent {
463 /// A text input to the model.
464 InputText(InputTextContent),
465 /// An image input to the model. Learn about
466 /// [image inputs](https://platform.openai.com/docs/guides/vision).
467 InputImage(InputImageContent),
468 /// A file input to the model.
469 InputFile(InputFileContent),
470}
471
472#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
473pub struct InputTextContent {
474 /// The text input to the model.
475 pub text: String,
476}
477
478#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
479#[builder(
480 name = "InputImageArgs",
481 pattern = "mutable",
482 setter(into, strip_option),
483 default
484)]
485#[builder(build_fn(error = "OpenAIError"))]
486pub struct InputImageContent {
487 /// The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`.
488 /// Defaults to `auto`.
489 detail: ImageDetail,
490 /// The ID of the file to be sent to the model.
491 #[serde(skip_serializing_if = "Option::is_none")]
492 file_id: Option<String>,
493 /// The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image
494 /// in a data URL.
495 #[serde(skip_serializing_if = "Option::is_none")]
496 image_url: Option<String>,
497}
498
499#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
500#[builder(
501 name = "InputFileArgs",
502 pattern = "mutable",
503 setter(into, strip_option),
504 default
505)]
506#[builder(build_fn(error = "OpenAIError"))]
507pub struct InputFileContent {
508 /// The content of the file to be sent to the model.
509 #[serde(skip_serializing_if = "Option::is_none")]
510 file_data: Option<String>,
511 /// The ID of the file to be sent to the model.
512 #[serde(skip_serializing_if = "Option::is_none")]
513 file_id: Option<String>,
514 /// The URL of the file to be sent to the model.
515 #[serde(skip_serializing_if = "Option::is_none")]
516 file_url: Option<String>,
517 /// The name of the file to be sent to the model.
518 #[serde(skip_serializing_if = "Option::is_none")]
519 filename: Option<String>,
520}
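
// Illustrative sketch (not part of the original module): assembling structured
// message content from the builders above; the prompt text and image URL are
// placeholders.
#[allow(dead_code)]
fn example_input_content_sketch() -> Result<Vec<InputContent>, OpenAIError> {
    let image = InputImageArgs::default()
        .image_url("https://example.com/chart.png")
        .build()?;
    Ok(vec![
        InputContent::InputText(InputTextContent {
            text: "What does this chart show?".to_string(),
        }),
        InputContent::InputImage(image),
    ])
}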
521
522#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
523pub struct Conversation {
524 /// The unique ID of the conversation.
525 pub id: String,
526}
527
528#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
529#[serde(untagged)]
530pub enum ConversationParam {
531 /// The unique ID of the conversation.
532 ConversationID(String),
533 /// The conversation that this response belongs to.
534 Object(Conversation),
535}
536
537#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
538pub enum IncludeEnum {
539 #[serde(rename = "file_search_call.results")]
540 FileSearchCallResults,
541 #[serde(rename = "web_search_call.results")]
542 WebSearchCallResults,
543 #[serde(rename = "web_search_call.action.sources")]
544 WebSearchCallActionSources,
545 #[serde(rename = "message.input_image.image_url")]
546 MessageInputImageImageUrl,
547 #[serde(rename = "computer_call_output.output.image_url")]
548 ComputerCallOutputOutputImageUrl,
549 #[serde(rename = "code_interpreter_call.outputs")]
550 CodeInterpreterCallOutputs,
551 #[serde(rename = "reasoning.encrypted_content")]
552 ReasoningEncryptedContent,
553 #[serde(rename = "message.output_text.logprobs")]
554 MessageOutputTextLogprobs,
555}
556
557#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
558pub struct ResponseStreamOptions {
559 /// When true, stream obfuscation will be enabled. Stream obfuscation adds
560 /// random characters to an `obfuscation` field on streaming delta events to
561 /// normalize payload sizes as a mitigation to certain side-channel attacks.
562 /// These obfuscation fields are included by default, but add a small amount
563 /// of overhead to the data stream. You can set `include_obfuscation` to
564 /// false to optimize for bandwidth if you trust the network links between
565 /// your application and the OpenAI API.
566 #[serde(skip_serializing_if = "Option::is_none")]
567 pub include_obfuscation: Option<bool>,
568}
569
/// A request to generate a model response via the Responses API. Use `CreateResponseArgs` to construct it.
571#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
572#[builder(
573 name = "CreateResponseArgs",
574 pattern = "mutable",
575 setter(into, strip_option),
576 default
577)]
578#[builder(build_fn(error = "OpenAIError"))]
579pub struct CreateResponse {
580 /// Whether to run the model response in the background.
581 /// [Learn more](https://platform.openai.com/docs/guides/background).
582 #[serde(skip_serializing_if = "Option::is_none")]
583 pub background: Option<bool>,
584
585 /// The conversation that this response belongs to. Items from this conversation are prepended to
586 /// `input_items` for this response request.
587 ///
588 /// Input items and output items from this response are automatically added to this conversation after
589 /// this response completes.
590 #[serde(skip_serializing_if = "Option::is_none")]
591 pub conversation: Option<ConversationParam>,
592
593 /// Specify additional output data to include in the model response. Currently supported
594 /// values are:
595 ///
596 /// - `web_search_call.action.sources`: Include the sources of the web search tool call.
597 ///
598 /// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
599 /// interpreter tool call items.
600 ///
601 /// - `computer_call_output.output.image_url`: Include image urls from the computer call
602 /// output.
603 ///
604 /// - `file_search_call.results`: Include the search results of the file search tool call.
605 ///
606 /// - `message.input_image.image_url`: Include image urls from the input message.
607 ///
608 /// - `message.output_text.logprobs`: Include logprobs with assistant messages.
609 ///
610 /// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
611 /// reasoning item outputs. This enables reasoning items to be used in multi-turn
612 /// conversations when using the Responses API statelessly (like when the `store` parameter is
613 /// set to `false`, or when an organization is enrolled in the zero data retention program).
614 #[serde(skip_serializing_if = "Option::is_none")]
615 pub include: Option<Vec<IncludeEnum>>,
616
617 /// Text, image, or file inputs to the model, used to generate a response.
618 ///
619 /// Learn more:
620 /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
621 /// - [Image inputs](https://platform.openai.com/docs/guides/images)
622 /// - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
623 /// - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
624 /// - [Function calling](https://platform.openai.com/docs/guides/function-calling)
625 pub input: InputParam,
626
627 /// A system (or developer) message inserted into the model's context.
628 ///
    /// When used along with `previous_response_id`, the instructions from a previous
630 /// response will not be carried over to the next response. This makes it simple
631 /// to swap out system (or developer) messages in new responses.
632 #[serde(skip_serializing_if = "Option::is_none")]
633 pub instructions: Option<String>,
634
635 /// An upper bound for the number of tokens that can be generated for a response, including
636 /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
637 #[serde(skip_serializing_if = "Option::is_none")]
638 pub max_output_tokens: Option<u32>,
639
640 /// The maximum number of total calls to built-in tools that can be processed in a response. This
641 /// maximum number applies across all built-in tool calls, not per individual tool. Any further
642 /// attempts to call a tool by the model will be ignored.
643 #[serde(skip_serializing_if = "Option::is_none")]
644 pub max_tool_calls: Option<u32>,
645
646 /// Set of 16 key-value pairs that can be attached to an object. This can be
647 /// useful for storing additional information about the object in a structured
648 /// format, and querying for objects via API or the dashboard.
649 ///
650 /// Keys are strings with a maximum length of 64 characters. Values are
651 /// strings with a maximum length of 512 characters.
652 #[serde(skip_serializing_if = "Option::is_none")]
653 pub metadata: Option<HashMap<String, String>>,
654
655 /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
656 /// offers a wide range of models with different capabilities, performance
657 /// characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
658 /// to browse and compare available models.
659 #[serde(skip_serializing_if = "Option::is_none")]
660 pub model: Option<String>,
661
662 /// Whether to allow the model to run tool calls in parallel.
663 #[serde(skip_serializing_if = "Option::is_none")]
664 pub parallel_tool_calls: Option<bool>,
665
666 /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
667 /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
668 /// Cannot be used in conjunction with `conversation`.
669 #[serde(skip_serializing_if = "Option::is_none")]
670 pub previous_response_id: Option<String>,
671
672 /// Reference to a prompt template and its variables.
673 /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
674 #[serde(skip_serializing_if = "Option::is_none")]
675 pub prompt: Option<Prompt>,
676
677 /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
678 /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
679 #[serde(skip_serializing_if = "Option::is_none")]
680 pub prompt_cache_key: Option<String>,
681
682 /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
683 /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
684 /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
685 #[serde(skip_serializing_if = "Option::is_none")]
686 pub prompt_cache_retention: Option<PromptCacheRetention>,
687
688 /// **gpt-5 and o-series models only**
689 /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
690 #[serde(skip_serializing_if = "Option::is_none")]
691 pub reasoning: Option<Reasoning>,
692
693 /// A stable identifier used to help detect users of your application that may be violating OpenAI's
694 /// usage policies.
695 ///
    /// The ID should be a string that uniquely identifies each user. We recommend hashing their username
697 /// or email address, in order to avoid sending us any identifying information. [Learn
698 /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
699 #[serde(skip_serializing_if = "Option::is_none")]
700 pub safety_identifier: Option<String>,
701
702 /// Specifies the processing type used for serving the request.
703 /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
704 /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
705 /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
706 /// - When not set, the default behavior is 'auto'.
707 ///
708 /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
709 #[serde(skip_serializing_if = "Option::is_none")]
710 pub service_tier: Option<ServiceTier>,
711
712 /// Whether to store the generated model response for later retrieval via API.
713 #[serde(skip_serializing_if = "Option::is_none")]
714 pub store: Option<bool>,
715
716 /// If set to true, the model response data will be streamed to the client
717 /// as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
718 /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
719 /// for more information.
720 #[serde(skip_serializing_if = "Option::is_none")]
721 pub stream: Option<bool>,
722
723 /// Options for streaming responses. Only set this when you set `stream: true`.
724 #[serde(skip_serializing_if = "Option::is_none")]
725 pub stream_options: Option<ResponseStreamOptions>,
726
727 /// What sampling temperature to use, between 0 and 2. Higher values like 0.8
728 /// will make the output more random, while lower values like 0.2 will make it
729 /// more focused and deterministic. We generally recommend altering this or
730 /// `top_p` but not both.
731 #[serde(skip_serializing_if = "Option::is_none")]
732 pub temperature: Option<f32>,
733
734 /// Configuration options for a text response from the model. Can be plain
735 /// text or structured JSON data. Learn more:
736 /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
737 /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
738 #[serde(skip_serializing_if = "Option::is_none")]
739 pub text: Option<ResponseTextParam>,
740
741 /// How the model should select which tool (or tools) to use when generating
742 /// a response. See the `tools` parameter to see how to specify which tools
743 /// the model can call.
744 #[serde(skip_serializing_if = "Option::is_none")]
745 pub tool_choice: Option<ToolChoiceParam>,
746
747 /// An array of tools the model may call while generating a response. You
748 /// can specify which tool to use by setting the `tool_choice` parameter.
749 ///
750 /// We support the following categories of tools:
751 /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
752 /// model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
753 /// or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
754 /// [built-in tools](https://platform.openai.com/docs/guides/tools).
755 /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
756 /// or predefined connectors such as Google Drive and SharePoint. Learn more about
757 /// [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
758 /// - **Function calls (custom tools)**: Functions that are defined by you,
759 /// enabling the model to call your own code with strongly typed arguments
760 /// and outputs. Learn more about
761 /// [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
762 /// custom tools to call your own code.
763 #[serde(skip_serializing_if = "Option::is_none")]
764 pub tools: Option<Vec<Tool>>,
765
766 /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
767 /// token position, each with an associated log probability.
768 #[serde(skip_serializing_if = "Option::is_none")]
769 pub top_logprobs: Option<u8>,
770
771 /// An alternative to sampling with temperature, called nucleus sampling,
772 /// where the model considers the results of the tokens with top_p probability
773 /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
774 /// are considered.
775 ///
776 /// We generally recommend altering this or `temperature` but not both.
777 #[serde(skip_serializing_if = "Option::is_none")]
778 pub top_p: Option<f32>,
779
    /// The truncation strategy to use for the model response.
781 /// - `auto`: If the input to this Response exceeds
782 /// the model's context window size, the model will truncate the
783 /// response to fit the context window by dropping items from the beginning of the conversation.
784 /// - `disabled` (default): If the input size will exceed the context window
785 /// size for a model, the request will fail with a 400 error.
786 #[serde(skip_serializing_if = "Option::is_none")]
787 pub truncation: Option<Truncation>,
788}
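
// Illustrative sketch (not part of the original module): a minimal request built
// with the `CreateResponseArgs` builder above. The model name and prompt text are
// placeholders.
#[allow(dead_code)]
fn example_create_response_sketch() -> Result<CreateResponse, OpenAIError> {
    CreateResponseArgs::default()
        .model("gpt-4o")
        .input(InputParam::Text(
            "Write a one-sentence summary of Rust.".to_string(),
        ))
        .max_output_tokens(256u32)
        .temperature(0.7f32)
        .build()
}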
789
790#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
791#[serde(untagged)]
792pub enum ResponsePromptVariables {
793 String(String),
794 Content(InputContent),
795 Custom(serde_json::Value),
796}
797
798#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
799pub struct Prompt {
800 /// The unique identifier of the prompt template to use.
801 pub id: String,
802
803 /// Optional version of the prompt template.
804 #[serde(skip_serializing_if = "Option::is_none")]
805 pub version: Option<String>,
806
807 /// Optional map of values to substitute in for variables in your
808 /// prompt. The substitution values can either be strings, or other
809 /// Response input types like images or files.
810 #[serde(skip_serializing_if = "Option::is_none")]
811 pub variables: Option<ResponsePromptVariables>,
812}
813
814#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Default)]
815#[serde(rename_all = "lowercase")]
816pub enum ServiceTier {
817 #[default]
818 Auto,
819 Default,
820 Flex,
821 Scale,
822 Priority,
823}
824
825/// Truncation strategies.
826#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
827#[serde(rename_all = "lowercase")]
828pub enum Truncation {
829 Auto,
830 Disabled,
831}
832
833#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
834pub struct Billing {
835 pub payer: String,
836}
837
/// Reasoning configuration for gpt-5 and o-series models.
839#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
840#[builder(
841 name = "ReasoningArgs",
842 pattern = "mutable",
843 setter(into, strip_option),
844 default
845)]
846#[builder(build_fn(error = "OpenAIError"))]
847pub struct Reasoning {
848 /// Constrains effort on reasoning for
849 /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
850 /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
851 /// reasoning effort can result in faster responses and fewer tokens used
852 /// on reasoning in a response.
853 ///
854 /// Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
855 #[serde(skip_serializing_if = "Option::is_none")]
856 pub effort: Option<ReasoningEffort>,
857 /// A summary of the reasoning performed by the model. This can be
858 /// useful for debugging and understanding the model's reasoning process.
859 /// One of `auto`, `concise`, or `detailed`.
860 ///
861 /// `concise` is only supported for `computer-use-preview` models.
862 #[serde(skip_serializing_if = "Option::is_none")]
863 pub summary: Option<ReasoningSummary>,
864}
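
// Illustrative sketch (not part of the original module): requesting high reasoning
// effort together with a detailed reasoning summary via the `ReasoningArgs` builder.
#[allow(dead_code)]
fn example_reasoning_sketch() -> Result<Reasoning, OpenAIError> {
    ReasoningArgs::default()
        .effort(ReasoningEffort::High)
        .summary(ReasoningSummary::Detailed)
        .build()
}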
865
/// Constrains the verbosity of the model's response. One of `low`, `medium`, or `high`.
867#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
868#[serde(rename_all = "lowercase")]
869pub enum Verbosity {
870 Low,
871 Medium,
872 High,
873}
874
875#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
876#[serde(rename_all = "lowercase")]
877pub enum ReasoningSummary {
878 Auto,
879 Concise,
880 Detailed,
881}
882
883/// The retention policy for the prompt cache.
884#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
885pub enum PromptCacheRetention {
886 #[serde(rename = "in-memory")]
887 InMemory,
888 #[serde(rename = "24h")]
889 Hours24,
890}
891
892/// Configuration for text response format.
893#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
894pub struct ResponseTextParam {
895 /// An object specifying the format that the model must output.
896 ///
897 /// Configuring `{ "type": "json_schema" }` enables Structured Outputs,
898 /// which ensures the model will match your supplied JSON schema. Learn more in the
899 /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
900 ///
901 /// The default format is `{ "type": "text" }` with no additional options.
902 ///
903 /// **Not recommended for gpt-4o and newer models:**
904 ///
905 /// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
906 /// ensures the message the model generates is valid JSON. Using `json_schema`
907 /// is preferred for models that support it.
908 pub format: TextResponseFormatConfiguration,
909
910 /// Constrains the verbosity of the model's response. Lower values will result in
911 /// more concise responses, while higher values will result in more verbose responses.
912 ///
913 /// Currently supported values are `low`, `medium`, and `high`.
914 #[serde(skip_serializing_if = "Option::is_none")]
915 pub verbosity: Option<Verbosity>,
916}
917
918#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
919#[serde(tag = "type", rename_all = "snake_case")]
920pub enum TextResponseFormatConfiguration {
921 /// Default response format. Used to generate text responses.
922 Text,
923 /// JSON object response format. An older method of generating JSON responses.
924 /// Using `json_schema` is recommended for models that support it.
925 /// Note that the model will not generate JSON without a system or user message
926 /// instructing it to do so.
927 JsonObject,
928 /// JSON Schema response format. Used to generate structured JSON responses.
929 /// Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
930 JsonSchema(ResponseFormatJsonSchema),
931}
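
// Illustrative sketch (not part of the original module): enabling Structured
// Outputs by pairing a JSON schema with the text configuration above. Assumes the
// re-exported `ResponseFormatJsonSchema` exposes `name`, `description`, `schema`,
// and `strict`; the schema itself is a placeholder.
#[allow(dead_code)]
fn example_text_param_sketch() -> ResponseTextParam {
    ResponseTextParam {
        format: TextResponseFormatConfiguration::JsonSchema(ResponseFormatJsonSchema {
            name: "city_info".to_string(),
            description: Some("A city and the country it belongs to.".to_string()),
            schema: Some(serde_json::json!({
                "type": "object",
                "properties": {
                    "city": { "type": "string" },
                    "country": { "type": "string" }
                },
                "required": ["city", "country"],
                "additionalProperties": false
            })),
            strict: Some(true),
        }),
        verbosity: None,
    }
}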
932
933/// Definitions for model-callable tools.
934#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
935#[serde(tag = "type", rename_all = "snake_case")]
936pub enum Tool {
    /// Defines a function in your own code the model can choose to call. Learn more about
    /// [function calling](https://platform.openai.com/docs/guides/function-calling).
939 Function(FunctionTool),
940 /// A tool that searches for relevant content from uploaded files. Learn more about the [file search
941 /// tool](https://platform.openai.com/docs/guides/tools-file-search).
942 FileSearch(FileSearchTool),
943 /// A tool that controls a virtual computer. Learn more about the [computer
944 /// use tool](https://platform.openai.com/docs/guides/tools-computer-use).
945 ComputerUsePreview(ComputerUsePreviewTool),
946 /// Search the Internet for sources related to the prompt. Learn more about the
947 /// [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
948 WebSearch(WebSearchTool),
    /// The dated `web_search_2025_08_26` variant of the web search tool.
    #[serde(rename = "web_search_2025_08_26")]
951 WebSearch20250826(WebSearchTool),
952 /// Give the model access to additional tools via remote Model Context Protocol
953 /// (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
954 Mcp(MCPTool),
955 /// A tool that runs Python code to help generate a response to a prompt.
956 CodeInterpreter(CodeInterpreterTool),
957 /// A tool that generates images using a model like `gpt-image-1`.
958 ImageGeneration(ImageGenTool),
959 /// A tool that allows the model to execute shell commands in a local environment.
960 LocalShell,
961 /// A tool that allows the model to execute shell commands.
962 Shell,
963 /// A custom tool that processes input using a specified format. Learn more about [custom
964 /// tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
965 Custom(CustomToolParam),
    /// This tool searches the web for relevant results to use in a response. Learn more about the
    /// [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
968 WebSearchPreview(WebSearchTool),
    /// The dated `web_search_preview_2025_03_11` variant of the web search preview tool.
    #[serde(rename = "web_search_preview_2025_03_11")]
971 WebSearchPreview20250311(WebSearchTool),
972 /// Allows the assistant to create, delete, or update files using unified diffs.
973 ApplyPatch,
974}
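
// Illustrative sketch (not part of the original module): exposing a function to
// the model via `Tool::Function`; the function name and JSON schema are placeholders.
#[allow(dead_code)]
fn example_function_tool_sketch() -> Tool {
    Tool::Function(FunctionTool {
        name: "get_weather".to_string(),
        description: Some("Look up the current weather for a city.".to_string()),
        parameters: Some(serde_json::json!({
            "type": "object",
            "properties": { "city": { "type": "string" } },
            "required": ["city"],
            "additionalProperties": false
        })),
        strict: Some(true),
    })
}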
975
976#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
977pub struct CustomToolParam {
978 /// The name of the custom tool, used to identify it in tool calls.
979 pub name: String,
980 /// Optional description of the custom tool, used to provide more context.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
982 /// The input format for the custom tool. Default is unconstrained text.
983 pub format: CustomToolParamFormat,
984}
985
986#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
987#[serde(rename_all = "lowercase")]
988pub enum GrammarSyntax {
989 Lark,
990 #[default]
991 Regex,
992}
993
994#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
995pub struct CustomGrammarFormatParam {
996 /// The grammar definition.
997 pub definition: String,
998 /// The syntax of the grammar definition. One of `lark` or `regex`.
999 pub syntax: GrammarSyntax,
1000}
1001
1002#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1003#[serde(tag = "type", rename_all = "lowercase")]
1004pub enum CustomToolParamFormat {
1005 /// Unconstrained free-form text.
1006 #[default]
1007 Text,
1008 /// A grammar defined by the user.
1009 Grammar(CustomGrammarFormatParam),
1010}
1011
1012#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1013#[builder(
1014 name = "FileSearchToolArgs",
1015 pattern = "mutable",
1016 setter(into, strip_option),
1017 default
1018)]
1019#[builder(build_fn(error = "OpenAIError"))]
1020pub struct FileSearchTool {
1021 /// The IDs of the vector stores to search.
1022 pub vector_store_ids: Vec<String>,
1023 /// The maximum number of results to return. This number should be between 1 and 50 inclusive.
1024 #[serde(skip_serializing_if = "Option::is_none")]
1025 pub max_num_results: Option<u32>,
1026 /// A filter to apply.
1027 #[serde(skip_serializing_if = "Option::is_none")]
1028 pub filters: Option<Filter>,
1029 /// Ranking options for search.
1030 #[serde(skip_serializing_if = "Option::is_none")]
1031 pub ranking_options: Option<RankingOptions>,
1032}
1033
1034#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1035#[builder(
1036 name = "FunctionToolArgs",
1037 pattern = "mutable",
1038 setter(into, strip_option),
1039 default
1040)]
1041pub struct FunctionTool {
1042 /// The name of the function to call.
1043 pub name: String,
1044 /// A JSON schema object describing the parameters of the function.
1045 #[serde(skip_serializing_if = "Option::is_none")]
1046 pub parameters: Option<serde_json::Value>,
1047 /// Whether to enforce strict parameter validation. Default `true`.
1048 #[serde(skip_serializing_if = "Option::is_none")]
1049 pub strict: Option<bool>,
1050 /// A description of the function. Used by the model to determine whether or not to call the
1051 /// function.
1052 #[serde(skip_serializing_if = "Option::is_none")]
1053 pub description: Option<String>,
1054}
1055
1056#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1057pub struct WebSearchToolFilters {
1058 /// Allowed domains for the search. If not provided, all domains are allowed.
1059 /// Subdomains of the provided domains are allowed as well.
1060 ///
1061 /// Example: `["pubmed.ncbi.nlm.nih.gov"]`
1062 #[serde(skip_serializing_if = "Option::is_none")]
1063 pub allowed_domains: Option<Vec<String>>,
1064}
1065
1066#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1067#[builder(
1068 name = "WebSearchToolArgs",
1069 pattern = "mutable",
1070 setter(into, strip_option),
1071 default
1072)]
1073pub struct WebSearchTool {
1074 /// Filters for the search.
1075 #[serde(skip_serializing_if = "Option::is_none")]
1076 pub filters: Option<WebSearchToolFilters>,
1077 /// The approximate location of the user.
1078 #[serde(skip_serializing_if = "Option::is_none")]
1079 pub user_location: Option<WebSearchApproximateLocation>,
1080 /// High level guidance for the amount of context window space to use for the search. One of `low`,
1081 /// `medium`, or `high`. `medium` is the default.
1082 #[serde(skip_serializing_if = "Option::is_none")]
1083 pub search_context_size: Option<WebSearchToolSearchContextSize>,
1084}
1085
1086#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
1087#[serde(rename_all = "lowercase")]
1088pub enum WebSearchToolSearchContextSize {
1089 Low,
1090 #[default]
1091 Medium,
1092 High,
1093}
1094
1095#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
1096#[serde(rename_all = "lowercase")]
1097pub enum ComputerEnvironment {
1098 Windows,
1099 Mac,
1100 Linux,
1101 Ubuntu,
1102 #[default]
1103 Browser,
1104}
1105
1106#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1107#[builder(
1108 name = "ComputerUsePreviewToolArgs",
1109 pattern = "mutable",
1110 setter(into, strip_option),
1111 default
1112)]
1113pub struct ComputerUsePreviewTool {
1114 /// The type of computer environment to control.
1115 environment: ComputerEnvironment,
1116 /// The width of the computer display.
1117 display_width: u32,
1118 /// The height of the computer display.
1119 display_height: u32,
1120}
1121
1122#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
1123pub enum RankVersionType {
1124 #[serde(rename = "auto")]
1125 Auto,
1126 #[serde(rename = "default-2024-11-15")]
1127 Default20241115,
1128}
1129
1130#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1131pub struct HybridSearch {
1132 /// The weight of the embedding in the reciprocal ranking fusion.
1133 pub embedding_weight: f32,
1134 /// The weight of the text in the reciprocal ranking fusion.
1135 pub text_weight: f32,
1136}
1137
1138/// Options for search result ranking.
1139#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1140pub struct RankingOptions {
1141 /// Weights that control how reciprocal rank fusion balances semantic embedding matches versus
1142 /// sparse keyword matches when hybrid search is enabled.
1143 #[serde(skip_serializing_if = "Option::is_none")]
1144 pub hybrid_search: Option<HybridSearch>,
1145 /// The ranker to use for the file search.
1146 pub ranker: RankVersionType,
1147 /// The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will
1148 /// attempt to return only the most relevant results, but may return fewer results.
1149 #[serde(skip_serializing_if = "Option::is_none")]
1150 pub score_threshold: Option<f32>,
1151}
1152
1153/// Filters for file search.
1154#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1155#[serde(untagged)]
1156pub enum Filter {
1157 /// A filter used to compare a specified attribute key to a given value using a defined
1158 /// comparison operation.
1159 Comparison(ComparisonFilter),
1160 /// Combine multiple filters using `and` or `or`.
1161 Compound(CompoundFilter),
1162}
1163
1164/// Single comparison filter.
1165#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1166pub struct ComparisonFilter {
1167 /// Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, `nin`.
1168 /// - `eq`: equals
1169 /// - `ne`: not equal
1170 /// - `gt`: greater than
1171 /// - `gte`: greater than or equal
1172 /// - `lt`: less than
1173 /// - `lte`: less than or equal
1174 /// - `in`: in
1175 /// - `nin`: not in
1176 pub r#type: ComparisonType,
1177 /// The key to compare against the value.
1178 pub key: String,
1179 /// The value to compare against the attribute key; supports string, number, or boolean types.
1180 pub value: serde_json::Value,
1181}
1182
1183#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
1184pub enum ComparisonType {
1185 #[serde(rename = "eq")]
1186 Equals,
1187 #[serde(rename = "ne")]
1188 NotEquals,
1189 #[serde(rename = "gt")]
1190 GreaterThan,
1191 #[serde(rename = "gte")]
1192 GreaterThanOrEqual,
1193 #[serde(rename = "lt")]
1194 LessThan,
1195 #[serde(rename = "lte")]
1196 LessThanOrEqual,
1197 #[serde(rename = "in")]
1198 In,
1199 #[serde(rename = "nin")]
1200 NotIn,
1201}
1202
1203/// Combine multiple filters using `and` or `or`.
1204#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1205pub struct CompoundFilter {
    /// Type of operation: `and` or `or`.
1207 pub r#type: CompoundType,
1208 /// Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
1209 pub filters: Vec<Filter>,
1210}
1211
1212#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1213#[serde(rename_all = "lowercase")]
1214pub enum CompoundType {
1215 And,
1216 Or,
1217}
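
// Illustrative sketch (not part of the original module): combining two metadata
// comparisons with `and` into a file search filter; the attribute keys and values
// are placeholders.
#[allow(dead_code)]
fn example_file_search_filter_sketch() -> Filter {
    Filter::Compound(CompoundFilter {
        r#type: CompoundType::And,
        filters: vec![
            Filter::Comparison(ComparisonFilter {
                r#type: ComparisonType::Equals,
                key: "author".to_string(),
                value: serde_json::json!("jane"),
            }),
            Filter::Comparison(ComparisonFilter {
                r#type: ComparisonType::GreaterThanOrEqual,
                key: "year".to_string(),
                value: serde_json::json!(2023),
            }),
        ],
    })
}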
1218
1219#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
1220#[serde(rename_all = "lowercase")]
1221pub enum WebSearchApproximateLocationType {
1222 #[default]
1223 Approximate,
1224}
1225
1226/// Approximate user location for web search.
1227#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1228#[builder(
1229 name = "WebSearchApproximateLocationArgs",
1230 pattern = "mutable",
1231 setter(into, strip_option),
1232 default
1233)]
1234#[builder(build_fn(error = "OpenAIError"))]
1235pub struct WebSearchApproximateLocation {
1236 /// The type of location approximation. Always `approximate`.
1237 pub r#type: WebSearchApproximateLocationType,
1238 /// Free text input for the city of the user, e.g. `San Francisco`.
1239 #[serde(skip_serializing_if = "Option::is_none")]
1240 pub city: Option<String>,
1241 /// The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,
1242 /// e.g. `US`.
1243 #[serde(skip_serializing_if = "Option::is_none")]
1244 pub country: Option<String>,
1245 /// Free text input for the region of the user, e.g. `California`.
1246 #[serde(skip_serializing_if = "Option::is_none")]
1247 pub region: Option<String>,
1248 /// The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g.
1249 /// `America/Los_Angeles`.
1250 #[serde(skip_serializing_if = "Option::is_none")]
1251 pub timezone: Option<String>,
1252}
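
// Illustrative sketch (not part of the original module): a web search tool scoped
// to an approximate user location; the city, country, and timezone values are
// placeholders.
#[allow(dead_code)]
fn example_web_search_tool_sketch() -> Result<Tool, OpenAIError> {
    let location = WebSearchApproximateLocationArgs::default()
        .city("San Francisco")
        .country("US")
        .timezone("America/Los_Angeles")
        .build()?;
    Ok(Tool::WebSearch(WebSearchTool {
        filters: None,
        user_location: Some(location),
        search_context_size: Some(WebSearchToolSearchContextSize::Medium),
    }))
}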
1253
1254/// Container configuration for a code interpreter.
1255#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1256#[serde(tag = "type", rename_all = "snake_case")]
1257pub enum CodeInterpreterToolContainer {
1258 /// Configuration for a code interpreter container. Optionally specify the IDs of the
1259 /// files to run the code on.
1260 Auto(CodeInterpreterContainerAuto),
1261
1262 /// The container ID.
1263 #[serde(untagged)]
1264 ContainerID(String),
1265}
1266
1267impl Default for CodeInterpreterToolContainer {
1268 fn default() -> Self {
1269 Self::Auto(CodeInterpreterContainerAuto::default())
1270 }
1271}
1272
1273/// Auto configuration for code interpreter container.
1274#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1275pub struct CodeInterpreterContainerAuto {
1276 /// An optional list of uploaded files to make available to your code.
1277 #[serde(skip_serializing_if = "Option::is_none")]
1278 pub file_ids: Option<Vec<String>>,
1279
    /// An optional memory limit for the code interpreter container.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_limit: Option<u64>,
1282}
1283
1284#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1285#[builder(
1286 name = "CodeInterpreterToolArgs",
1287 pattern = "mutable",
1288 setter(into, strip_option),
1289 default
1290)]
1291#[builder(build_fn(error = "OpenAIError"))]
1292pub struct CodeInterpreterTool {
1293 /// The code interpreter container. Can be a container ID or an object that
1294 /// specifies uploaded file IDs to make available to your code.
1295 pub container: CodeInterpreterToolContainer,
1296}
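
// Illustrative sketch (not part of the original module): a code interpreter tool
// with an auto-managed container that can read one uploaded file; the file ID is
// a placeholder.
#[allow(dead_code)]
fn example_code_interpreter_tool_sketch() -> Result<Tool, OpenAIError> {
    let tool = CodeInterpreterToolArgs::default()
        .container(CodeInterpreterToolContainer::Auto(
            CodeInterpreterContainerAuto {
                file_ids: Some(vec!["file_abc123".to_string()]),
                memory_limit: None,
            },
        ))
        .build()?;
    Ok(Tool::CodeInterpreter(tool))
}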
1297
1298#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1299pub struct ImageGenToolInputImageMask {
1300 /// Base64-encoded mask image.
1301 #[serde(skip_serializing_if = "Option::is_none")]
1302 pub image_url: Option<String>,
1303 /// File ID for the mask image.
1304 #[serde(skip_serializing_if = "Option::is_none")]
1305 pub file_id: Option<String>,
1306}
1307
1308#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1309#[serde(rename_all = "lowercase")]
1310pub enum InputFidelity {
1311 #[default]
1312 High,
1313 Low,
1314}
1315
1316#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1317#[serde(rename_all = "lowercase")]
1318pub enum ImageGenToolModeration {
1319 #[default]
1320 Auto,
1321 Low,
1322}
1323
1324/// Image generation tool definition.
1325#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)]
1326#[builder(
1327 name = "ImageGenerationArgs",
1328 pattern = "mutable",
1329 setter(into, strip_option),
1330 default
1331)]
1332#[builder(build_fn(error = "OpenAIError"))]
1333pub struct ImageGenTool {
1334 /// Background type for the generated image. One of `transparent`,
1335 /// `opaque`, or `auto`. Default: `auto`.
1336 #[serde(skip_serializing_if = "Option::is_none")]
1337 pub background: Option<ImageGenToolBackground>,
1338 /// Control how much effort the model will exert to match the style and features, especially facial features,
1339 /// of input images. This parameter is only supported for `gpt-image-1`. Unsupported
1340 /// for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
1341 #[serde(skip_serializing_if = "Option::is_none")]
1342 pub input_fidelity: Option<InputFidelity>,
1343 /// Optional mask for inpainting. Contains `image_url`
1344 /// (string, optional) and `file_id` (string, optional).
1345 #[serde(skip_serializing_if = "Option::is_none")]
1346 pub input_image_mask: Option<ImageGenToolInputImageMask>,
1347 /// The image generation model to use. Default: `gpt-image-1`.
1348 #[serde(skip_serializing_if = "Option::is_none")]
1349 pub model: Option<String>,
1350 /// Moderation level for the generated image. Default: `auto`.
1351 #[serde(skip_serializing_if = "Option::is_none")]
1352 pub moderation: Option<ImageGenToolModeration>,
1353 /// Compression level for the output image. Default: 100.
1354 #[serde(skip_serializing_if = "Option::is_none")]
1355 pub output_compression: Option<u8>,
1356 /// The output format of the generated image. One of `png`, `webp`, or
1357 /// `jpeg`. Default: `png`.
1358 #[serde(skip_serializing_if = "Option::is_none")]
1359 pub output_format: Option<ImageGenToolOutputFormat>,
1360 /// Number of partial images to generate in streaming mode, from 0 (default value) to 3.
1361 #[serde(skip_serializing_if = "Option::is_none")]
1362 pub partial_images: Option<u8>,
1363 /// The quality of the generated image. One of `low`, `medium`, `high`,
1364 /// or `auto`. Default: `auto`.
1365 #[serde(skip_serializing_if = "Option::is_none")]
1366 pub quality: Option<ImageGenToolQuality>,
1367 /// The size of the generated image. One of `1024x1024`, `1024x1536`,
1368 /// `1536x1024`, or `auto`. Default: `auto`.
1369 #[serde(skip_serializing_if = "Option::is_none")]
1370 pub size: Option<ImageGenToolSize>,
1371}
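
// Editor's illustrative sketch (not part of the upstream module): shows one way the
// `ImageGenerationArgs` builder generated above might be used, and how
// `skip_serializing_if = "Option::is_none"` keeps unset fields out of the JSON. Assumes
// `serde_json`, which this file already depends on for `serde_json::Value`.
#[cfg(test)]
mod image_gen_tool_example {
    use super::*;

    #[test]
    fn builds_and_serializes_a_sparse_image_gen_tool() {
        // Only `model` and `size` are set here; every other field stays `None`.
        let tool = ImageGenerationArgs::default()
            .model("gpt-image-1")
            .size(ImageGenToolSize::Size1024x1024)
            .build()
            .expect("builder with all-default fields should succeed");

        // Unset options are skipped, and the size variant uses its `1024x1024` rename.
        assert_eq!(
            serde_json::to_value(&tool).unwrap(),
            serde_json::json!({ "model": "gpt-image-1", "size": "1024x1024" })
        );
    }
}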
1372
1373#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1374#[serde(rename_all = "lowercase")]
1375pub enum ImageGenToolBackground {
1376 Transparent,
1377 Opaque,
1378 #[default]
1379 Auto,
1380}
1381
1382#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1383#[serde(rename_all = "lowercase")]
1384pub enum ImageGenToolOutputFormat {
1385 #[default]
1386 Png,
1387 Webp,
1388 Jpeg,
1389}
1390
1391#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1392#[serde(rename_all = "lowercase")]
1393pub enum ImageGenToolQuality {
1394 Low,
1395 Medium,
1396 High,
1397 #[default]
1398 Auto,
1399}
1400
1401#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1402#[serde(rename_all = "lowercase")]
1403pub enum ImageGenToolSize {
1404 #[default]
1405 Auto,
1406 #[serde(rename = "1024x1024")]
1407 Size1024x1024,
1408 #[serde(rename = "1024x1536")]
1409 Size1024x1536,
1410 #[serde(rename = "1536x1024")]
1411 Size1536x1024,
1412}
1413
1414#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1415#[serde(rename_all = "lowercase")]
1416pub enum ToolChoiceAllowedMode {
1417 Auto,
1418 Required,
1419}
1420
1421#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1422pub struct ToolChoiceAllowed {
1423 /// Constrains the tools available to the model to a pre-defined set.
1424 ///
1425 /// `auto` allows the model to pick from among the allowed tools and generate a
1426 /// message.
1427 ///
1428 /// `required` requires the model to call one or more of the allowed tools.
1429 pub mode: ToolChoiceAllowedMode,
1430 /// A list of tool definitions that the model should be allowed to call.
1431 ///
1432 /// For the Responses API, the list of tool definitions might look like:
1433 /// ```json
1434 /// [
1435 /// { "type": "function", "name": "get_weather" },
1436 /// { "type": "mcp", "server_label": "deepwiki" },
1437 /// { "type": "image_generation" }
1438 /// ]
1439 /// ```
1440 pub tools: Vec<serde_json::Value>,
1441}
1442
1443/// The type of hosted tool the model should use. Learn more about
1444/// [built-in tools](https://platform.openai.com/docs/guides/tools).
1445#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1446#[serde(tag = "type", rename_all = "snake_case")]
1447pub enum ToolChoiceTypes {
1448 FileSearch,
1449 WebSearchPreview,
1450 ComputerUsePreview,
1451 CodeInterpreter,
1452 ImageGeneration,
1453}
1454
1455#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1456pub struct ToolChoiceFunction {
1457 /// The name of the function to call.
1458 name: String,
1459}
1460
1461#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1462pub struct ToolChoiceMCP {
1463 /// The name of the tool to call on the server.
1464 name: String,
1465 /// The label of the MCP server to use.
1466 server_label: String,
1467}
1468
1469#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1470pub struct ToolChoiceCustom {
1471 /// The name of the custom tool to call.
1472 name: String,
1473}
1474
1475#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1476#[serde(tag = "type", rename_all = "snake_case")]
1477pub enum ToolChoiceParam {
1478 /// Constrains the tools available to the model to a pre-defined set.
1479 AllowedTools(ToolChoiceAllowed),
1480
1481 /// Use this option to force the model to call a specific function.
1482 Function(ToolChoiceFunction),
1483
1484 /// Use this option to force the model to call a specific tool on a remote MCP server.
1485 Mcp(ToolChoiceMCP),
1486
1487 /// Use this option to force the model to call a custom tool.
1488 Custom(ToolChoiceCustom),
1489
1490 /// Forces the model to call the apply_patch tool when executing a tool call.
1491 ApplyPatch,
1492
1493 /// Forces the model to call the function shell tool when a tool call is required.
1494 Shell,
1495
1496 /// Indicates that the model should use a built-in tool to generate a response.
1497 /// [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
1498 #[serde(untagged)]
1499 Hosted(ToolChoiceTypes),
1500
1501 /// Controls which (if any) tool is called by the model.
1502 ///
1503 /// `none` means the model will not call any tool and instead generates a message.
1504 ///
1505 /// `auto` means the model can pick between generating a message or calling one or
1506 /// more tools.
1507 ///
1508 /// `required` means the model must call one or more tools.
1509 #[serde(untagged)]
1510 Mode(ToolChoiceOptions),
1511}
1512
1513#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
1514#[serde(rename_all = "lowercase")]
1515pub enum ToolChoiceOptions {
1516 None,
1517 Auto,
1518 Required,
1519}
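
// Editor's illustrative sketch (not part of the upstream module): the shapes
// `ToolChoiceParam` is expected to serialize to, given its serde attributes. Tagged
// variants carry a `"type"` discriminator, while the `#[serde(untagged)]` variants
// (`Hosted`, `Mode`) serialize as the bare inner value. `"get_weather"` is just a
// sample name taken from the doc comment above.
#[cfg(test)]
mod tool_choice_example {
    use super::*;

    #[test]
    fn serializes_tagged_and_untagged_tool_choices() {
        // Plain mode strings come from the untagged `Mode` variant.
        assert_eq!(
            serde_json::to_value(ToolChoiceParam::Mode(ToolChoiceOptions::Auto)).unwrap(),
            serde_json::json!("auto")
        );

        // Hosted tools serialize with the inner `ToolChoiceTypes` tag.
        assert_eq!(
            serde_json::to_value(ToolChoiceParam::Hosted(ToolChoiceTypes::FileSearch)).unwrap(),
            serde_json::json!({ "type": "file_search" })
        );

        // Forcing a specific function keeps the outer `"type": "function"` tag.
        assert_eq!(
            serde_json::to_value(ToolChoiceParam::Function(ToolChoiceFunction {
                name: "get_weather".to_string(),
            }))
            .unwrap(),
            serde_json::json!({ "type": "function", "name": "get_weather" })
        );
    }
}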
1520
1521/// Error returned by the API when a request fails.
1522#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1523pub struct ErrorObject {
1524 /// The error code for the response.
1525 pub code: String,
1526 /// A human-readable description of the error.
1527 pub message: String,
1528}
1529
1530/// Details about an incomplete response.
1531#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1532pub struct IncompleteDetails {
1533 /// The reason why the response is incomplete.
1534 pub reason: String,
1535}
1536
1537#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1538pub struct TopLogProb {
1539 pub bytes: Vec<u8>,
1540 pub logprob: f64,
1541 pub token: String,
1542}
1543
1544#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1545pub struct LogProb {
1546 pub bytes: Vec<u8>,
1547 pub logprob: f64,
1548 pub token: String,
1549 pub top_logprobs: Vec<TopLogProb>,
1550}
1551
1552#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1553pub struct ResponseTopLobProb {
1554 /// The log probability of this token.
1555 pub logprob: f64,
1556 /// A possible text token.
1557 pub token: String,
1558}
1559
1560#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1561pub struct ResponseLogProb {
1562 /// The log probability of this token.
1563 pub logprob: f64,
1564 /// A possible text token.
1565 pub token: String,
1566 /// The log probability of the top 20 most likely tokens.
1567 pub top_logprobs: Vec<ResponseTopLobProb>,
1568}
1569
1570/// A simple text output from the model.
1571#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1572pub struct OutputTextContent {
1573 /// The annotations of the text output.
1574 pub annotations: Vec<Annotation>,
1575 pub logprobs: Option<Vec<LogProb>>,
1576 /// The text output from the model.
1577 pub text: String,
1578}
1579
1580#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1581#[serde(tag = "type", rename_all = "snake_case")]
1582pub enum Annotation {
1583 /// A citation to a file.
1584 FileCitation(FileCitationBody),
1585 /// A citation for a web resource used to generate a model response.
1586 UrlCitation(UrlCitationBody),
1587 /// A citation for a container file used to generate a model response.
1588 ContainerFileCitation(ContainerFileCitationBody),
1589 /// A path to a file.
1590 FilePath(FilePath),
1591}
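
// Editor's illustrative sketch (not part of the upstream module): how the `"type"`
// discriminator on `Annotation` selects a variant during deserialization. The IDs and
// indices below are sample values only.
#[cfg(test)]
mod annotation_example {
    use super::*;

    #[test]
    fn deserializes_discriminated_annotations() {
        let file: Annotation = serde_json::from_value(serde_json::json!({
            "type": "file_citation",
            "file_id": "file_abc",
            "filename": "notes.txt",
            "index": 0
        }))
        .unwrap();
        assert!(matches!(file, Annotation::FileCitation(_)));

        let url: Annotation = serde_json::from_value(serde_json::json!({
            "type": "url_citation",
            "start_index": 10,
            "end_index": 42,
            "title": "Example",
            "url": "https://example.com"
        }))
        .unwrap();
        assert!(matches!(url, Annotation::UrlCitation(_)));
    }
}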
1592
1593#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1594pub struct FileCitationBody {
1595 /// The ID of the file.
1596 file_id: String,
1597 /// The filename of the file cited.
1598 filename: String,
1599 /// The index of the file in the list of files.
1600 index: u32,
1601}
1602
1603#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1604pub struct UrlCitationBody {
1605 /// The index of the last character of the URL citation in the message.
1606 end_index: u32,
1607 /// The index of the first character of the URL citation in the message.
1608 start_index: u32,
1609 /// The title of the web resource.
1610 title: String,
1611 /// The URL of the web resource.
1612 url: String,
1613}
1614
1615#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1616pub struct ContainerFileCitationBody {
1617 /// The ID of the container file.
1618 container_id: String,
1619 /// The index of the last character of the container file citation in the message.
1620 end_index: u32,
1621 /// The ID of the file.
1622 file_id: String,
1623 /// The filename of the container file cited.
1624 filename: String,
1625 /// The index of the first character of the container file citation in the message.
1626 start_index: u32,
1627}
1628
1629#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1630pub struct FilePath {
1631 /// The ID of the file.
1632 file_id: String,
1633 /// The index of the file in the list of files.
1634 index: u32,
1635}
1636
1637/// A refusal explanation from the model.
1638#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1639pub struct RefusalContent {
1640 /// The refusal explanation from the model.
1641 pub refusal: String,
1642}
1643
1644/// A message generated by the model.
1645#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1646pub struct OutputMessage {
1647 /// The content of the output message.
1648 pub content: Vec<OutputMessageContent>,
1649 /// The unique ID of the output message.
1650 pub id: String,
1651 /// The role of the output message. Always `assistant`.
1652 pub role: AssistantRole,
1653 /// The status of the message. One of `in_progress`, `completed`, or
1654 /// `incomplete`. Populated when items are returned via API.
1655 pub status: OutputStatus,
1656 ///// The type of the output message. Always `message`.
1657 //pub r#type: MessageType,
1658}
1659
1660#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
1661#[serde(rename_all = "lowercase")]
1662pub enum MessageType {
1663 #[default]
1664 Message,
1665}
1666
1667/// The role for an output message - always `assistant`.
1668/// This type ensures type safety by only allowing the assistant role.
1669#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
1670#[serde(rename_all = "lowercase")]
1671pub enum AssistantRole {
1672 #[default]
1673 Assistant,
1674}
1675
1676#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1677#[serde(tag = "type", rename_all = "snake_case")]
1678pub enum OutputMessageContent {
1679 /// A text output from the model.
1680 OutputText(OutputTextContent),
1681 /// A refusal from the model.
1682 Refusal(RefusalContent),
1683}
1684
1685#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1686#[serde(tag = "type", rename_all = "snake_case")]
1687pub enum OutputContent {
1688 /// A text output from the model.
1689 OutputText(OutputTextContent),
1690 /// A refusal from the model.
1691 Refusal(RefusalContent),
1692 /// Reasoning text from the model.
1693 ReasoningText(ReasoningTextContent),
1694}
1695
1696#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1697pub struct ReasoningTextContent {
1698 /// The reasoning text from the model.
1699 pub text: String,
1700}
1701
1702/// A reasoning item representing the model's chain of thought, including summary paragraphs.
1703#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1704pub struct ReasoningItem {
1705 /// Unique identifier of the reasoning content.
1706 pub id: String,
1707 /// Reasoning summary content.
1708 pub summary: Vec<SummaryPart>,
1709 /// Reasoning text content.
1710 #[serde(skip_serializing_if = "Option::is_none")]
1711 pub content: Option<Vec<ReasoningTextContent>>,
1712 /// The encrypted content of the reasoning item - populated when a response is generated with
1713 /// `reasoning.encrypted_content` in the `include` parameter.
1714 #[serde(skip_serializing_if = "Option::is_none")]
1715 pub encrypted_content: Option<String>,
1716 /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
1717 /// Populated when items are returned via API.
1718 #[serde(skip_serializing_if = "Option::is_none")]
1719 pub status: Option<OutputStatus>,
1720}
1721
1722/// A single summary text fragment from reasoning.
1723#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1724pub struct Summary {
1725 /// A summary of the reasoning output from the model so far.
1726 pub text: String,
1727}
1728
1729#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1730#[serde(tag = "type", rename_all = "snake_case")]
1731pub enum SummaryPart {
1732 SummaryText(Summary),
1733}
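
// Editor's illustrative sketch (not part of the upstream module): deserializing a reasoning
// item whose summary is a list of `summary_text` parts; the optional fields that are absent
// from the JSON come back as `None`.
#[cfg(test)]
mod reasoning_item_example {
    use super::*;

    #[test]
    fn deserializes_reasoning_item_with_summary() {
        let item: ReasoningItem = serde_json::from_value(serde_json::json!({
            "id": "rs_123",
            "summary": [
                { "type": "summary_text", "text": "Compared both approaches before answering." }
            ]
        }))
        .unwrap();

        assert_eq!(item.summary.len(), 1);
        assert!(item.content.is_none());
        assert!(item.encrypted_content.is_none());
        assert!(item.status.is_none());
    }
}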
1734
1735/// File search tool call output.
1736#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1737pub struct FileSearchToolCall {
1738 /// The unique ID of the file search tool call.
1739 pub id: String,
1740 /// The queries used to search for files.
1741 pub queries: Vec<String>,
1742 /// The status of the file search tool call. One of `in_progress`, `searching`,
1743 /// `incomplete`, `failed`, or `completed`.
1744 pub status: FileSearchToolCallStatus,
1745 /// The results of the file search tool call.
1746 #[serde(skip_serializing_if = "Option::is_none")]
1747 pub results: Option<Vec<FileSearchToolCallResult>>,
1748}
1749
1750#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1751#[serde(rename_all = "snake_case")]
1752pub enum FileSearchToolCallStatus {
1753 InProgress,
1754 Searching,
1755 Incomplete,
1756 Failed,
1757 Completed,
1758}
1759
1760/// A single result from a file search.
1761#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1762pub struct FileSearchToolCallResult {
1763 /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
1764 /// additional information about the object in a structured format, and querying for objects
1765 /// via API or the dashboard. Keys are strings with a maximum length of 64 characters.
1766 /// Values are strings with a maximum length of 512 characters, booleans, or numbers.
1767 pub attributes: HashMap<String, serde_json::Value>,
1768 /// The unique ID of the file.
1769 pub file_id: String,
1770 /// The name of the file.
1771 pub filename: String,
1772 /// The relevance score of the file - a value between 0 and 1.
1773 pub score: f32,
1774 /// The text that was retrieved from the file.
1775 pub text: String,
1776}
1777
1778#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1779pub struct ComputerCallSafetyCheckParam {
1780 /// The ID of the pending safety check.
1781 pub id: String,
1782 /// The type of the pending safety check.
1783 #[serde(skip_serializing_if = "Option::is_none")]
1784 pub code: Option<String>,
1785 /// Details about the pending safety check.
1786 #[serde(skip_serializing_if = "Option::is_none")]
1787 pub message: Option<String>,
1788}
1789
1790#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1791#[serde(rename_all = "snake_case")]
1792pub enum WebSearchToolCallStatus {
1793 InProgress,
1794 Searching,
1795 Completed,
1796 Failed,
1797}
1798
1799#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1800pub struct WebSearchActionSearchSource {
1801 /// The type of source. Always `url`.
1802 pub r#type: String,
1803 /// The URL of the source.
1804 pub url: String,
1805}
1806
1807#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1808pub struct WebSearchActionSearch {
1809 /// The search query.
1810 pub query: String,
1811 /// The sources used in the search.
1812 pub sources: Option<Vec<WebSearchActionSearchSource>>,
1813}
1814
1815#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1816pub struct WebSearchActionOpenPage {
1817 /// The URL opened by the model.
1818 pub url: String,
1819}
1820
1821#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1822pub struct WebSearchActionFind {
1823 /// The URL of the page searched for the pattern.
1824 pub url: String,
1825 /// The pattern or text to search for within the page.
1826 pub pattern: String,
1827}
1828
1829#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1830#[serde(tag = "type", rename_all = "snake_case")]
1831pub enum WebSearchToolCallAction {
1832 /// Action type "search" - Performs a web search query.
1833 Search(WebSearchActionSearch),
1834 /// Action type "open_page" - Opens a specific URL from search results.
1835 OpenPage(WebSearchActionOpenPage),
1836 /// Action type "find": Searches for a pattern within a loaded page.
1837 Find(WebSearchActionFind),
1838}
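
// Editor's illustrative sketch (not part of the upstream module): the tagged JSON form of a
// `find` web search action. The URL and pattern are sample values only.
#[cfg(test)]
mod web_search_action_example {
    use super::*;

    #[test]
    fn serializes_find_action_with_type_tag() {
        let action = WebSearchToolCallAction::Find(WebSearchActionFind {
            url: "https://example.com/docs".to_string(),
            pattern: "installation".to_string(),
        });

        assert_eq!(
            serde_json::to_value(&action).unwrap(),
            serde_json::json!({
                "type": "find",
                "url": "https://example.com/docs",
                "pattern": "installation"
            })
        );
    }
}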
1839
1840/// Web search tool call output.
1841#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1842pub struct WebSearchToolCall {
1843 /// An object describing the specific action taken in this web search call. Includes
1844 /// details on how the model used the web (search, open_page, find).
1845 pub action: WebSearchToolCallAction,
1846 /// The unique ID of the web search tool call.
1847 pub id: String,
1848 /// The status of the web search tool call.
1849 pub status: WebSearchToolCallStatus,
1850}
1851
1852/// A tool call to a computer use tool.
1853#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1854pub struct ComputerToolCall {
1855 pub action: ComputerAction,
1856 /// An identifier used when responding to the tool call with output.
1857 pub call_id: String,
1858 /// The unique ID of the computer call.
1859 pub id: String,
1860 /// The pending safety checks for the computer call.
1861 pub pending_safety_checks: Vec<ComputerCallSafetyCheckParam>,
1862 /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
1863 /// Populated when items are returned via API.
1864 pub status: OutputStatus,
1865}
1866
1867/// A point in 2D space.
1868#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1869pub struct DragPoint {
1870 /// The x-coordinate.
1871 pub x: i32,
1872 /// The y-coordinate.
1873 pub y: i32,
1874}
1875
1876/// Represents all user-triggered actions.
1877#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1878#[serde(tag = "type", rename_all = "snake_case")]
1879pub enum ComputerAction {
1880 /// A click action.
1881 Click(ClickParam),
1882
1883 /// A double click action.
1884 DoubleClick(DoubleClickAction),
1885
1886 /// A drag action.
1887 Drag(Drag),
1888
1889 /// A collection of keypresses the model would like to perform.
1890 Keypress(KeyPressAction),
1891
1892 /// A mouse move action.
1893 Move(Move),
1894
1895 /// A screenshot action.
1896 Screenshot,
1897
1898 /// A scroll action.
1899 Scroll(Scroll),
1900
1901 /// An action to type in text.
1902 Type(Type),
1903
1904 /// A wait action.
1905 Wait,
1906}
1907
1908#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1909#[serde(rename_all = "lowercase")]
1910pub enum ClickButtonType {
1911 Left,
1912 Right,
1913 Wheel,
1914 Back,
1915 Forward,
1916}
1917
1918/// A click action.
1919#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1920pub struct ClickParam {
1921 /// Indicates which mouse button was pressed during the click. One of `left`,
1922 /// `right`, `wheel`, `back`, or `forward`.
1923 pub button: ClickButtonType,
1924 /// The x-coordinate where the click occurred.
1925 pub x: i32,
1926 /// The y-coordinate where the click occurred.
1927 pub y: i32,
1928}
1929
1930/// A double click action.
1931#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1932pub struct DoubleClickAction {
1933 /// The x-coordinate where the double click occurred.
1934 pub x: i32,
1935 /// The y-coordinate where the double click occurred.
1936 pub y: i32,
1937}
1938
1939/// A drag action.
1940#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1941pub struct Drag {
1942 /// The path of points the cursor drags through.
1943 pub path: Vec<DragPoint>,
1944}
1945
1946/// A keypress action.
1947#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1948pub struct KeyPressAction {
1949 /// The combination of keys the model is requesting to be pressed.
1950 /// This is an array of strings, each representing a key.
1951 pub keys: Vec<String>,
1952}
1953
1954/// A mouse move action.
1955#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1956pub struct Move {
1957 /// The x-coordinate to move to.
1958 pub x: i32,
1959 /// The y-coordinate to move to.
1960 pub y: i32,
1961}
1962
1963/// A scroll action.
1964#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1965pub struct Scroll {
1966 /// The horizontal scroll distance.
1967 pub scroll_x: i32,
1968 /// The vertical scroll distance.
1969 pub scroll_y: i32,
1970 /// The x-coordinate where the scroll occurred.
1971 pub x: i32,
1972 /// The y-coordinate where the scroll occurred.
1973 pub y: i32,
1974}
1975
1976/// A typing (text entry) action.
1977#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
1978pub struct Type {
1979 /// The text to type.
1980 pub text: String,
1981}
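
// Editor's illustrative sketch (not part of the upstream module): the tagged JSON produced
// for `ComputerAction` values defined above, covering a unit variant (`screenshot`) and a
// struct variant (`click`). Coordinates are sample values only.
#[cfg(test)]
mod computer_action_example {
    use super::*;

    #[test]
    fn serializes_computer_actions_with_type_tag() {
        assert_eq!(
            serde_json::to_value(ComputerAction::Screenshot).unwrap(),
            serde_json::json!({ "type": "screenshot" })
        );

        assert_eq!(
            serde_json::to_value(ComputerAction::Click(ClickParam {
                button: ClickButtonType::Left,
                x: 100,
                y: 200,
            }))
            .unwrap(),
            serde_json::json!({ "type": "click", "button": "left", "x": 100, "y": 200 })
        );
    }
}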
1982
1983#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
1984pub struct FunctionToolCall {
1985 /// A JSON string of the arguments to pass to the function.
1986 pub arguments: String,
1987 /// The unique ID of the function tool call generated by the model.
1988 pub call_id: String,
1989 /// The name of the function to run.
1990 pub name: String,
1991 /// The unique ID of the function tool call.
1992 #[serde(skip_serializing_if = "Option::is_none")]
1993 pub id: Option<String>,
1994 /// The status of the item. One of `in_progress`, `completed`, or `incomplete`.
1995 /// Populated when items are returned via API.
1996 #[serde(skip_serializing_if = "Option::is_none")]
1997 pub status: Option<OutputStatus>, // TODO rename OutputStatus?
1998}
1999
2000#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2001#[serde(rename_all = "snake_case")]
2002pub enum ImageGenToolCallStatus {
2003 InProgress,
2004 Completed,
2005 Generating,
2006 Failed,
2007}
2008
2009#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2010pub struct ImageGenToolCall {
2011 /// The unique ID of the image generation call.
2012 pub id: String,
2013 /// The generated image encoded in base64.
2014 pub result: Option<String>,
2015 /// The status of the image generation call.
2016 pub status: ImageGenToolCallStatus,
2017}
2018
2019#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2020#[serde(rename_all = "snake_case")]
2021pub enum CodeInterpreterToolCallStatus {
2022 InProgress,
2023 Completed,
2024 Incomplete,
2025 Interpreting,
2026 Failed,
2027}
2028
2029/// A tool call to run code with the code interpreter.
2030#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2031pub struct CodeInterpreterToolCall {
2032 /// The code to run, or null if not available.
2033 #[serde(skip_serializing_if = "Option::is_none")]
2034 pub code: Option<String>,
2035 /// ID of the container used to run the code.
2036 pub container_id: String,
2037 /// The unique ID of the code interpreter tool call.
2038 pub id: String,
2039 /// The outputs generated by the code interpreter, such as logs or images.
2040 /// Can be null if no outputs are available.
2041 #[serde(skip_serializing_if = "Option::is_none")]
2042 pub outputs: Option<Vec<CodeInterpreterToolCallOutput>>,
2043 /// The status of the code interpreter tool call.
2044 /// Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`.
2045 pub status: CodeInterpreterToolCallStatus,
2046}
2047
2048/// Individual result from a code interpreter: either logs or files.
2049#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2050#[serde(tag = "type", rename_all = "snake_case")]
2051pub enum CodeInterpreterToolCallOutput {
2052 /// Code interpreter output logs
2053 Logs(CodeInterpreterOutputLogs),
2054 /// Code interpreter output image
2055 Image(CodeInterpreterOutputImage),
2056}
2057
2058#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2059pub struct CodeInterpreterOutputLogs {
2060 /// The logs output from the code interpreter.
2061 pub logs: String,
2062}
2063
2064#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2065pub struct CodeInterpreterOutputImage {
2066 /// The URL of the image output from the code interpreter.
2067 pub url: String,
2068}
2069
2070#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2071pub struct CodeInterpreterFile {
2072 /// The ID of the file.
2073 file_id: String,
2074 /// The MIME type of the file.
2075 mime_type: String,
2076}
2077
2078#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2079pub struct LocalShellToolCall {
2080 /// Execute a shell command on the server.
2081 pub action: LocalShellExecAction,
2082 /// The unique ID of the local shell tool call generated by the model.
2083 pub call_id: String,
2084 /// The unique ID of the local shell call.
2085 pub id: String,
2086 /// The status of the local shell call.
2087 pub status: OutputStatus,
2088}
2089
2090/// Defines the shape of a local shell action (exec).
2091#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2092pub struct LocalShellExecAction {
2093 /// The command to run.
2094 pub command: Vec<String>,
2095 /// Environment variables to set for the command.
2096 pub env: HashMap<String, String>,
2097 /// Optional timeout in milliseconds for the command.
2098 pub timeout_ms: Option<u64>,
2099 /// Optional user to run the command as.
2100 pub user: Option<String>,
2101 /// Optional working directory to run the command in.
2102 pub working_directory: Option<String>,
2103}
2104
2105/// Commands and limits describing how to run the function shell tool call.
2106#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2107pub struct FunctionShellActionParam {
2108 /// Ordered shell commands for the execution environment to run.
2109 pub commands: Vec<String>,
2110 /// Maximum wall-clock time in milliseconds to allow the shell commands to run.
2111 #[serde(skip_serializing_if = "Option::is_none")]
2112 pub timeout_ms: Option<u64>,
2113 /// Maximum number of UTF-8 characters to capture from combined stdout and stderr output.
2114 #[serde(skip_serializing_if = "Option::is_none")]
2115 pub max_output_length: Option<u64>,
2116}
2117
2118/// Status values reported for function shell tool calls.
2119#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2120#[serde(rename_all = "snake_case")]
2121pub enum FunctionShellCallItemStatus {
2122 InProgress,
2123 Completed,
2124 Incomplete,
2125}
2126
2127/// A tool representing a request to execute one or more shell commands.
2128#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2129pub struct FunctionShellCallItemParam {
2130 /// The unique ID of the function shell tool call. Populated when this item is returned via API.
2131 #[serde(skip_serializing_if = "Option::is_none")]
2132 pub id: Option<String>,
2133 /// The unique ID of the function shell tool call generated by the model.
2134 pub call_id: String,
2135 /// The shell commands and limits that describe how to run the tool call.
2136 pub action: FunctionShellActionParam,
2137 /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
2138 #[serde(skip_serializing_if = "Option::is_none")]
2139 pub status: Option<FunctionShellCallItemStatus>,
2140}
2141
2142/// Indicates that the shell commands finished and returned an exit code.
2143#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2144pub struct FunctionShellCallOutputExitOutcomeParam {
2145 /// The exit code returned by the shell process.
2146 pub exit_code: i32,
2147}
2148
2149/// The exit or timeout outcome associated with this chunk.
2150#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2151#[serde(tag = "type", rename_all = "snake_case")]
2152pub enum FunctionShellCallOutputOutcomeParam {
2153 Timeout,
2154 Exit(FunctionShellCallOutputExitOutcomeParam),
2155}
2156
2157/// Captured stdout and stderr for a portion of a function shell tool call output.
2158#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2159pub struct FunctionShellCallOutputContentParam {
2160 /// Captured stdout output for this chunk of the shell call.
2161 pub stdout: String,
2162 /// Captured stderr output for this chunk of the shell call.
2163 pub stderr: String,
2164 /// The exit or timeout outcome associated with this chunk.
2165 pub outcome: FunctionShellCallOutputOutcomeParam,
2166}
2167
2168/// The streamed output emitted by a function shell tool call.
2169#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2170pub struct FunctionShellCallOutputItemParam {
2171 /// The unique ID of the function shell tool call output. Populated when this item is returned via API.
2172 #[serde(skip_serializing_if = "Option::is_none")]
2173 pub id: Option<String>,
2174 /// The unique ID of the function shell tool call generated by the model.
2175 pub call_id: String,
2176 /// Captured chunks of stdout and stderr output, along with their associated outcomes.
2177 pub output: Vec<FunctionShellCallOutputContentParam>,
2178 /// The maximum number of UTF-8 characters captured for this shell call's combined output.
2179 #[serde(skip_serializing_if = "Option::is_none")]
2180 pub max_output_length: Option<u64>,
2181}
2182
2183/// Status values reported for apply_patch tool calls.
2184#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2185#[serde(rename_all = "snake_case")]
2186pub enum ApplyPatchCallStatusParam {
2187 InProgress,
2188 Completed,
2189}
2190
2191/// Instruction for creating a new file via the apply_patch tool.
2192#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2193pub struct ApplyPatchCreateFileOperationParam {
2194 /// Path of the file to create relative to the workspace root.
2195 pub path: String,
2196 /// Unified diff content to apply when creating the file.
2197 pub diff: String,
2198}
2199
2200/// Instruction for deleting an existing file via the apply_patch tool.
2201#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2202pub struct ApplyPatchDeleteFileOperationParam {
2203 /// Path of the file to delete relative to the workspace root.
2204 pub path: String,
2205}
2206
2207/// Instruction for updating an existing file via the apply_patch tool.
2208#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2209pub struct ApplyPatchUpdateFileOperationParam {
2210 /// Path of the file to update relative to the workspace root.
2211 pub path: String,
2212 /// Unified diff content to apply to the existing file.
2213 pub diff: String,
2214}
2215
2216/// One of the create_file, delete_file, or update_file operations supplied to the apply_patch tool.
2217#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2218#[serde(tag = "type", rename_all = "snake_case")]
2219pub enum ApplyPatchOperationParam {
2220 CreateFile(ApplyPatchCreateFileOperationParam),
2221 DeleteFile(ApplyPatchDeleteFileOperationParam),
2222 UpdateFile(ApplyPatchUpdateFileOperationParam),
2223}
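
// Editor's illustrative sketch (not part of the upstream module): an apply_patch operation
// serializes its `"type"` discriminator alongside the operation's own fields. The path and
// diff are sample values only.
#[cfg(test)]
mod apply_patch_operation_example {
    use super::*;

    #[test]
    fn serializes_update_file_operation() {
        let op = ApplyPatchOperationParam::UpdateFile(ApplyPatchUpdateFileOperationParam {
            path: "src/lib.rs".to_string(),
            diff: "@@ -1 +1 @@\n-old\n+new\n".to_string(),
        });

        assert_eq!(
            serde_json::to_value(&op).unwrap(),
            serde_json::json!({
                "type": "update_file",
                "path": "src/lib.rs",
                "diff": "@@ -1 +1 @@\n-old\n+new\n"
            })
        );
    }
}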
2224
2225/// A tool call representing a request to create, delete, or update files using diff patches.
2226#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2227pub struct ApplyPatchToolCallItemParam {
2228 /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
2229 #[serde(skip_serializing_if = "Option::is_none")]
2230 pub id: Option<String>,
2231 /// The unique ID of the apply patch tool call generated by the model.
2232 pub call_id: String,
2233 /// The status of the apply patch tool call. One of `in_progress` or `completed`.
2234 pub status: ApplyPatchCallStatusParam,
2235 /// The specific create, delete, or update instruction for the apply_patch tool call.
2236 pub operation: ApplyPatchOperationParam,
2237}
2238
2239/// Outcome values reported for apply_patch tool call outputs.
2240#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2241#[serde(rename_all = "snake_case")]
2242pub enum ApplyPatchCallOutputStatusParam {
2243 Completed,
2244 Failed,
2245}
2246
2247/// The streamed output emitted by an apply patch tool call.
2248#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2249pub struct ApplyPatchToolCallOutputItemParam {
2250 /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
2251 #[serde(skip_serializing_if = "Option::is_none")]
2252 pub id: Option<String>,
2253 /// The unique ID of the apply patch tool call generated by the model.
2254 pub call_id: String,
2255 /// The status of the apply patch tool call output. One of `completed` or `failed`.
2256 pub status: ApplyPatchCallOutputStatusParam,
2257 /// Optional human-readable log text from the apply patch tool (e.g., patch results or errors).
2258 #[serde(skip_serializing_if = "Option::is_none")]
2259 pub output: Option<String>,
2260}
2261
2262/// Shell exec action.
2263/// Describes one or more shell commands to execute, along with optional limits.
2264#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2265pub struct FunctionShellAction {
2266 /// A list of commands to run.
2267 pub commands: Vec<String>,
2268 /// Optional timeout in milliseconds for the commands.
2269 pub timeout_ms: Option<u64>,
2270 /// Optional maximum number of characters to return from each command.
2271 pub max_output_length: Option<u64>,
2272}
2273
2274/// Status values reported for function shell tool calls.
2275#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2276#[serde(rename_all = "snake_case")]
2277pub enum LocalShellCallStatus {
2278 InProgress,
2279 Completed,
2280 Incomplete,
2281}
2282
2283/// A tool call that executes one or more shell commands in a managed environment.
2284#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2285pub struct FunctionShellCall {
2286 /// The unique ID of the function shell tool call. Populated when this item is returned via API.
2287 pub id: String,
2288 /// The unique ID of the function shell tool call generated by the model.
2289 pub call_id: String,
2290 /// The shell commands and limits that describe how to run the tool call.
2291 pub action: FunctionShellAction,
2292 /// The status of the shell call. One of `in_progress`, `completed`, or `incomplete`.
2293 pub status: LocalShellCallStatus,
2294 /// The ID of the entity that created this tool call.
2295 #[serde(skip_serializing_if = "Option::is_none")]
2296 pub created_by: Option<String>,
2297}
2298
2299/// The content of a shell call output.
2300#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2301pub struct FunctionShellCallOutputContent {
2302 pub stdout: String,
2303 pub stderr: String,
2304 /// Represents either an exit outcome (with an exit code) or a timeout outcome for a shell call output chunk.
2305 #[serde(flatten)]
2306 pub outcome: FunctionShellCallOutputOutcome,
2307 #[serde(skip_serializing_if = "Option::is_none")]
2308 pub created_by: Option<String>,
2309}
2310
2311/// Function shell call outcome
2312#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2313#[serde(tag = "type", rename_all = "snake_case")]
2314pub enum FunctionShellCallOutputOutcome {
2315 Timeout,
2316 Exit(FunctionShellCallOutputExitOutcome),
2317}
2318
2319/// Indicates that the shell commands finished and returned an exit code.
2320#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2321pub struct FunctionShellCallOutputExitOutcome {
2322 /// Exit code from the shell process.
2323 pub exit_code: i32,
2324}
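
// Editor's illustrative sketch (not part of the upstream module): because the `outcome`
// field is `#[serde(flatten)]`ed, its `"type"` tag and fields merge into the surrounding
// output-content object rather than nesting under an `"outcome"` key.
#[cfg(test)]
mod shell_output_flatten_example {
    use super::*;

    #[test]
    fn flattens_outcome_into_output_content() {
        let chunk = FunctionShellCallOutputContent {
            stdout: "hello\n".to_string(),
            stderr: String::new(),
            outcome: FunctionShellCallOutputOutcome::Exit(FunctionShellCallOutputExitOutcome {
                exit_code: 0,
            }),
            created_by: None,
        };

        assert_eq!(
            serde_json::to_value(&chunk).unwrap(),
            serde_json::json!({
                "stdout": "hello\n",
                "stderr": "",
                "type": "exit",
                "exit_code": 0
            })
        );
    }
}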
2325
2326/// The output of a shell tool call.
2327#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2328pub struct FunctionShellCallOutput {
2329 /// The unique ID of the shell call output. Populated when this item is returned via API.
2330 pub id: String,
2331 /// The unique ID of the shell tool call generated by the model.
2332 pub call_id: String,
2333 /// An array of shell call output contents.
2334 pub output: Vec<FunctionShellCallOutputContent>,
2335 /// The maximum length of the shell command output. This is generated by the model and should be
2336 /// passed back with the raw output.
2337 pub max_output_length: Option<u64>,
2338 #[serde(skip_serializing_if = "Option::is_none")]
2339 pub created_by: Option<String>,
2340}
2341
2342/// Status values reported for apply_patch tool calls.
2343#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2344#[serde(rename_all = "snake_case")]
2345pub enum ApplyPatchCallStatus {
2346 InProgress,
2347 Completed,
2348}
2349
2350/// Instruction describing how to create a file via the apply_patch tool.
2351#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2352pub struct ApplyPatchCreateFileOperation {
2353 /// Path of the file to create.
2354 pub path: String,
2355 /// Diff to apply.
2356 pub diff: String,
2357}
2358
2359/// Instruction describing how to delete a file via the apply_patch tool.
2360#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2361pub struct ApplyPatchDeleteFileOperation {
2362 /// Path of the file to delete.
2363 pub path: String,
2364}
2365
2366/// Instruction describing how to update a file via the apply_patch tool.
2367#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2368pub struct ApplyPatchUpdateFileOperation {
2369 /// Path of the file to update.
2370 pub path: String,
2371 /// Diff to apply.
2372 pub diff: String,
2373}
2374
2375/// One of the create_file, delete_file, or update_file operations applied via apply_patch.
2376#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2377#[serde(tag = "type", rename_all = "snake_case")]
2378pub enum ApplyPatchOperation {
2379 CreateFile(ApplyPatchCreateFileOperation),
2380 DeleteFile(ApplyPatchDeleteFileOperation),
2381 UpdateFile(ApplyPatchUpdateFileOperation),
2382}
2383
2384/// A tool call that applies file diffs by creating, deleting, or updating files.
2385#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2386pub struct ApplyPatchToolCall {
2387 /// The unique ID of the apply patch tool call. Populated when this item is returned via API.
2388 pub id: String,
2389 /// The unique ID of the apply patch tool call generated by the model.
2390 pub call_id: String,
2391 /// The status of the apply patch tool call. One of `in_progress` or `completed`.
2392 pub status: ApplyPatchCallStatus,
2393 /// One of the create_file, delete_file, or update_file operations applied via apply_patch.
2394 pub operation: ApplyPatchOperation,
2395 /// The ID of the entity that created this tool call.
2396 #[serde(skip_serializing_if = "Option::is_none")]
2397 pub created_by: Option<String>,
2398}
2399
2400/// Outcome values reported for apply_patch tool call outputs.
2401#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
2402#[serde(rename_all = "snake_case")]
2403pub enum ApplyPatchCallOutputStatus {
2404 Completed,
2405 Failed,
2406}
2407
2408/// The output emitted by an apply patch tool call.
2409#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2410pub struct ApplyPatchToolCallOutput {
2411 /// The unique ID of the apply patch tool call output. Populated when this item is returned via API.
2412 pub id: String,
2413 /// The unique ID of the apply patch tool call generated by the model.
2414 pub call_id: String,
2415 /// The status of the apply patch tool call output. One of `completed` or `failed`.
2416 pub status: ApplyPatchCallOutputStatus,
2417 /// Optional textual output returned by the apply patch tool.
2418 pub output: Option<String>,
2419 /// The ID of the entity that created this tool call output.
2420 #[serde(skip_serializing_if = "Option::is_none")]
2421 pub created_by: Option<String>,
2422}
2423
2424/// Output of an MCP server tool invocation.
2425#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2426pub struct MCPToolCall {
2427 /// A JSON string of the arguments passed to the tool.
2428 pub arguments: String,
2429 /// The unique ID of the tool call.
2430 pub id: String,
2431 /// The name of the tool that was run.
2432 pub name: String,
2433 /// The label of the MCP server running the tool.
2434 pub server_label: String,
2435 /// Unique identifier for the MCP tool call approval request. Include this value
2436 /// in a subsequent `mcp_approval_response` input to approve or reject the corresponding
2437 /// tool call.
2438 pub approval_request_id: Option<String>,
2439 /// Error message from the call, if any.
2440 pub error: Option<String>,
2441 /// The output from the tool call.
2442 pub output: Option<String>,
2443 /// The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
2444 /// `calling`, or `failed`.
2445 pub status: Option<MCPToolCallStatus>,
2446}
2447
2448#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2449#[serde(rename_all = "snake_case")]
2450pub enum MCPToolCallStatus {
2451 InProgress,
2452 Completed,
2453 Incomplete,
2454 Calling,
2455 Failed,
2456}
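
// Editor's illustrative sketch (not part of the upstream module): unlike most optional fields
// in this file, the optional fields on `MCPToolCall` have no `skip_serializing_if`, so unset
// values serialize as explicit `null`s. The IDs and labels are sample values only.
#[cfg(test)]
mod mcp_tool_call_example {
    use super::*;

    #[test]
    fn serializes_unset_optional_fields_as_null() {
        let call = MCPToolCall {
            arguments: "{}".to_string(),
            id: "mcp_1".to_string(),
            name: "search".to_string(),
            server_label: "deepwiki".to_string(),
            approval_request_id: None,
            error: None,
            output: None,
            status: None,
        };

        let value = serde_json::to_value(&call).unwrap();
        assert_eq!(value["error"], serde_json::Value::Null);
        assert_eq!(value["status"], serde_json::Value::Null);
    }
}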
2457
2458#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2459pub struct MCPListTools {
2460 /// The unique ID of the list.
2461 pub id: String,
2462 /// The label of the MCP server.
2463 pub server_label: String,
2464 /// The tools available on the server.
2465 pub tools: Vec<MCPListToolsTool>,
2466 /// Error message if listing failed.
2467 #[serde(skip_serializing_if = "Option::is_none")]
2468 pub error: Option<String>,
2469}
2470
2471#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2472pub struct MCPApprovalRequest {
2473 /// JSON string of arguments for the tool.
2474 pub arguments: String,
2475 /// The unique ID of the approval request.
2476 pub id: String,
2477 /// The name of the tool to run.
2478 pub name: String,
2479 /// The label of the MCP server making the request.
2480 pub server_label: String,
2481}
2482
2483#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2484pub struct InputTokenDetails {
2485 /// The number of tokens that were retrieved from the cache.
2486 /// [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
2487 pub cached_tokens: u32,
2488}
2489
2490#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2491pub struct OutputTokenDetails {
2492 /// The number of reasoning tokens.
2493 pub reasoning_tokens: u32,
2494}
2495
2496/// Usage statistics for a response.
2497#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2498pub struct ResponseUsage {
2499 /// The number of input tokens.
2500 pub input_tokens: u32,
2501 /// A detailed breakdown of the input tokens.
2502 pub input_tokens_details: InputTokenDetails,
2503 /// The number of output tokens.
2504 pub output_tokens: u32,
2505 /// A detailed breakdown of the output tokens.
2506 pub output_tokens_details: OutputTokenDetails,
2507 /// The total number of tokens used.
2508 pub total_tokens: u32,
2509}
2510
2511#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2512#[serde(untagged)]
2513pub enum Instructions {
2514 /// A text input to the model, equivalent to a text input with the `developer` role.
2515 Text(String),
2516 /// A list of one or many input items to the model, containing different content types.
2517 Array(Vec<InputItem>),
2518}
2519
2520/// The complete response returned by the Responses API.
2521#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2522pub struct Response {
2523 /// Whether to run the model response in the background.
2524 /// [Learn more](https://platform.openai.com/docs/guides/background).
2525 #[serde(skip_serializing_if = "Option::is_none")]
2526 pub background: Option<bool>,
2527
2528 /// Billing information for the response.
2529 #[serde(skip_serializing_if = "Option::is_none")]
2530 pub billing: Option<Billing>,
2531
2532 /// The conversation that this response belongs to. Input items and output
2533 /// items from this response are automatically added to this conversation.
2534 #[serde(skip_serializing_if = "Option::is_none")]
2535 pub conversation: Option<Conversation>,
2536
2537 /// Unix timestamp (in seconds) when this Response was created.
2538 pub created_at: u64,
2539
2540 /// An error object returned when the model fails to generate a Response.
2541 #[serde(skip_serializing_if = "Option::is_none")]
2542 pub error: Option<ErrorObject>,
2543
2544 /// Unique identifier for this response.
2545 pub id: String,
2546
2547 /// Details about why the response is incomplete, if any.
2548 #[serde(skip_serializing_if = "Option::is_none")]
2549 pub incomplete_details: Option<IncompleteDetails>,
2550
2551 /// A system (or developer) message inserted into the model's context.
2552 ///
2553 /// When used along with `previous_response_id`, the instructions from a previous response
2554 /// will not be carried over to the next response. This makes it simple to swap out
2555 /// system (or developer) messages in new responses.
2556 #[serde(skip_serializing_if = "Option::is_none")]
2557 pub instructions: Option<Instructions>,
2558
2559 /// An upper bound for the number of tokens that can be generated for a response,
2560 /// including visible output tokens and
2561 /// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
2562 #[serde(skip_serializing_if = "Option::is_none")]
2563 pub max_output_tokens: Option<u32>,
2564
2565 /// Set of 16 key-value pairs that can be attached to an object. This can be
2566 /// useful for storing additional information about the object in a structured
2567 /// format, and querying for objects via API or the dashboard.
2568 ///
2569 /// Keys are strings with a maximum length of 64 characters. Values are strings
2570 /// with a maximum length of 512 characters.
2571 #[serde(skip_serializing_if = "Option::is_none")]
2572 pub metadata: Option<HashMap<String, String>>,
2573
2574 /// Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a
2575 /// wide range of models with different capabilities, performance characteristics,
2576 /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
2577 pub model: String,
2578
2579 /// The object type of this resource - always set to `response`.
2580 pub object: String,
2581
2582 /// An array of content items generated by the model.
2583 ///
2584 /// - The length and order of items in the output array is dependent on the model's response.
2585 /// - Rather than accessing the first item in the output array and assuming it's an assistant
2586 /// message with the content generated by the model, you might consider using
2587 /// the `output_text` property where supported in SDKs.
2588 pub output: Vec<OutputItem>,
2589
2590 /// SDK-only convenience property that contains the aggregated text output from all
2591 /// `output_text` items in the `output` array, if any are present.
2592 /// Supported in the Python and JavaScript SDKs.
2593 // #[serde(skip_serializing_if = "Option::is_none")]
2594 // pub output_text: Option<String>,
2595
2596 /// Whether to allow the model to run tool calls in parallel.
2597 #[serde(skip_serializing_if = "Option::is_none")]
2598 pub parallel_tool_calls: Option<bool>,
2599
2600 /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.
2601 /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
2602 /// Cannot be used in conjunction with `conversation`.
2603 #[serde(skip_serializing_if = "Option::is_none")]
2604 pub previous_response_id: Option<String>,
2605
2606 /// Reference to a prompt template and its variables.
2607 /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
2608 #[serde(skip_serializing_if = "Option::is_none")]
2609 pub prompt: Option<Prompt>,
2610
2611 /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces
2612 /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
2613 #[serde(skip_serializing_if = "Option::is_none")]
2614 pub prompt_cache_key: Option<String>,
2615
2616 /// The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching,
2617 /// which keeps cached prefixes active for longer, up to a maximum of 24 hours. [Learn
2618 /// more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
2619 #[serde(skip_serializing_if = "Option::is_none")]
2620 pub prompt_cache_retention: Option<PromptCacheRetention>,
2621
2622 /// **gpt-5 and o-series models only**
2623 /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
2624 #[serde(skip_serializing_if = "Option::is_none")]
2625 pub reasoning: Option<Reasoning>,
2626
2627 /// A stable identifier used to help detect users of your application that may be violating OpenAI's
2628 /// usage policies.
2629 ///
2630 /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username
2631 /// or email address, in order to avoid sending us any identifying information. [Learn
2632 /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
2633 #[serde(skip_serializing_if = "Option::is_none")]
2634 pub safety_identifier: Option<String>,
2635
2636 /// Specifies the processing type used for serving the request.
2637 /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
2638 /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
2639 /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
2640 /// - When not set, the default behavior is 'auto'.
2641 ///
2642 /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
2643 #[serde(skip_serializing_if = "Option::is_none")]
2644 pub service_tier: Option<ServiceTier>,
2645
2646 /// The status of the response generation.
2647 /// One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or `incomplete`.
2648 pub status: Status,
2649
2650 /// What sampling temperature was used, between 0 and 2. Higher values like 0.8 make
2651 /// outputs more random, lower values like 0.2 make output more focused and deterministic.
2652 ///
2653 /// We generally recommend altering this or `top_p` but not both.
2654 #[serde(skip_serializing_if = "Option::is_none")]
2655 pub temperature: Option<f32>,
2656
2657 /// Configuration options for a text response from the model. Can be plain
2658 /// text or structured JSON data. Learn more:
2659 /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
2660 /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
2661 #[serde(skip_serializing_if = "Option::is_none")]
2662 pub text: Option<ResponseTextParam>,
2663
2664 /// How the model should select which tool (or tools) to use when generating
2665 /// a response. See the `tools` parameter to see how to specify which tools
2666 /// the model can call.
2667 #[serde(skip_serializing_if = "Option::is_none")]
2668 pub tool_choice: Option<ToolChoiceParam>,
2669
2670 /// An array of tools the model may call while generating a response. You
2671 /// can specify which tool to use by setting the `tool_choice` parameter.
2672 ///
2673 /// We support the following categories of tools:
2674 /// - **Built-in tools**: Tools that are provided by OpenAI that extend the
2675 /// model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)
2676 /// or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about
2677 /// [built-in tools](https://platform.openai.com/docs/guides/tools).
2678 /// - **MCP Tools**: Integrations with third-party systems via custom MCP servers
2679 /// or predefined connectors such as Google Drive and SharePoint. Learn more about
2680 /// [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
2681 /// - **Function calls (custom tools)**: Functions that are defined by you,
2682 /// enabling the model to call your own code with strongly typed arguments
2683 /// and outputs. Learn more about
2684 /// [function calling](https://platform.openai.com/docs/guides/function-calling). You can also use
2685 /// custom tools to call your own code.
2686 #[serde(skip_serializing_if = "Option::is_none")]
2687 pub tools: Option<Vec<Tool>>,
2688
2689 /// An integer between 0 and 20 specifying the number of most likely tokens to return at each
2690 /// token position, each with an associated log probability.
2691 #[serde(skip_serializing_if = "Option::is_none")]
2692 pub top_logprobs: Option<u8>,
2693
2694 /// An alternative to sampling with temperature, called nucleus sampling,
2695 /// where the model considers the results of the tokens with top_p probability
2696 /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
2697 /// are considered.
2698 ///
2699 /// We generally recommend altering this or `temperature` but not both.
2700 #[serde(skip_serializing_if = "Option::is_none")]
2701 pub top_p: Option<f32>,
2702
2703 /// The truncation strategy to use for the model response.
2704 /// - `auto`: If the input to this Response exceeds
2705 /// the model's context window size, the model will truncate the
2706 /// response to fit the context window by dropping items from the beginning of the conversation.
2707 /// - `disabled` (default): If the input size will exceed the context window
2708 /// size for a model, the request will fail with a 400 error.
2709 #[serde(skip_serializing_if = "Option::is_none")]
2710 pub truncation: Option<Truncation>,
2711
2712 /// Represents token usage details including input tokens, output tokens,
2713 /// a breakdown of output tokens, and the total tokens used.
2714 #[serde(skip_serializing_if = "Option::is_none")]
2715 pub usage: Option<ResponseUsage>,
2716}
2717
2718impl Response {
2719 /// SDK-only convenience property that contains the aggregated text output from all
2720 /// `output_text` items in the `output` array, if any are present.
2721 pub fn output_text(&self) -> Option<String> {
2722 let output = self
2723 .output
2724 .iter()
2725 .filter_map(|item| match item {
2726 OutputItem::Message(msg) => Some(
2727 msg.content
2728 .iter()
2729 .filter_map(|content| match content {
2730 OutputMessageContent::OutputText(ot) => Some(ot.text.clone()),
2731 _ => None,
2732 })
2733 .collect::<Vec<String>>(),
2734 ),
2735 _ => None,
2736 })
2737 .flatten()
2738 .collect::<Vec<String>>()
2739 .join("");
2740 if output.is_empty() {
2741 None
2742 } else {
2743 Some(output)
2744 }
2745 }
2746}
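
// Editor's illustrative sketch (not part of the upstream module): `output_text()` concatenates
// every `output_text` content part across message output items, in order, and skips everything
// else. The JSON below supplies only the required `Response` fields; the remaining optional
// fields deserialize to `None`. IDs, model name, and timestamp are sample values only.
#[cfg(test)]
mod output_text_example {
    use super::*;

    #[test]
    fn aggregates_output_text_parts() {
        let response: Response = serde_json::from_value(serde_json::json!({
            "id": "resp_123",
            "object": "response",
            "created_at": 1700000000,
            "model": "gpt-4o",
            "status": "completed",
            "output": [{
                "type": "message",
                "id": "msg_1",
                "role": "assistant",
                "status": "completed",
                "content": [
                    { "type": "output_text", "text": "Hello, ", "annotations": [] },
                    { "type": "refusal", "refusal": "n/a" },
                    { "type": "output_text", "text": "world!", "annotations": [] }
                ]
            }]
        }))
        .unwrap();

        assert_eq!(response.output_text().as_deref(), Some("Hello, world!"));
    }
}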
2747
2748#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
2749#[serde(rename_all = "snake_case")]
2750pub enum Status {
2751 Completed,
2752 Failed,
2753 InProgress,
2754 Cancelled,
2755 Queued,
2756 Incomplete,
2757}
2758
2759/// An output item generated by the model as part of a response.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum OutputItem {
    /// An output message from the model.
    Message(OutputMessage),
    /// The results of a file search tool call. See the
    /// [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
    /// for more information.
    FileSearchCall(FileSearchToolCall),
    /// A tool call to run a function. See the
    /// [function calling guide](https://platform.openai.com/docs/guides/function-calling)
    /// for more information.
    FunctionCall(FunctionToolCall),
    /// The results of a web search tool call. See the
    /// [web search guide](https://platform.openai.com/docs/guides/tools-web-search)
    /// for more information.
    WebSearchCall(WebSearchToolCall),
    /// A tool call to a computer use tool. See the
    /// [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
    /// for more information.
    ComputerCall(ComputerToolCall),
    /// A description of the chain of thought used by a reasoning model while generating
    /// a response. Be sure to include these items in your `input` to the Responses API for
    /// subsequent turns of a conversation if you are manually
    /// [managing context](https://platform.openai.com/docs/guides/conversation-state).
    Reasoning(ReasoningItem),
    /// An image generation request made by the model.
    ImageGenerationCall(ImageGenToolCall),
    /// A tool call to run code.
    CodeInterpreterCall(CodeInterpreterToolCall),
    /// A tool call to run a command on the local shell.
    LocalShellCall(LocalShellToolCall),
    /// A tool call that executes one or more shell commands in a managed environment.
    ShellCall(FunctionShellCall),
    /// The output of a shell tool call.
    ShellCallOutput(FunctionShellCallOutput),
    /// A tool call that applies file diffs by creating, deleting, or updating files.
    ApplyPatchCall(ApplyPatchToolCall),
    /// The output emitted by an apply patch tool call.
    ApplyPatchCallOutput(ApplyPatchToolCallOutput),
    /// An invocation of a tool on an MCP server.
    McpCall(MCPToolCall),
    /// A list of tools available on an MCP server.
    McpListTools(MCPListTools),
    /// A request for human approval of a tool invocation.
    McpApprovalRequest(MCPApprovalRequest),
    /// A call to a custom tool created by the model.
    CustomToolCall(CustomToolCall),
}

/// A call to a custom tool created by the model.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[non_exhaustive]
pub struct CustomToolCall {
    /// An identifier used to map this custom tool call to a tool call output.
    pub call_id: String,
    /// The input for the custom tool call generated by the model.
    pub input: String,
    /// The name of the custom tool being called.
    pub name: String,
    /// The unique ID of the custom tool call in the OpenAI platform.
    pub id: String,
}

/// Confirmation that an object was deleted.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct DeleteResponse {
    /// The type of object returned.
    pub object: String,
    /// Whether the object was deleted.
    pub deleted: bool,
    /// The ID of the deleted object.
    pub id: String,
}

/// A reference to an item by its ID.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct AnyItemReference {
    /// The type of the referenced item, if specified.
    pub r#type: Option<String>,
    /// The ID of the referenced item.
    pub id: String,
}

/// An item returned by the API, discriminated by its `type` field.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ItemResourceItem {
    Message(MessageItem),
    FileSearchCall(FileSearchToolCall),
    ComputerCall(ComputerToolCall),
    ComputerCallOutput(ComputerCallOutputItemParam),
    WebSearchCall(WebSearchToolCall),
    FunctionCall(FunctionToolCall),
    FunctionCallOutput(FunctionCallOutputItemParam),
    ImageGenerationCall(ImageGenToolCall),
    CodeInterpreterCall(CodeInterpreterToolCall),
    LocalShellCall(LocalShellToolCall),
    LocalShellCallOutput(LocalShellToolCallOutput),
    ShellCall(FunctionShellCallItemParam),
    ShellCallOutput(FunctionShellCallOutputItemParam),
    ApplyPatchCall(ApplyPatchToolCallItemParam),
    ApplyPatchCallOutput(ApplyPatchToolCallOutputItemParam),
    McpListTools(MCPListTools),
    McpApprovalRequest(MCPApprovalRequest),
    McpApprovalResponse(MCPApprovalResponse),
    McpCall(MCPToolCall),
}

/// Either a fully populated item or a bare reference to an item by ID.
///
/// Deserialization is untagged and tries the variants in order, so the fully
/// populated `Item` variant is listed first; otherwise any object carrying an
/// `id` field would match the reference form and the item data would be lost.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum ItemResource {
    Item(ItemResourceItem),
    ItemReference(AnyItemReference),
}

/// A list of Response items.
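///
/// # Example
///
/// A sketch of walking a page of items; `page` is assumed to be a
/// `ResponseItemList` returned by an input-item listing call (not shown here):
///
/// ```ignore
/// for item in &page.data {
///     match item {
///         ItemResource::Item(_) => println!("full item"),
///         ItemResource::ItemReference(r) => println!("reference to {}", r.id),
///     }
/// }
/// if page.has_more {
///     // request the next page, starting after `page.last_id`
/// }
/// ```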
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ResponseItemList {
    /// The type of object returned, must be `list`.
    pub object: String,
    /// The ID of the first item in the list.
    pub first_id: Option<String>,
    /// The ID of the last item in the list.
    pub last_id: Option<String>,
    /// Whether there are more items in the list.
    pub has_more: bool,
    /// The list of items.
    pub data: Vec<ItemResource>,
}

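/// Request body used to count the input tokens that a response request would
/// consume (the corresponding client call is not shown here).
///
/// # Example
///
/// A minimal builder sketch; the model name and input text are illustrative:
///
/// ```ignore
/// let body = TokenCountsBodyArgs::default()
///     .model("gpt-4o")
///     .input(InputParam::Text("How many tokens is this?".into()))
///     .build()?;
/// ```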
#[derive(Clone, Serialize, Deserialize, Debug, Default, Builder, PartialEq)]
#[builder(
    name = "TokenCountsBodyArgs",
    pattern = "mutable",
    setter(into, strip_option),
    default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct TokenCountsBody {
    /// The conversation that this response belongs to. Items from this
    /// conversation are prepended to `input_items` for this response request.
    /// Input items and output items from this response are automatically added to this
    /// conversation after this response completes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub conversation: Option<ConversationParam>,

    /// Text, image, or file inputs to the model, used to generate a response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input: Option<InputParam>,

    /// A system (or developer) message inserted into the model's context.
    ///
    /// When used along with `previous_response_id`, the instructions from a previous response will
    /// not be carried over to the next response. This makes it simple to swap out system (or
    /// developer) messages in new responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
    /// wide range of models with different capabilities, performance characteristics,
    /// and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
    /// to browse and compare available models.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Whether to allow the model to run tool calls in parallel.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// The unique ID of the previous response to the model. Use this to create multi-turn
    /// conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    /// Cannot be used in conjunction with `conversation`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub previous_response_id: Option<String>,

    /// **gpt-5 and o-series models only**
    ///
    /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<Reasoning>,

    /// Configuration options for a text response from the model. Can be plain
    /// text or structured JSON data. Learn more:
    /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<ResponseTextParam>,

    /// How the model should select which tool (or tools) to use when generating
    /// a response. See the `tools` parameter to see how to specify which tools
    /// the model can call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoiceParam>,

    /// An array of tools the model may call while generating a response. You can specify which tool
    /// to use by setting the `tool_choice` parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// The truncation strategy to use for the model response.
    /// - `auto`: If the input to this Response exceeds
    /// the model's context window size, the model will truncate the
    /// response to fit the context window by dropping items from the beginning of the conversation.
    /// - `disabled` (default): If the input size will exceed the context window
    /// size for a model, the request will fail with a 400 error.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<Truncation>,
}

/// The result of a token-counting request.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct TokenCountsResource {
    /// The type of object returned.
    pub object: String,
    /// The number of input tokens.
    pub input_tokens: u32,
}