async_openai/types/batches/batch.rs
1use std::collections::HashMap;
2
3use derive_builder::Builder;
4use serde::{Deserialize, Serialize};
5
6use crate::error::OpenAIError;
7use crate::types::batches::ResponseUsage;
8use crate::types::Metadata;
9
/// Request payload for creating a batch (`POST /v1/batches`).
///
/// Construct via the generated [`BatchRequestArgs`] builder: setters accept
/// `Into` conversions, `Option` fields are set without wrapping
/// (`strip_option`), and `build()` fails with [`OpenAIError`].
#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq, Deserialize)]
#[builder(name = "BatchRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct BatchRequest {
    /// The ID of an uploaded file that contains requests for the new batch.
    ///
    /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file.
    ///
    /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size.
    pub input_file_id: String,

    /// The endpoint to be used for all requests in the batch. Currently `/v1/responses`,
    /// `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, and `/v1/moderations` are
    /// supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
    /// embedding inputs across all requests in the batch.
    pub endpoint: BatchEndpoint,

    /// The time frame within which the batch should be processed. Currently only `24h` is supported.
    pub completion_window: BatchCompletionWindow,

    /// Optional custom metadata for the batch.
    // NOTE(review): typed as a raw JSON map here, while `Batch.metadata` below uses the
    // shared `Metadata` alias — presumably equivalent; confirm before unifying the types.
    pub metadata: Option<HashMap<String, serde_json::Value>>,

    /// The expiration policy for the output and/or error file that are generated for a batch.
    pub output_expires_after: Option<BatchFileExpirationAfter>,
}
39
40#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, Default)]
41pub enum BatchEndpoint {
42 #[default]
43 #[serde(rename = "/v1/responses")]
44 V1Responses,
45 #[serde(rename = "/v1/chat/completions")]
46 V1ChatCompletions,
47 #[serde(rename = "/v1/embeddings")]
48 V1Embeddings,
49 #[serde(rename = "/v1/completions")]
50 V1Completions,
51 #[serde(rename = "/v1/moderations")]
52 V1Moderations,
53}
54
55#[derive(Debug, Clone, PartialEq, Serialize, Default, Deserialize)]
56pub enum BatchCompletionWindow {
57 #[default]
58 #[serde(rename = "24h")]
59 W24H,
60}
61
62/// File expiration policy
63///
64/// The expiration policy for the output and/or error file that are generated for a batch.
65#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
66pub struct BatchFileExpirationAfter {
67 /// Anchor timestamp after which the expiration policy applies. Supported anchors: `created_at`. Note that the anchor is the file creation time, not the time the batch is created.
68 pub anchor: BatchFileExpirationAnchor,
69 /// The number of seconds after the anchor time that the file will expire. Must be between 3600 (1 hour) and 2592000 (30 days).
70 pub seconds: u32,
71}
72
73#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
74#[serde(rename_all = "snake_case")]
75pub enum BatchFileExpirationAnchor {
76 CreatedAt,
77}
78
/// A batch object as returned by the OpenAI Batch API.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct Batch {
    /// The batch identifier, referenced in other API requests.
    pub id: String,
    /// The object type, which is always `batch`.
    pub object: String,
    /// The OpenAI API endpoint used by the batch.
    pub endpoint: String,
    /// Model ID used to process the batch, like `gpt-5-2025-08-07`. OpenAI offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: Option<String>,
    /// Per-request errors encountered while validating or running the batch, if any.
    pub errors: Option<BatchErrors>,
    /// The ID of the input file for the batch.
    pub input_file_id: String,
    /// The time frame within which the batch should be processed.
    // NOTE(review): plain `String` here while `BatchRequest` uses the
    // `BatchCompletionWindow` enum — presumably kept loose for forward
    // compatibility with new windows; confirm before tightening.
    pub completion_window: String,
    /// The current status of the batch.
    pub status: BatchStatus,
    /// The ID of the file containing the outputs of successfully executed requests.
    pub output_file_id: Option<String>,
    /// The ID of the file containing the outputs of requests with errors.
    pub error_file_id: Option<String>,
    /// The Unix timestamp (in seconds) for when the batch was created.
    pub created_at: u64,
    /// The Unix timestamp (in seconds) for when the batch started processing.
    pub in_progress_at: Option<u64>,
    /// The Unix timestamp (in seconds) for when the batch will expire.
    pub expires_at: Option<u64>,
    /// The Unix timestamp (in seconds) for when the batch started finalizing.
    pub finalizing_at: Option<u64>,
    /// The Unix timestamp (in seconds) for when the batch was completed.
    pub completed_at: Option<u64>,
    /// The Unix timestamp (in seconds) for when the batch failed.
    pub failed_at: Option<u64>,
    /// The Unix timestamp (in seconds) for when the batch expired.
    pub expired_at: Option<u64>,
    /// The Unix timestamp (in seconds) for when the batch started cancelling.
    pub cancelling_at: Option<u64>,
    /// The Unix timestamp (in seconds) for when the batch was cancelled.
    pub cancelled_at: Option<u64>,
    /// The request counts for different statuses within the batch.
    pub request_counts: Option<BatchRequestCounts>,
    /// Represents token usage details including input tokens, output tokens, a breakdown of output tokens, and the total tokens used. Only populated on batches created after September 7, 2025.
    pub usage: Option<ResponseUsage>,
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
    pub metadata: Option<Metadata>,
}
124
/// The list of errors attached to a [`Batch`].
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchErrors {
    /// The object type, which is always `list`.
    pub object: String,
    /// The individual errors.
    pub data: Vec<BatchError>,
}
131
132#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
133pub struct BatchError {
134 /// An error code identifying the error type.
135 pub code: String,
136 /// A human-readable message providing more details about the error.
137 pub message: String,
138 /// The name of the parameter that caused the error, if applicable.
139 pub param: Option<String>,
140 /// The line number of the input file where the error occurred, if applicable.
141 pub line: Option<u32>,
142}
143
144#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
145#[serde(rename_all = "snake_case")]
146pub enum BatchStatus {
147 Validating,
148 Failed,
149 InProgress,
150 Finalizing,
151 Completed,
152 Expired,
153 Cancelling,
154 Cancelled,
155}
156
157#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
158pub struct BatchRequestCounts {
159 /// Total number of requests in the batch.
160 pub total: u32,
161 /// Number of requests that have been completed successfully.
162 pub completed: u32,
163 /// Number of requests that have failed.
164 pub failed: u32,
165}
166
/// A page of [`Batch`] objects returned when listing batches.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct ListBatchesResponse {
    /// The batches on this page.
    pub data: Vec<Batch>,
    /// ID of the first batch in `data`, if any (pagination cursor).
    pub first_id: Option<String>,
    /// ID of the last batch in `data`, if any (pagination cursor).
    pub last_id: Option<String>,
    /// Whether more batches exist beyond this page.
    pub has_more: bool,
    /// The object type — presumably always `list`; confirm against the API.
    pub object: String,
}
175
176#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
177#[serde(rename_all = "UPPERCASE")]
178pub enum BatchRequestInputMethod {
179 POST,
180}
181
/// The per-line object of the batch input file
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchRequestInput {
    /// A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch.
    pub custom_id: String,
    /// The HTTP method to be used for the request. Currently only `POST` is supported.
    pub method: BatchRequestInputMethod,
    /// The OpenAI API relative URL to be used for the request. Currently `/v1/responses`,
    /// `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, and `/v1/moderations` are supported.
    pub url: BatchEndpoint,
    /// The request payload as raw JSON, if any (shape depends on `url`).
    pub body: Option<serde_json::Value>,
}
194
/// The HTTP response portion of a batch output line.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchRequestOutputResponse {
    /// The HTTP status code of the response
    pub status_code: u16,
    /// An unique identifier for the OpenAI API request. Please include this request ID when contacting support.
    pub request_id: String,
    /// The JSON body of the response
    pub body: serde_json::Value,
}
204
205#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
206pub struct BatchRequestOutputError {
207 /// A machine-readable error code.
208 pub code: String,
209 /// A human-readable error message.
210 pub message: String,
211}
212
/// The per-line object of the batch output and error files
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchRequestOutput {
    /// Identifier of this output line.
    pub id: String,
    /// A developer-provided per-request id that will be used to match outputs to inputs.
    pub custom_id: String,
    /// The HTTP response for the request, when one was received.
    pub response: Option<BatchRequestOutputResponse>,
    /// For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure.
    pub error: Option<BatchRequestOutputError>,
}