// async_openai/types/batches/batch.rs
1use std::collections::HashMap;
2
3use derive_builder::Builder;
4use serde::{Deserialize, Serialize};
5
6use crate::error::OpenAIError;
7use crate::types::responses::ResponseUsage;
8use crate::types::Metadata;
9
10#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq, Deserialize)]
11#[builder(name = "BatchRequestArgs")]
12#[builder(pattern = "mutable")]
13#[builder(setter(into, strip_option), default)]
14#[builder(derive(Debug))]
15#[builder(build_fn(error = "OpenAIError"))]
16pub struct BatchRequest {
17 /// The ID of an uploaded file that contains requests for the new batch.
18 ///
19 /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file.
20 ///
21 /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size.
22 pub input_file_id: String,
23
24 /// The endpoint to be used for all requests in the batch. Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch.
25 pub endpoint: BatchEndpoint,
26
27 /// The time frame within which the batch should be processed. Currently only `24h` is supported.
28 pub completion_window: BatchCompletionWindow,
29
30 /// Optional custom metadata for the batch.
31 pub metadata: Option<HashMap<String, serde_json::Value>>,
32
33 /// The expiration policy for the output and/or error file that are generated for a batch.
34 pub output_expires_after: Option<BatchFileExpirationAfter>,
35}
36
/// The OpenAI API endpoint targeted by every request in a batch.
///
/// Serialized as the literal relative URL (e.g. `"/v1/responses"`).
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, Default)]
pub enum BatchEndpoint {
    /// `/v1/responses` — the default endpoint.
    #[default]
    #[serde(rename = "/v1/responses")]
    V1Responses,
    /// `/v1/chat/completions`
    #[serde(rename = "/v1/chat/completions")]
    V1ChatCompletions,
    /// `/v1/embeddings` — restricted to at most 50,000 embedding inputs per batch.
    #[serde(rename = "/v1/embeddings")]
    V1Embeddings,
    /// `/v1/completions`
    #[serde(rename = "/v1/completions")]
    V1Completions,
}
49
/// The time frame within which a batch must be processed.
///
/// Currently only `24h` is supported by the API, hence the single variant.
#[derive(Debug, Clone, PartialEq, Serialize, Default, Deserialize)]
pub enum BatchCompletionWindow {
    /// `24h` — 24-hour processing window (the only supported value, and the default).
    #[default]
    #[serde(rename = "24h")]
    W24H,
}
56
/// File expiration policy
///
/// The expiration policy for the output and/or error file that are generated for a batch.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct BatchFileExpirationAfter {
    /// Anchor timestamp after which the expiration policy applies. Supported anchors: `created_at`. Note that the anchor is the file creation time, not the time the batch is created.
    pub anchor: BatchFileExpirationAnchor,
    /// The number of seconds after the anchor time that the file will expire. Must be between 3600 (1 hour) and 2592000 (30 days).
    // NOTE(review): the range above is documented API behavior, not enforced by
    // this type — the server rejects out-of-range values.
    pub seconds: u32,
}
67
/// Anchor timestamp from which a file expiration policy is measured.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum BatchFileExpirationAnchor {
    /// `created_at` — the file's creation time (the only supported anchor).
    CreatedAt,
}
73
/// A batch object returned by the Batch API, describing the batch's
/// configuration, lifecycle timestamps, and current processing state.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct Batch {
    /// Unique identifier of the batch.
    pub id: String,
    /// The object type, which is always `batch`.
    pub object: String,
    /// The OpenAI API endpoint used by the batch.
    pub endpoint: String,
    /// Model ID used to process the batch, like `gpt-5-2025-08-07`. OpenAI offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
    pub model: Option<String>,
    /// Per-line errors encountered while validating/processing the batch, if any.
    pub errors: Option<BatchErrors>,
    /// The ID of the input file for the batch.
    pub input_file_id: String,
    /// The time frame within which the batch should be processed.
    pub completion_window: String,
    /// The current status of the batch.
    pub status: BatchStatus,
    /// The ID of the file containing the outputs of successfully executed requests.
    pub output_file_id: Option<String>,
    /// The ID of the file containing the outputs of requests with errors.
    pub error_file_id: Option<String>,
    /// The Unix timestamp (in seconds) for when the batch was created.
    // NOTE(review): all timestamps here are u32 Unix seconds, which overflow in
    // 2106 — confirm whether u64 was intended upstream before widening.
    pub created_at: u32,
    /// The Unix timestamp (in seconds) for when the batch started processing.
    pub in_progress_at: Option<u32>,
    /// The Unix timestamp (in seconds) for when the batch will expire.
    pub expires_at: Option<u32>,
    /// The Unix timestamp (in seconds) for when the batch started finalizing.
    pub finalizing_at: Option<u32>,
    /// The Unix timestamp (in seconds) for when the batch was completed.
    pub completed_at: Option<u32>,
    /// The Unix timestamp (in seconds) for when the batch failed.
    pub failed_at: Option<u32>,
    /// The Unix timestamp (in seconds) for when the batch expired.
    pub expired_at: Option<u32>,
    /// The Unix timestamp (in seconds) for when the batch started cancelling.
    pub cancelling_at: Option<u32>,
    /// The Unix timestamp (in seconds) for when the batch was cancelled.
    pub cancelled_at: Option<u32>,
    /// The request counts for different statuses within the batch.
    pub request_counts: Option<BatchRequestCounts>,
    /// Represents token usage details including input tokens, output tokens, a breakdown of output tokens, and the total tokens used. Only populated on batches created after September 7, 2025.
    pub usage: Option<ResponseUsage>,
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
    pub metadata: Option<Metadata>,
}
119
/// Container for the list of errors attached to a [`Batch`].
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchErrors {
    /// The object type, which is always `list`.
    pub object: String,
    /// The individual error entries.
    pub data: Vec<BatchError>,
}
126
/// A single error reported for a batch, typically tied to one line of the
/// JSONL input file.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchError {
    /// An error code identifying the error type.
    pub code: String,
    /// A human-readable message providing more details about the error.
    pub message: String,
    /// The name of the parameter that caused the error, if applicable.
    pub param: Option<String>,
    /// The line number of the input file where the error occurred, if applicable.
    pub line: Option<u32>,
}
138
/// Lifecycle state of a [`Batch`], serialized in `snake_case`
/// (e.g. `in_progress`).
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum BatchStatus {
    /// The input file is being validated before the batch can begin.
    Validating,
    /// The input file failed validation or processing failed.
    Failed,
    /// The batch is currently being processed.
    InProgress,
    /// The batch has completed and the results are being prepared.
    Finalizing,
    /// The batch completed and the results are ready.
    Completed,
    /// The batch was not able to be completed within its completion window.
    Expired,
    /// Cancellation of the batch has been initiated.
    Cancelling,
    /// The batch was cancelled.
    Cancelled,
}
151
/// Counts of requests in a batch, broken down by outcome.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchRequestCounts {
    /// Total number of requests in the batch.
    pub total: u32,
    /// Number of requests that have been completed successfully.
    pub completed: u32,
    /// Number of requests that have failed.
    pub failed: u32,
}
161
/// Paginated list of batches, as returned by the list-batches endpoint.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct ListBatchesResponse {
    /// The batches on this page.
    pub data: Vec<Batch>,
    /// ID of the first batch in `data`, usable as a pagination cursor.
    pub first_id: Option<String>,
    /// ID of the last batch in `data`, usable as a pagination cursor.
    pub last_id: Option<String>,
    /// Whether more results exist beyond this page.
    pub has_more: bool,
    /// The object type, which is always `list`.
    pub object: String,
}
170
/// HTTP method allowed for a batch input line, serialized in UPPERCASE.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
#[serde(rename_all = "UPPERCASE")]
pub enum BatchRequestInputMethod {
    /// `POST` — the only method currently supported by the Batch API.
    POST,
}
176
/// The per-line object of the batch input file
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchRequestInput {
    /// A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch.
    pub custom_id: String,
    /// The HTTP method to be used for the request. Currently only `POST` is supported.
    pub method: BatchRequestInputMethod,
    /// The OpenAI API relative URL to be used for the request. Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
    pub url: BatchEndpoint,
    /// The JSON request body for the target endpoint; kept as raw JSON since
    /// its schema depends on `url`.
    pub body: Option<serde_json::Value>,
}
188
/// The HTTP response recorded for one request in a batch output file.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchRequestOutputResponse {
    /// The HTTP status code of the response
    pub status_code: u16,
    /// An unique identifier for the OpenAI API request. Please include this request ID when contacting support.
    pub request_id: String,
    /// The JSON body of the response
    pub body: serde_json::Value,
}
198
/// Error details for a batch request that failed with a non-HTTP error.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchRequestOutputError {
    /// A machine-readable error code.
    pub code: String,
    /// A human-readable error message.
    pub message: String,
}
206
/// The per-line object of the batch output and error files
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct BatchRequestOutput {
    /// Unique identifier of this output line.
    pub id: String,
    /// A developer-provided per-request id that will be used to match outputs to inputs.
    pub custom_id: String,
    /// The HTTP response for the request; `None` when the request failed
    /// before an HTTP response was produced (see `error`).
    pub response: Option<BatchRequestOutputResponse>,
    /// For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure.
    pub error: Option<BatchRequestOutputError>,
}
216}