// async_openai/types/assistants/assistant.rs

use std::collections::HashMap;

use derive_builder::Builder;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;

use crate::types::assistants::{
    FunctionName, FunctionObject, ResponseFormat, StaticChunkingStrategy,
};
11
12#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
13pub struct AssistantToolCodeInterpreterResources {
14    ///A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.
15    pub file_ids: Vec<String>, // maxItems: 20
16}
17
18#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
19pub struct AssistantToolFileSearchResources {
20    /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.
21    pub vector_store_ids: Vec<String>,
22}
23
24#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
25pub struct AssistantToolResources {
26    #[serde(skip_serializing_if = "Option::is_none")]
27    pub code_interpreter: Option<AssistantToolCodeInterpreterResources>,
28    #[serde(skip_serializing_if = "Option::is_none")]
29    pub file_search: Option<AssistantToolFileSearchResources>,
30}
31
32#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
33pub struct CreateAssistantToolResources {
34    #[serde(skip_serializing_if = "Option::is_none")]
35    pub code_interpreter: Option<AssistantToolCodeInterpreterResources>,
36    #[serde(skip_serializing_if = "Option::is_none")]
37    pub file_search: Option<CreateAssistantToolFileSearchResources>,
38}
39
40#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
41pub struct CreateAssistantToolFileSearchResources {
42    ///  The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.
43    pub vector_store_ids: Option<Vec<String>>,
44    /// A helper to create a [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant.
45    pub vector_stores: Option<Vec<AssistantVectorStore>>,
46}
47
48#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
49pub struct AssistantVectorStore {
50    /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.
51    pub file_ids: Vec<String>,
52
53    /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.
54    pub chunking_strategy: Option<AssistantVectorStoreChunkingStrategy>,
55
56    /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.
57    pub metadata: Option<HashMap<String, String>>,
58}
59
60#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
61#[serde(tag = "type")]
62pub enum AssistantVectorStoreChunkingStrategy {
63    /// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.
64    #[default]
65    #[serde(rename = "auto")]
66    Auto,
67    #[serde(rename = "static")]
68    Static { r#static: StaticChunkingStrategy },
69}
70
71/// Represents an `assistant` that can call the model and use tools.
72#[deprecated(
73    note = "Assistants API is deprecated and will be removed in August 2026. Use the Responses API."
74)]
75#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
76pub struct AssistantObject {
77    /// The identifier, which can be referenced in API endpoints.
78    pub id: String,
79    /// The object type, which is always `assistant`.
80    pub object: String,
81    /// The Unix timestamp (in seconds) for when the assistant was created.
82    pub created_at: u64,
83    /// The name of the assistant. The maximum length is 256 characters.
84    pub name: Option<String>,
85    /// The description of the assistant. The maximum length is 512 characters.
86    pub description: Option<String>,
87    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.
88    pub model: String,
89    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
90    pub instructions: Option<String>,
91    /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
92    #[serde(default)]
93    pub tools: Vec<AssistantTools>,
94    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
95    pub tool_resources: Option<AssistantToolResources>,
96    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
97    pub metadata: Option<HashMap<String, String>>,
98    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
99    pub temperature: Option<f32>,
100    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
101    /// We generally recommend altering this or temperature but not both.
102    pub top_p: Option<f32>,
103
104    pub response_format: Option<AssistantsApiResponseFormatOption>,
105}
106
107/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
108///
109/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
110///
111/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
112///
113/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
114#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
115pub enum AssistantsApiResponseFormatOption {
116    #[default]
117    #[serde(rename = "auto")]
118    Auto,
119    #[serde(untagged)]
120    Format(ResponseFormat),
121}
122
123/// Retrieval tool
124#[derive(Clone, Serialize, Debug, Default, Deserialize, PartialEq)]
125pub struct AssistantToolsFileSearch {
126    /// Overrides for the file search tool.
127    #[serde(skip_serializing_if = "Option::is_none")]
128    pub file_search: Option<AssistantToolsFileSearchOverrides>,
129}
130
131#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
132pub struct AssistantToolsFileSearchOverrides {
133    ///  The maximum number of results the file search tool should output. The default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive.
134    ///
135    //// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.
136    pub max_num_results: Option<u8>,
137    pub ranking_options: Option<FileSearchRankingOptions>,
138}
139
140#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
141pub enum FileSearchRanker {
142    #[serde(rename = "auto")]
143    Auto,
144    #[serde(rename = "default_2024_08_21")]
145    Default2024_08_21,
146}
147
148/// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
149///
150/// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.
151#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
152pub struct FileSearchRankingOptions {
153    /// The ranker to use for the file search. If not specified will use the `auto` ranker.
154    #[serde(skip_serializing_if = "Option::is_none")]
155    pub ranker: Option<FileSearchRanker>,
156
157    /// The score threshold for the file search. All values must be a floating point number between 0 and 1.
158    pub score_threshold: f32,
159}
160
161/// Function tool
162#[derive(Clone, Serialize, Debug, Default, Deserialize, PartialEq)]
163pub struct AssistantToolsFunction {
164    pub function: FunctionObject,
165}
166
167#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
168#[serde(tag = "type")]
169#[serde(rename_all = "snake_case")]
170pub enum AssistantTools {
171    CodeInterpreter,
172    FileSearch(AssistantToolsFileSearch),
173    Function(AssistantToolsFunction),
174}
175
176#[deprecated(
177    note = "Assistants API is deprecated and will be removed in August 2026. Use the Responses API."
178)]
179#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
180#[builder(name = "CreateAssistantRequestArgs")]
181#[builder(pattern = "mutable")]
182#[builder(setter(into, strip_option), default)]
183#[builder(derive(Debug))]
184#[builder(build_fn(error = "OpenAIError"))]
185pub struct CreateAssistantRequest {
186    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.
187    pub model: String,
188
189    /// The name of the assistant. The maximum length is 256 characters.
190    #[serde(skip_serializing_if = "Option::is_none")]
191    pub name: Option<String>,
192
193    /// The description of the assistant. The maximum length is 512 characters.
194    #[serde(skip_serializing_if = "Option::is_none")]
195    pub description: Option<String>,
196
197    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
198    #[serde(skip_serializing_if = "Option::is_none")]
199    pub instructions: Option<String>,
200
201    /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
202    #[serde(skip_serializing_if = "Option::is_none")]
203    pub tools: Option<Vec<AssistantTools>>,
204
205    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
206    #[serde(skip_serializing_if = "Option::is_none")]
207    pub tool_resources: Option<CreateAssistantToolResources>,
208
209    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
210    #[serde(skip_serializing_if = "Option::is_none")]
211    pub metadata: Option<HashMap<String, String>>,
212
213    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
214    #[serde(skip_serializing_if = "Option::is_none")]
215    pub temperature: Option<f32>,
216
217    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
218    ///
219    /// We generally recommend altering this or temperature but not both.
220    #[serde(skip_serializing_if = "Option::is_none")]
221    pub top_p: Option<f32>,
222
223    #[serde(skip_serializing_if = "Option::is_none")]
224    pub response_format: Option<AssistantsApiResponseFormatOption>,
225}
226
227#[deprecated(
228    note = "Assistants API is deprecated and will be removed in August 2026. Use the Responses API."
229)]
230#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
231#[builder(name = "ModifyAssistantRequestArgs")]
232#[builder(pattern = "mutable")]
233#[builder(setter(into, strip_option), default)]
234#[builder(derive(Debug))]
235#[builder(build_fn(error = "OpenAIError"))]
236pub struct ModifyAssistantRequest {
237    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.
238    #[serde(skip_serializing_if = "Option::is_none")]
239    pub model: Option<String>,
240
241    /// The name of the assistant. The maximum length is 256 characters.
242    #[serde(skip_serializing_if = "Option::is_none")]
243    pub name: Option<String>,
244
245    /// The description of the assistant. The maximum length is 512 characters.
246    #[serde(skip_serializing_if = "Option::is_none")]
247    pub description: Option<String>,
248
249    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
250    #[serde(skip_serializing_if = "Option::is_none")]
251    pub instructions: Option<String>,
252
253    /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
254    #[serde(skip_serializing_if = "Option::is_none")]
255    pub tools: Option<Vec<AssistantTools>>,
256
257    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
258    #[serde(skip_serializing_if = "Option::is_none")]
259    pub tool_resources: Option<AssistantToolResources>,
260    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.
261    #[serde(skip_serializing_if = "Option::is_none")]
262    pub metadata: Option<HashMap<String, String>>,
263
264    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
265    #[serde(skip_serializing_if = "Option::is_none")]
266    pub temperature: Option<f32>,
267
268    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
269    ///
270    /// We generally recommend altering this or temperature but not both.
271    #[serde(skip_serializing_if = "Option::is_none")]
272    pub top_p: Option<f32>,
273
274    #[serde(skip_serializing_if = "Option::is_none")]
275    pub response_format: Option<AssistantsApiResponseFormatOption>,
276}
277
278#[deprecated(
279    note = "Assistants API is deprecated and will be removed in August 2026. Use the Responses API."
280)]
281#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
282pub struct DeleteAssistantResponse {
283    pub id: String,
284    pub deleted: bool,
285    pub object: String,
286}
287
288#[deprecated(
289    note = "Assistants API is deprecated and will be removed in August 2026. Use the Responses API."
290)]
291#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
292pub struct ListAssistantsResponse {
293    pub object: String,
294    pub data: Vec<AssistantObject>,
295    pub first_id: Option<String>,
296    pub last_id: Option<String>,
297    pub has_more: bool,
298}
299
300/// Controls which (if any) tool is called by the model.
301/// `none` means the model will not call any tools and instead generates a message.
302/// `auto` is the default value and means the model can pick between generating a message or calling one or more tools.
303/// `required` means the model must call one or more tools before responding to the user.
304/// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
305#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
306#[serde(rename_all = "lowercase")]
307pub enum AssistantsApiToolChoiceOption {
308    #[default]
309    None,
310    Auto,
311    Required,
312    #[serde(untagged)]
313    Named(AssistantsNamedToolChoice),
314}
315
316/// Specifies a tool the model should use. Use to force the model to call a specific tool.
317#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
318pub struct AssistantsNamedToolChoice {
319    /// The type of the tool. If type is `function`, the function name must be set
320    pub r#type: AssistantToolType,
321
322    pub function: Option<FunctionName>,
323}
324
325#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
326#[serde(rename_all = "snake_case")]
327pub enum AssistantToolType {
328    #[default]
329    Function,
330    CodeInterpreter,
331    FileSearch,
332}