//! async_openai/types/assistants/assistant.rs
1use std::collections::HashMap;
2
3use derive_builder::Builder;
4use serde::{Deserialize, Serialize};
5
6use crate::error::OpenAIError;
7
8use crate::types::assistants::{
9 FunctionName, FunctionObject, ResponseFormat, StaticChunkingStrategy,
10};
11
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
pub struct AssistantToolCodeInterpreterResources {
    /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
    pub file_ids: Vec<String>, // maxItems: 20
}
17
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
pub struct AssistantToolFileSearchResources {
    /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.
    pub vector_store_ids: Vec<String>,
}
23
/// Resources used by the assistant's tools, keyed by tool type, as returned by the API.
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
pub struct AssistantToolResources {
    /// Resources for the `code_interpreter` tool (a list of file IDs).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code_interpreter: Option<AssistantToolCodeInterpreterResources>,
    /// Resources for the `file_search` tool (attached vector store IDs).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_search: Option<AssistantToolFileSearchResources>,
}
31
/// Tool resources supplied when creating an assistant. Uses the create-specific
/// file-search variant, which additionally permits creating vector stores inline.
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
pub struct CreateAssistantToolResources {
    /// Resources for the `code_interpreter` tool (a list of file IDs).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code_interpreter: Option<AssistantToolCodeInterpreterResources>,
    /// Resources for the `file_search` tool.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_search: Option<CreateAssistantToolFileSearchResources>,
}
39
40#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
41pub struct CreateAssistantToolFileSearchResources {
42 /// The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.
43 pub vector_store_ids: Option<Vec<String>>,
44 /// A helper to create a [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant.
45 pub vector_stores: Option<Vec<AssistantVectorStore>>,
46}
47
48#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
49pub struct AssistantVectorStore {
50 /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.
51 pub file_ids: Vec<String>,
52
53 /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.
54 pub chunking_strategy: Option<AssistantVectorStoreChunkingStrategy>,
55
56 /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.
57 pub metadata: Option<HashMap<String, String>>,
58}
59
/// Chunking strategy for files added to a vector store, serialized with an
/// internal `type` tag (`"auto"` or `"static"`).
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
#[serde(tag = "type")]
pub enum AssistantVectorStoreChunkingStrategy {
    /// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.
    #[default]
    #[serde(rename = "auto")]
    Auto,
    /// Fixed chunking parameters; the payload is nested under a `static` key.
    #[serde(rename = "static")]
    Static { r#static: StaticChunkingStrategy },
}
70
/// Represents an `assistant` that can call the model and use tools.
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
pub struct AssistantObject {
    /// The identifier, which can be referenced in API endpoints.
    pub id: String,
    /// The object type, which is always `assistant`.
    pub object: String,
    /// The Unix timestamp (in seconds) for when the assistant was created.
    pub created_at: u64,
    /// The name of the assistant. The maximum length is 256 characters.
    pub name: Option<String>,
    /// The description of the assistant. The maximum length is 512 characters.
    pub description: Option<String>,
    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.
    pub model: String,
    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
    pub instructions: Option<String>,
    /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
    // `#[serde(default)]` tolerates responses that omit `tools` entirely.
    #[serde(default)]
    pub tools: Vec<AssistantTools>,
    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    pub tool_resources: Option<AssistantToolResources>,
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
    pub metadata: Option<HashMap<String, String>>,
    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    pub temperature: Option<f32>,
    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    /// We generally recommend altering this or temperature but not both.
    pub top_p: Option<f32>,

    /// Specifies the format that the model must output. See [`AssistantsApiResponseFormatOption`].
    pub response_format: Option<AssistantsApiResponseFormatOption>,
}
103
/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
///
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
///
/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
///
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)]
pub enum AssistantsApiResponseFormatOption {
    /// The literal string `"auto"` — the model chooses the format.
    #[default]
    #[serde(rename = "auto")]
    Auto,
    /// An explicit format object; `untagged` lets it (de)serialize as the bare
    /// `ResponseFormat` JSON rather than under a variant name.
    #[serde(untagged)]
    Format(ResponseFormat),
}
119
/// File search (retrieval) tool.
#[derive(Clone, Serialize, Debug, Default, Deserialize, PartialEq)]
pub struct AssistantToolsFileSearch {
    /// Overrides for the file search tool.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_search: Option<AssistantToolsFileSearchOverrides>,
}
127
128#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
129pub struct AssistantToolsFileSearchOverrides {
130 /// The maximum number of results the file search tool should output. The default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive.
131 ///
132 //// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.
133 pub max_num_results: Option<u8>,
134 pub ranking_options: Option<FileSearchRankingOptions>,
135}
136
/// The ranker to use for the file search tool.
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
pub enum FileSearchRanker {
    /// Let the service pick the ranker.
    #[serde(rename = "auto")]
    Auto,
    /// The pinned `default_2024_08_21` ranker.
    #[serde(rename = "default_2024_08_21")]
    Default2024_08_21,
}
144
/// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
///
/// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct FileSearchRankingOptions {
    /// The ranker to use for the file search. If not specified will use the `auto` ranker.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ranker: Option<FileSearchRanker>,

    /// The score threshold for the file search. All values must be a floating point number between 0 and 1.
    pub score_threshold: f32,
}
157
/// Function tool.
#[derive(Clone, Serialize, Debug, Default, Deserialize, PartialEq)]
pub struct AssistantToolsFunction {
    /// The function definition (name, description, JSON-schema parameters).
    pub function: FunctionObject,
}
163
/// A tool enabled on an assistant, discriminated by the JSON `type` field
/// (`code_interpreter`, `file_search`, or `function`).
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum AssistantTools {
    /// `{"type": "code_interpreter"}` — carries no payload.
    CodeInterpreter,
    /// `{"type": "file_search", ...}` with optional overrides.
    FileSearch(AssistantToolsFileSearch),
    /// `{"type": "function", "function": {...}}`.
    Function(AssistantToolsFunction),
}
172
/// Request body for creating an assistant. Build via `CreateAssistantRequestArgs`.
#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
#[builder(name = "CreateAssistantRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateAssistantRequest {
    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.
    pub model: String,

    /// The name of the assistant. The maximum length is 256 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,

    /// The description of the assistant. The maximum length is 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,

    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<AssistantTools>>,

    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<CreateAssistantToolResources>,

    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or temperature but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// Specifies the format that the model must output. See [`AssistantsApiResponseFormatOption`].
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<AssistantsApiResponseFormatOption>,
}
220
/// Request body for modifying an assistant. Every field is optional; omitted
/// fields are left unchanged on the server. Build via `ModifyAssistantRequestArgs`.
#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
#[builder(name = "ModifyAssistantRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct ModifyAssistantRequest {
    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// The name of the assistant. The maximum length is 256 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,

    /// The description of the assistant. The maximum length is 512 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,

    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<AssistantTools>>,

    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<AssistantToolResources>,
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or temperature but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// Specifies the format that the model must output. See [`AssistantsApiResponseFormatOption`].
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<AssistantsApiResponseFormatOption>,
}
268
/// Response returned when an assistant is deleted.
#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
pub struct DeleteAssistantResponse {
    /// The ID of the deleted assistant.
    pub id: String,
    /// Whether the deletion succeeded.
    pub deleted: bool,
    /// The object type.
    pub object: String,
}
275
/// A paginated list of assistants.
#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
pub struct ListAssistantsResponse {
    /// The object type (a list wrapper).
    pub object: String,
    /// The assistants on this page.
    pub data: Vec<AssistantObject>,
    /// ID of the first item on the page, for cursor pagination.
    pub first_id: Option<String>,
    /// ID of the last item on the page, for cursor pagination.
    pub last_id: Option<String>,
    /// Whether more results are available after this page.
    pub has_more: bool,
}
284
/// Controls which (if any) tool is called by the model.
/// `none` means the model will not call any tools and instead generates a message.
/// `auto` is the default value and means the model can pick between generating a message or calling one or more tools.
/// `required` means the model must call one or more tools before responding to the user.
/// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum AssistantsApiToolChoiceOption {
    /// The literal string `"none"`.
    #[default]
    None,
    /// The literal string `"auto"`.
    Auto,
    /// The literal string `"required"`.
    Required,
    /// A specific tool object; `untagged` lets it (de)serialize as the bare
    /// `AssistantsNamedToolChoice` JSON object rather than a string.
    #[serde(untagged)]
    Named(AssistantsNamedToolChoice),
}
300
301/// Specifies a tool the model should use. Use to force the model to call a specific tool.
302#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
303pub struct AssistantsNamedToolChoice {
304 /// The type of the tool. If type is `function`, the function name must be set
305 pub r#type: AssistantToolType,
306
307 pub function: Option<FunctionName>,
308}
309
/// The tool type discriminator used by [`AssistantsNamedToolChoice`],
/// serialized in snake_case (`function`, `code_interpreter`, `file_search`).
#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum AssistantToolType {
    /// `"function"` — requires `function` to be set on the tool choice.
    #[default]
    Function,
    /// `"code_interpreter"`.
    CodeInterpreter,
    /// `"file_search"`.
    FileSearch,
}
317}